[
  {
    "path": ".github/CODEOWNERS",
    "content": "* @rancher/orbs\n"
  },
  {
    "path": ".github/renovate.json",
    "content": "{\n  \"extends\": [\"github>rancher/renovate-config#release\"],\n  \"baseBranches\": [\"main\"],\n  \"packageRules\": [\n    {\n      \"matchDepNames\": [\n        \"kubernetes/kubernetes\"\n      ],\n      \"allowedVersions\": \"<1.26.0\"\n    }\n  ]\n}\n"
  },
  {
    "path": ".github/workflows/fossa.yml",
    "content": "name: FOSSA Scanning\n\non:\n  push:\n    branches: [\"main\", \"master\", \"release/**\"]\n  workflow_dispatch:\n\npermissions:\n  contents: read\n  id-token: write\n\njobs:\n  fossa-scanning:\n    runs-on: ubuntu-latest\n    timeout-minutes: 30\n    steps:\n    - name: Checkout\n      uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6\n\n    # The FOSSA token is shared between all repos in Rancher's GH org. It can be\n    # used directly and there is no need to request specific access to EIO.\n    - name: Read FOSSA token\n      uses: rancher-eio/read-vault-secrets@main\n      with:\n        secrets: |\n          secret/data/github/org/rancher/fossa/push token | FOSSA_API_KEY_PUSH_ONLY\n\n    - name: FOSSA scan\n      uses: fossas/fossa-action@main\n      with:\n        api-key: ${{ env.FOSSA_API_KEY_PUSH_ONLY }}\n        # Only runs the scan and do not provide/returns any results back to the\n        # pipeline.\n        run-tests: false\n"
  },
  {
    "path": ".github/workflows/pull-request.yaml",
    "content": "name: Pull request checks\n\non: [pull_request]\n\njobs:\n  shellcheck:\n    runs-on: ubuntu-latest\n    steps:\n      - uses: actions/checkout@34e114876b0b11c390a56381ad16ebd13914f8d5 # v4\n\n      - name: Run shellcheck\n        run: |\n          sudo apt install shellcheck\n          shellcheck -S warning *.sh\n\n  build:\n    runs-on: ubuntu-latest\n    steps:\n      - uses: actions/checkout@34e114876b0b11c390a56381ad16ebd13914f8d5 # v4\n\n      - name: Build docker image\n        run: docker build -t ${{ github.repository_owner }}/rancher-cleanup:test .\n"
  },
  {
    "path": ".github/workflows/release.yaml",
    "content": "name: Release\n\non:\n  push:\n    tags:\n      - 'v*'\n  workflow_dispatch:\n    inputs:\n      imageTag:\n        description: Base Image Tag (before the :)\n        type: string\n        required: false\n        default: rancher/rancher-cleanup\n\njobs:\n  shellcheck:\n    runs-on: ubuntu-latest\n    steps:\n      - uses: actions/checkout@34e114876b0b11c390a56381ad16ebd13914f8d5 # v4\n\n      - name: Run shellcheck\n        run: |\n          sudo apt install shellcheck\n          shellcheck -S warning *.sh\n\n  test-build:\n    runs-on: ubuntu-latest\n    steps:\n      - uses: actions/checkout@34e114876b0b11c390a56381ad16ebd13914f8d5 # v4\n\n      - name: Build docker image\n        run: docker build -t ${{ github.repository_owner }}/rancher-cleanup:test .\n\n  release:\n    permissions:\n      id-token: write\n      contents: write\n    runs-on: ubuntu-latest\n    needs:\n      - shellcheck\n      - test-build\n    steps:\n      - uses: actions/checkout@34e114876b0b11c390a56381ad16ebd13914f8d5 # v4\n\n      - name: Set up QEMU\n        uses: docker/setup-qemu-action@c7c53464625b32c7a7e944ae62b3e17d2b600130 # v3\n\n      - name: Set up Docker Buildx\n        uses: docker/setup-buildx-action@8d2750c68a42422c14e847fe6c8ac0403b4cbd6f # v3\n\n      - name: Get dockerhub username and password from vault\n        if: ${{ github.repository == 'rancher/rancher-cleanup' }}\n        uses: rancher-eio/read-vault-secrets@0da85151ad1f19ed7986c41587e45aac1ace74b6 # v3\n        with:\n          secrets: |\n            secret/data/github/repo/${{ github.repository }}/dockerhub/${{ github.repository_owner }}/credentials username | DOCKER_USERNAME ;\n            secret/data/github/repo/${{ github.repository }}/dockerhub/${{ github.repository_owner }}/credentials password | DOCKER_PASSWORD\n\n      - name: Log into Docker Hub\n        uses: docker/login-action@c94ce9fb468520275223c153574b00df6fe4bcc9 # v3\n        with:\n          username: ${{ env.DOCKER_USERNAME 
|| secrets.DOCKER_USERNAME }}\n          password: ${{ env.DOCKER_PASSWORD || secrets.DOCKER_PASSWORD }}\n\n      - name: Create github release\n        run: scripts/github-release\n        env:\n          TAG: ${{ github.ref_name }}\n          GH_REPO: ${{ github.repository }}\n          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}\n\n      - name: Build and push docker image (prerelease)\n        if: ${{ contains(github.ref_name, 'rc') || contains(github.ref_name, 'alpha') }}\n        uses: docker/build-push-action@ca052bb54ab0790a636c9b5f226502c73d547a25 # v5\n        env:\n          IMAGE_TAG: ${{ inputs.imageTag || github.repository }}\n        with:\n          platforms: linux/amd64\n          push: true\n          tags: |\n            ${{ env.IMAGE_TAG }}:${{ github.ref_name }}\n            ${{ env.IMAGE_TAG }}:${{ github.ref_name }}-amd64\n\n      - name: Build and push docker image (full release)\n        if: ${{ !contains(github.ref_name, 'rc') && !contains(github.ref_name, 'alpha') }}\n        uses: docker/build-push-action@ca052bb54ab0790a636c9b5f226502c73d547a25 # v5\n        env:\n          IMAGE_TAG: ${{ inputs.imageTag || github.repository }}\n        with:\n          platforms: linux/amd64\n          push: true\n          tags: |\n            ${{ env.IMAGE_TAG }}:${{ github.ref_name }}\n            ${{ env.IMAGE_TAG }}:${{ github.ref_name }}-amd64\n            ${{ env.IMAGE_TAG }}:latest\n            ${{ env.IMAGE_TAG }}:latest-amd64\n\n      - name: Attest build provenance\n        uses: actions/attest@59d89421af93a897026c735860bf21b6eb4f7b26 # v4.1.0\n        with:\n          subject-checksums: checksums.txt\n\n      - name: Publish github release\n        shell: bash\n        run: gh release edit ${{ github.ref_name }} --draft=false\n        env:\n          GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}\n"
  },
  {
    "path": ".github/workflows/renovate.yml",
    "content": "name: Renovate\non:\n  workflow_dispatch:\n    inputs:\n      logLevel:\n        description: \"Override default log level\"\n        required: false\n        default: \"info\"\n        type: string\n      overrideSchedule:\n        description: \"Override all schedules\"\n        required: false\n        default: \"false\"\n        type: string\n  # Run twice in the early morning (UTC) for initial and follow up steps (create pull request and merge)\n  schedule:\n    - cron: '30 4,6 * * *'\n\njobs:\n  call-workflow:\n    uses: rancher/renovate-config/.github/workflows/renovate.yml@c88cbe41a49d02648b9bf83aa5a64902151323fa # release\n    with:\n      logLevel: ${{ inputs.logLevel || 'info' }}\n      overrideSchedule: ${{ github.event.inputs.overrideSchedule == 'true' && '{''schedule'':null}' || '' }}\n    secrets: inherit\n"
  },
  {
    "path": ".gitignore",
    "content": "/.dapper\n/.cache\n/bin\n/dist\n*.swp\n.idea\n./backup\n./backup-restore-operator\nbackup-restore-operator\n/secretExamples\ntest-backup-location-local/\n.DS_Store\n/backups\nsha256sum-amd64.txt\n"
  },
  {
    "path": "Dockerfile",
    "content": "FROM registry.suse.com/bci/bci-base:15.7\n\nENV KUBECTL_VERSION=v1.30.14\nENV KUBECTL_SUM_AMD64=7ccac981ece0098284d8961973295f5124d78eab7b89ba5023f35591baa16271\n\nWORKDIR /usr/local/bin\nRUN set -eux; \\\n    curl -LO \"https://dl.k8s.io/release/${KUBECTL_VERSION}/bin/linux/amd64/kubectl\"; \\\n    echo \"${KUBECTL_SUM_AMD64}  kubectl\" | sha256sum -c -; \\\n    chmod +x kubectl\n\nCOPY cleanup.sh verify.sh /usr/local/bin/\nRUN chmod +x /usr/local/bin/cleanup.sh /usr/local/bin/verify.sh\n\nENTRYPOINT [\"/usr/local/bin/cleanup.sh\"]\n"
  },
  {
    "path": "README.md",
    "content": "# Rancher resource cleanup script\n\n**Warning**\n```\nTHIS WILL DELETE ALL RESOURCES CREATED BY RANCHER\nMAKE SURE YOU HAVE CREATED AND TESTED YOUR BACKUPS\nTHIS IS A NON REVERSIBLE ACTION\n```\n\nThis script will delete all Kubernetes resources belonging to/created by Rancher (including installed tools like logging/monitoring/opa gatekeeper/etc). Note: this does not remove any Longhorn resources.\n\n\n## Using the cleanup script\n\n### Run as a Kubernetes Job\n\n* Deploy the job using `kubectl create -f deploy/rancher-cleanup.yaml`\n* Watch logs using `kubectl  -n kube-system logs -l job-name=cleanup-job  -f`\n\n\n## Verify\n\n* Deploy the job using `kubectl create -f deploy/verify.yaml`\n* Watch logs using `kubectl  -n kube-system logs -l job-name=verify-job  -f`, output should be empty (besides deprecation warnings)\n* Check completed logs using `kubectl  -n kube-system logs -l job-name=verify-job  -f | grep -v \"is deprecated\"`, this will exclude deprecation warnings.\n\n\n## Developing\n\n### How to Make a Release\n\nReleases are done via github actions, and triggered by pushing a\ntag to the remote that starts with `v`. There are two types of\nreleases: \"pre\" and \"full\" release. To make a prerelease, push a\ntag that contains the string `rc` or `alpha` (for example, `v1.2.3-rc1`\nor `v1.2.3-alpha1`). To make a full release, push a tag that does\nnot contain either of these strings (for example, `v1.2.3`).\n"
  },
  {
    "path": "cleanup.sh",
    "content": "#!/bin/bash\n# Overridden on package\nSCRIPT_VERSION=\"unreleased\"\necho \"Running cleanup.sh version ${SCRIPT_VERSION}\"\n\n# Warning\necho \"==================== WARNING ====================\"\necho \"THIS WILL DELETE ALL RESOURCES CREATED BY RANCHER\"\necho \"MAKE SURE YOU HAVE CREATED AND TESTED YOUR BACKUPS\"\necho \"THIS IS A NON REVERSIBLE ACTION\"\necho \"==================== WARNING ====================\"\n\n# Linux only for now\nif [ \"$(uname -s)\" != \"Linux\" ]; then\n  echo \"Must be run on Linux\"\n  exit 1\nfi\n\n# Check kubectl existence\nif ! type kubectl >/dev/null 2>&1; then\n  echo \"kubectl not found in PATH, make sure kubectl is available\"\n  exit 1\nfi\n\n# Check timeout existence\nif ! type timeout >/dev/null 2>&1; then\n  echo \"timeout not found in PATH, make sure timeout is available\"\n  exit 1\nfi\n\n\n# Test connectivity\nif ! kubectl get nodes >/dev/null 2>&1; then\n  echo \"'kubectl get nodes' exited non-zero, make sure environment variable KUBECONFIG is set to a working kubeconfig file\"\n  exit 1\nfi\n\necho \"=> Printing cluster info for confirmation\"\nkubectl cluster-info\nkubectl get nodes -o wide\n\nif [ \"$1\" != \"force\" ]; then\n    echo \"Do you want to continue (y/n)?\"\n    read -r answer\n\n    if [ \"$answer\" != \"y\" ]; then\n        exit 1\n    fi\nfi\n\nkcd()\n{\n    i=\"0\"\n    while [ $i -lt 4 ]; do\n        if timeout 21 sh -c 'kubectl delete --ignore-not-found=true --grace-period=15 --timeout=20s '\"$*\"''; then\n            break\n        fi\n        i=$((i+1))\n    done\n}\n\nkcpf()\n{\n  FINALIZERS=$(kubectl get -o jsonpath=\"{.metadata.finalizers}\" \"$@\")\n  if [ \"x${FINALIZERS}\" != \"x\" ]; then\n      echo \"Finalizers before for ${*}: ${FINALIZERS}\"\n      kubectl patch -p '{\"metadata\":{\"finalizers\":null}}' --type=merge \"$@\"\n      echo \"Finalizers after for ${*}: $(kubectl get -o jsonpath=\"{.metadata.finalizers}\" \"${@}\")\"\n  fi\n}\n\nkcdns()\n{\n  if kubectl get 
namespace \"$1\"; then\n    kcpf namespace \"$1\"\n    FINALIZERS=$(kubectl get -o jsonpath=\"{.spec.finalizers}\" namespace \"$1\")\n    if [ \"x${FINALIZERS}\" != \"x\" ]; then\n        echo \"Finalizers before for namespace ${1}: ${FINALIZERS}\"\n        kubectl get -o json namespace \"$1\" | tr -d \"\\n\" | sed \"s/\\\"finalizers\\\": \\[[^]]\\+\\]/\\\"finalizers\\\": []/\"   | kubectl replace --raw /api/v1/namespaces/$1/finalize -f -\n        echo \"Finalizers after for namespace ${1}: $(kubectl get -o jsonpath=\"{.spec.finalizers}\" namespace ${1})\"\n    fi\n    i=\"0\"\n    while [ $i -lt 4 ]; do\n        if timeout 21 sh -c 'kubectl delete --ignore-not-found=true --grace-period=15 --timeout=20s namespace '\"$1\"''; then\n            break\n        fi\n        i=$((i+1))\n    done\n  fi\n}\n\nprintapiversion()\n{\nif echo \"$1\" | grep -q '/'; then\n  echo \"$1\" | cut -d'/' -f1\nelse\n  echo \"\"\nfi\n}\n\nset -x\n# Namespaces with resources that probably have finalizers/dependencies (needs manual traverse to patch and delete else it will hang)\nCATTLE_NAMESPACES=\"local cattle-system cattle-impersonation-system cattle-global-data cattle-global-nt cattle-provisioning-capi-system cattle-turtles-system cattle-capi-system\"\nTOOLS_NAMESPACES=\"istio-system cattle-resources-system cis-operator-system cattle-dashboards cattle-gatekeeper-system cattle-alerting cattle-logging cattle-pipeline cattle-prometheus rancher-operator-system cattle-monitoring-system cattle-logging-system cattle-elemental-system\"\nFLEET_NAMESPACES=\"cattle-fleet-clusters-system cattle-fleet-local-system cattle-fleet-system fleet-default fleet-local fleet-system\"\n\n# Delete rancher install to not have anything running that (re)creates resources\nkcd \"-n cattle-system deploy,ds --all\"\nkubectl -n cattle-system wait --for delete pod --selector=app=rancher\n# Delete the only resource not in cattle namespaces\nkcd \"-n kube-system configmap cattle-controllers\"\n\n# Delete any blocking 
webhooks from preventing requests\nif kubectl get mutatingwebhookconfigurations -o name | grep -q cattle\\.io; then\n    kcd \"$(kubectl get mutatingwebhookconfigurations -o name | grep cattle\\.io)\"\nfi\nif kubectl get validatingwebhookconfigurations -o name | grep -q cattle\\.io; then\n    kcd \"$(kubectl get validatingwebhookconfigurations -o name | grep cattle\\.io)\"\nfi\n\n# Delete any monitoring webhooks\nif kubectl get mutatingwebhookconfigurations -o name | grep -q rancher-monitoring; then\n    kcd \"$(kubectl get mutatingwebhookconfigurations -o name | grep rancher-monitoring)\"\nfi\nif kubectl get validatingwebhookconfigurations -o name | grep -q rancher-monitoring; then\n    kcd \"$(kubectl get validatingwebhookconfigurations -o name | grep rancher-monitoring)\"\nfi\n# Delete any gatekeeper webhooks\nif kubectl get validatingwebhookconfigurations -o name | grep -q gatekeeper; then\n    kcd \"$(kubectl get validatingwebhookconfigurations -o name | grep gatekeeper)\"\nfi\n\n# Delete any istio webhooks\nif kubectl get mutatingwebhookconfigurations -o name | grep -q istio;  then\n    kcd \"$(kubectl get mutatingwebhookconfigurations -o name | grep istio)\"\nfi\nif kubectl get validatingwebhookconfigurations -o name | grep -q istio; then\n    kcd \"$(kubectl get validatingwebhookconfigurations -o name | grep istio)\"\nfi\n\n# Delete any capi webhooks\nif kubectl get mutatingwebhookconfigurations -o name | grep -q capi;  then\n    kcd \"$(kubectl get mutatingwebhookconfigurations -o name | grep capi)\"\nfi\nif kubectl get validatingwebhookconfigurations -o name | grep -q capi; then\n    kcd \"$(kubectl get validatingwebhookconfigurations -o name | grep capi)\"\nfi\n\n# Cluster api\nif [ -n \"$(kubectl get validatingwebhookconfiguration.admissionregistration.k8s.io/validating-webhook-configuration)\" ]; then\n    kcd validatingwebhookconfiguration.admissionregistration.k8s.io/validating-webhook-configuration\nfi\nif [ -n \"$(kubectl get 
mutatingwebhookconfiguration.admissionregistration.k8s.io/mutating-webhook-configuration)\" ]; then\n    kcd mutatingwebhookconfiguration.admissionregistration.k8s.io/mutating-webhook-configuration\nfi\n\n# Delete generic k8s resources either labeled with norman or resource name starting with \"cattle|rancher|fleet\"\n# ClusterRole/ClusterRoleBinding\nkubectl get clusterrolebinding -l cattle.io/creator=norman --no-headers -o custom-columns=NAME:.metadata.name | while read -r CRB; do\n  kcpf clusterrolebindings \"$CRB\"\n  kcd \"clusterrolebindings \"\"$CRB\"\"\"\ndone\n\nkubectl get clusterrolebinding --no-headers -o custom-columns=NAME:.metadata.name | grep ^cattle- | while read -r CRB; do\n  kcpf clusterrolebindings \"$CRB\"\n  kcd \"clusterrolebindings \"\"$CRB\"\"\"\ndone\n\nkubectl get clusterrolebinding --no-headers -o custom-columns=NAME:.metadata.name | grep rancher | while read -r CRB; do\n  kcpf clusterrolebindings \"$CRB\"\n  kcd \"clusterrolebindings \"\"$CRB\"\"\"\ndone\n\nkubectl get clusterrolebinding --no-headers -o custom-columns=NAME:.metadata.name | grep ^fleet- | while read -r CRB; do\n  kcpf clusterrolebindings \"$CRB\"\n  kcd \"clusterrolebindings \"\"$CRB\"\"\"\ndone\n\nkubectl get clusterrolebinding --no-headers -o custom-columns=NAME:.metadata.name | grep ^gitjob | while read -r CRB; do\n  kcpf clusterrolebindings \"$CRB\"\n  kcd \"clusterrolebindings \"\"$CRB\"\"\"\ndone\n\nkubectl get clusterrolebinding --no-headers -o custom-columns=NAME:.metadata.name | grep ^pod-impersonation-helm- | while read -r CRB; do\n  kcpf clusterrolebindings \"$CRB\"\n  kcd \"clusterrolebindings \"\"$CRB\"\"\"\ndone\n\nkubectl get clusterrolebinding --no-headers -o custom-columns=NAME:.metadata.name | grep ^gatekeeper | while read -r CRB; do\n  kcpf clusterrolebindings \"$CRB\"\n  kcd \"clusterrolebindings \"\"$CRB\"\"\"\ndone\n\nkubectl get clusterrolebinding --no-headers -o custom-columns=NAME:.metadata.name | grep ^cis | while read -r CRB; do\n  kcpf 
clusterrolebindings \"$CRB\"\n  kcd \"clusterrolebindings \"\"$CRB\"\"\"\ndone\n\nkubectl get clusterrolebinding --no-headers -o custom-columns=NAME:.metadata.name | grep ^istio | while read -r CRB; do\n  kcpf clusterrolebindings \"$CRB\"\n  kcd \"clusterrolebindings \"\"$CRB\"\"\"\ndone\n\nkubectl get clusterrolebinding --no-headers -o custom-columns=NAME:.metadata.name | grep ^elemental | while read -r CRB; do\n  kcpf clusterrolebindings \"$CRB\"\n  kcd \"clusterrolebindings \"\"$CRB\"\"\"\ndone\n\nkubectl  get clusterroles -l cattle.io/creator=norman --no-headers -o custom-columns=NAME:.metadata.name | while read -r CR; do\n  kcpf clusterroles \"$CR\"\n  kcd \"clusterroles \"\"$CR\"\"\"\ndone\n\nkubectl get clusterroles --no-headers -o custom-columns=NAME:.metadata.name | grep ^cattle- | while read -r CR; do\n  kcpf clusterroles \"$CR\"\n  kcd \"clusterroles \"\"$CR\"\"\"\ndone\n\nkubectl get clusterroles --no-headers -o custom-columns=NAME:.metadata.name | grep rancher | while read -r CR; do\n  kcpf clusterroles \"$CR\"\n  kcd \"clusterroles \"\"$CR\"\"\"\ndone\n\nkubectl get clusterroles --no-headers -o custom-columns=NAME:.metadata.name | grep ^fleet | while read -r CR; do\n  kcpf clusterroles \"$CR\"\n  kcd \"clusterroles \"\"$CR\"\"\"\ndone\n\nkubectl get clusterroles --no-headers -o custom-columns=NAME:.metadata.name | grep ^gitjob | while read -r CR; do\n  kcpf clusterroles \"$CR\"\n  kcd \"clusterroles \"\"$CR\"\"\"\ndone\n\nkubectl get clusterroles --no-headers -o custom-columns=NAME:.metadata.name | grep ^pod-impersonation-helm | while read -r CR; do\n  kcpf clusterroles \"$CR\"\n  kcd \"clusterroles \"\"$CR\"\"\"\ndone\n\nkubectl get clusterroles --no-headers -o custom-columns=NAME:.metadata.name | grep ^logging- | while read -r CR; do\n  kcpf clusterroles \"$CR\"\n  kcd \"clusterroles \"\"$CR\"\"\"\ndone\n\nkubectl get clusterroles --no-headers -o custom-columns=NAME:.metadata.name | grep ^monitoring- | while read -r CR; do\n  kcpf clusterroles 
\"$CR\"\n  kcd \"clusterroles \"\"$CR\"\"\"\ndone\n\nkubectl get clusterroles --no-headers -o custom-columns=NAME:.metadata.name | grep ^gatekeeper | while read -r CR; do\n  kcpf clusterroles \"$CR\"\n  kcd \"clusterroles \"\"$CR\"\"\"\ndone\n\nkubectl get clusterroles --no-headers -o custom-columns=NAME:.metadata.name | grep ^cis | while read -r CR; do\n  kcpf clusterroles \"$CR\"\n  kcd \"clusterroles \"\"$CR\"\"\"\ndone\n\nkubectl get clusterroles --no-headers -o custom-columns=NAME:.metadata.name | grep ^istio | while read -r CR; do\n  kcpf clusterroles \"$CR\"\n  kcd \"clusterroles \"\"$CR\"\"\"\ndone\n\nkubectl get clusterroles --no-headers -o custom-columns=NAME:.metadata.name | grep ^elemental | while read -r CR; do\n  kcpf clusterroles \"$CR\"\n  kcd \"clusterroles \"\"$CR\"\"\"\ndone\n\n# Bulk delete data CRDs\n# Saves time in the loop below where we patch/delete individual resources\nDATACRDS=\"settings.management.cattle.io authconfigs.management.cattle.io features.management.cattle.io rkeaddons.management.cattle.io rkek8sserviceoptions.management.cattle.io rkek8ssystemimages.management.cattle.io catalogtemplateversions.management.cattle.io catalogtemplates.management.cattle.io rkeaddons.management.cattle.io tokens.management.cattle.io elemental.cattle.io\"\nfor CRD in $DATACRDS; do\n  kcd \"crd $CRD\"\ndone\n\n# Delete apiservice\nfor APISERVICE in $(kubectl  get apiservice -o name | grep cattle | grep -v k3s\\.cattle\\.io | grep -v helm\\.cattle\\.io) $(kubectl  get apiservice -o name | grep gatekeeper\\.sh) $(kubectl  get apiservice -o name | grep istio\\.io) $(kubectl  get apiservice elemental-operator) apiservice\\.apiregistration\\.k8s\\.io\\/v1beta1\\.custom\\.metrics\\.k8s\\.io; do\n  kcd \"$APISERVICE\"\ndone\n\n# Pod security policies\n#Check if psps are available on the target cluster\nkubectl get podsecuritypolicy > /dev/null 2>&1\n\n# Check the exit code and only run if there are psps available on the cluster\nif [ $? 
-eq 0 ]; then\n  echo \"Removing PSPs\"\n\n  # Rancher logging\n  for PSP in $(kubectl get podsecuritypolicy -o name -l app.kubernetes.io/name=rancher-logging) podsecuritypolicy.policy/rancher-logging-rke-aggregator; do\n    kcd \"$PSP\"\n  done\n\n  # Rancher monitoring\n  for PSP in $(kubectl  get podsecuritypolicy -o name -l release=rancher-monitoring) $(kubectl get podsecuritypolicy -o name -l app=rancher-monitoring-crd-manager) $(kubectl get podsecuritypolicy -o name -l app=rancher-monitoring-patch-sa) $(kubectl get podsecuritypolicy -o name -l app.kubernetes.io/instance=rancher-monitoring); do\n    kcd \"$PSP\"\n  done\n\n  # Rancher OPA\n  for PSP in $(kubectl  get podsecuritypolicy -o name -l release=rancher-gatekeeper) $(kubectl get podsecuritypolicy -o name -l app=rancher-gatekeeper-crd-manager); do\n    kcd \"$PSP\"\n  done\n\n  # Backup restore operator\n  for PSP in $(kubectl get podsecuritypolicy -o name -l app.kubernetes.io/name=rancher-backup); do\n    kcd \"$PSP\"\n  done\n\n  # Istio\n  for PSP in istio-installer istio-psp kiali-psp psp-istio-cni; do\n    kcd \"podsecuritypolicy $PSP\"\n  done\nelse \n  echo \"Kubernetes version v1.25 or higher, skipping PSP removal\"\nfi\n\n# Get all namespaced resources and delete in loop\n# Exclude helm.cattle.io and k3s.cattle.io to not break K3S/RKE2 addons\nkubectl get \"$(kubectl api-resources --namespaced=true --verbs=delete -o name| grep cattle\.io | grep -v helm\.cattle\.io | grep -v k3s\.cattle\.io | tr \"\\n\" \",\" | sed -e 's/,$//')\" -A --no-headers -o custom-columns=NAME:.metadata.name,NAMESPACE:.metadata.namespace,KIND:.kind,APIVERSION:.apiVersion | while read -r NAME NAMESPACE KIND APIVERSION; do\n  kcpf -n \"$NAMESPACE\" \"${KIND}.$(printapiversion \"$APIVERSION\")\" \"$NAME\"\n  kcd \"-n \"\"$NAMESPACE\"\" ${KIND}.$(printapiversion \"$APIVERSION\") \"\"$NAME\"\"\"\ndone\n\n# Logging\nkubectl get \"$(kubectl api-resources --namespaced=true --verbs=delete -o name| grep 
logging\\.banzaicloud\\.io | tr \"\\n\" \",\" | sed -e 's/,$//')\" -A --no-headers -o custom-columns=NAME:.metadata.name,NAMESPACE:.metadata.namespace,KIND:.kind,APIVERSION:.apiVersion | while read -r NAME NAMESPACE KIND APIVERSION; do\n  kcpf -n \"$NAMESPACE\" \"${KIND}.$(printapiversion \"$APIVERSION\")\" \"$NAME\"\n  kcd \"-n \"\"$NAMESPACE\"\" ${KIND}.$(printapiversion \"$APIVERSION\") \"\"$NAME\"\"\"\ndone\n\nkubectl get \"$(kubectl api-resources --namespaced=true --verbs=delete -o name | grep -v events\\.events\\.k8s\\.io | grep -v ^events$ | tr \"\\n\" \",\" | sed -e 's/,$//')\" -A --no-headers -o custom-columns=NAME:.metadata.name,NAMESPACE:.metadata.namespace,KIND:.kind,APIVERSION:.apiVersion | grep rancher-monitoring | while read -r NAME NAMESPACE KIND APIVERSION; do\n  kcpf -n \"$NAMESPACE\" \"${KIND}.$(printapiversion \"$APIVERSION\")\" \"$NAME\"\n  kcd \"-n \"\"$NAMESPACE\"\" ${KIND}.$(printapiversion \"$APIVERSION\") \"\"$NAME\"\"\"\ndone\n\n# Monitoring\nkubectl get \"$(kubectl api-resources --namespaced=true --verbs=delete -o name| grep monitoring\\.coreos\\.com | tr \"\\n\" \",\" | sed -e 's/,$//')\" -A --no-headers -o custom-columns=NAME:.metadata.name,NAMESPACE:.metadata.namespace,KIND:.kind,APIVERSION:.apiVersion | while read -r NAME NAMESPACE KIND APIVERSION; do\n  kcpf -n \"$NAMESPACE\" \"${KIND}.$(printapiversion \"$APIVERSION\")\" \"$NAME\"\n  kcd \"-n \"\"$NAMESPACE\"\" ${KIND}.$(printapiversion \"$APIVERSION\") \"\"$NAME\"\"\"\ndone\n\n# Gatekeeper\nkubectl get \"$(kubectl api-resources --namespaced=true --verbs=delete -o name| grep gatekeeper\\.sh | tr \"\\n\" \",\" | sed -e 's/,$//')\" -A --no-headers -o custom-columns=NAME:.metadata.name,NAMESPACE:.metadata.namespace,KIND:.kind,APIVERSION:.apiVersion | while read -r NAME NAMESPACE KIND APIVERSION; do\n  kcpf -n \"$NAMESPACE\" \"${KIND}.$(printapiversion \"$APIVERSION\")\" \"$NAME\"\n  kcd \"-n \"\"$NAMESPACE\"\" ${KIND}.$(printapiversion \"$APIVERSION\") \"\"$NAME\"\"\"\ndone\n\n# 
Cluster-api\nkubectl get \"$(kubectl api-resources --namespaced=true --verbs=delete -o name| grep cluster\\.x-k8s\\.io | tr \"\\n\" \",\" | sed -e 's/,$//')\" -A --no-headers -o custom-columns=NAME:.metadata.name,NAMESPACE:.metadata.namespace,KIND:.kind,APIVERSION:.apiVersion | while read -r NAME NAMESPACE KIND APIVERSION; do\n  kcpf -n \"$NAMESPACE\" \"${KIND}.$(printapiversion \"$APIVERSION\")\" \"$NAME\"\n  kcd \"-n \"\"$NAMESPACE\"\" ${KIND}.$(printapiversion \"$APIVERSION\") \"\"$NAME\"\"\"\ndone\n\n# Get all non-namespaced resources and delete in loop\nkubectl get \"$(kubectl api-resources --namespaced=false --verbs=delete -o name| grep cattle\\.io | tr \"\\n\" \",\" | sed -e 's/,$//')\" -A --no-headers -o name | while read -r NAME; do\n  kcpf \"$NAME\"\n  kcd \"$NAME\"\ndone\n\n# Logging\nkubectl get \"$(kubectl api-resources --namespaced=false --verbs=delete -o name| grep logging\\.banzaicloud\\.io | tr \"\\n\" \",\" | sed -e 's/,$//')\" -A --no-headers -o name | while read -r NAME; do\n  kcpf \"$NAME\"\n  kcd \"$NAME\"\ndone\n\n# Gatekeeper\nkubectl get \"$(kubectl api-resources --namespaced=false --verbs=delete -o name| grep gatekeeper\\.sh | tr \"\\n\" \",\" | sed -e 's/,$//')\" -A --no-headers -o name | while read -r NAME; do\n  kcpf \"$NAME\"\n  kcd \"$NAME\"\ndone\n\n# Delete istio certs\nfor NS in $(kubectl  get ns --no-headers -o custom-columns=NAME:.metadata.name); do\n  kcd \"-n ${NS} configmap istio-ca-root-cert\"\ndone\n\n# Delete all cattle namespaces, including project namespaces (p-),cluster (c-),cluster-fleet and user (user-) namespaces\nfor NS in $TOOLS_NAMESPACES $FLEET_NAMESPACES $CATTLE_NAMESPACES; do\n  kubectl get \"$(kubectl api-resources --namespaced=true --verbs=delete -o name| grep -v events\\.events\\.k8s\\.io | grep -v ^events$ | tr \"\\n\" \",\" | sed -e 's/,$//')\" -n \"$NS\" --no-headers -o custom-columns=NAME:.metadata.name,NAMESPACE:.metadata.namespace,KIND:.kind,APIVERSION:.apiVersion | while read -r NAME NAMESPACE KIND 
APIVERSION; do\n    kcpf -n \"$NAMESPACE\" \"${KIND}.$(printapiversion \"$APIVERSION\")\" \"$NAME\"\n    kcd \"-n \"\"$NAMESPACE\"\" ${KIND}.$(printapiversion \"$APIVERSION\") \"\"$NAME\"\"\"\n  done\n\n  kcdns \"$NS\"\ndone\n\nfor NS in $(kubectl get namespace --no-headers -o custom-columns=NAME:.metadata.name | grep \"^cluster-fleet\"); do\n  kubectl get \"$(kubectl api-resources --namespaced=true --verbs=delete -o name| grep -v events\\.events\\.k8s\\.io | grep -v ^events$ | tr \"\\n\" \",\" | sed -e 's/,$//')\" -n \"$NS\" --no-headers -o custom-columns=NAME:.metadata.name,NAMESPACE:.metadata.namespace,KIND:.kind,APIVERSION:.apiVersion | while read -r NAME NAMESPACE KIND APIVERSION; do\n    kcpf -n \"$NAMESPACE\" \"${KIND}.$(printapiversion \"$APIVERSION\")\" \"$NAME\"\n    kcd \"-n \"\"$NAMESPACE\"\" ${KIND}.$(printapiversion \"$APIVERSION\") \"\"$NAME\"\"\"\n  done\n\n  kcdns \"$NS\"\ndone\n\nfor NS in $(kubectl get namespace --no-headers -o custom-columns=NAME:.metadata.name | grep \"^p-\"); do\n  kubectl get \"$(kubectl api-resources --namespaced=true --verbs=delete -o name| grep -v events\\.events\\.k8s\\.io | grep -v ^events$ | tr \"\\n\" \",\" | sed -e 's/,$//')\" -n \"$NS\" --no-headers -o custom-columns=NAME:.metadata.name,NAMESPACE:.metadata.namespace,KIND:.kind,APIVERSION:.apiVersion | while read -r NAME NAMESPACE KIND APIVERSION; do\n    kcpf -n \"$NAMESPACE\" \"${KIND}.$(printapiversion \"$APIVERSION\")\" \"$NAME\"\n    kcd \"-n \"\"$NAMESPACE\"\" ${KIND}.$(printapiversion \"$APIVERSION\") \"\"$NAME\"\"\"\n  done\n\n  kcdns \"$NS\"\ndone\n\nfor NS in $(kubectl get namespace --no-headers -o custom-columns=NAME:.metadata.name | grep \"^c-\"); do\n  kubectl get \"$(kubectl api-resources --namespaced=true --verbs=delete -o name| grep -v events\\.events\\.k8s\\.io | grep -v ^events$ | tr \"\\n\" \",\" | sed -e 's/,$//')\" -n \"$NS\" --no-headers -o custom-columns=NAME:.metadata.name,NAMESPACE:.metadata.namespace,KIND:.kind,APIVERSION:.apiVersion | while 
read -r NAME NAMESPACE KIND APIVERSION; do\n    kcpf -n \"$NAMESPACE\" \"${KIND}.$(printapiversion \"$APIVERSION\")\" \"$NAME\"\n    kcd \"-n \"\"$NAMESPACE\"\" ${KIND}.$(printapiversion \"$APIVERSION\") \"\"$NAME\"\"\"\n  done\n\n  kcdns \"$NS\"\ndone\n\nfor NS in $(kubectl get namespace --no-headers -o custom-columns=NAME:.metadata.name | grep \"^user-\"); do\n  kubectl get \"$(kubectl api-resources --namespaced=true --verbs=delete -o name| grep -v events\\.events\\.k8s\\.io | grep -v ^events$ | tr \"\\n\" \",\" | sed -e 's/,$//')\" -n \"$NS\" --no-headers -o custom-columns=NAME:.metadata.name,NAMESPACE:.metadata.namespace,KIND:.kind,APIVERSION:.apiVersion | while read -r NAME NAMESPACE KIND APIVERSION; do\n    kcpf -n \"$NAMESPACE\" \"${KIND}.$(printapiversion \"$APIVERSION\")\" \"$NAME\"\n    kcd \"-n \"\"$NAMESPACE\"\" ${KIND}.$(printapiversion \"$APIVERSION\") \"\"$NAME\"\"\"\n  done\n\n  kcdns \"$NS\"\ndone\n\nfor NS in $(kubectl get namespace --no-headers -o custom-columns=NAME:.metadata.name | grep \"^u-\"); do\n  kubectl get \"$(kubectl api-resources --namespaced=true --verbs=delete -o name| grep -v events\\.events\\.k8s\\.io | grep -v ^events$ | tr \"\\n\" \",\" | sed -e 's/,$//')\" -n \"$NS\" --no-headers -o custom-columns=NAME:.metadata.name,NAMESPACE:.metadata.namespace,KIND:.kind,APIVERSION:.apiVersion | while read -r NAME NAMESPACE KIND APIVERSION; do\n    kcpf -n \"$NAMESPACE\" \"${KIND}.$(printapiversion \"$APIVERSION\")\" \"$NAME\"\n    kcd \"-n \"\"$NAMESPACE\"\" ${KIND}.$(printapiversion \"$APIVERSION\") \"\"$NAME\"\"\"\n  done\n\n  kcdns \"$NS\"\ndone\n\n# Delete logging CRDs\nfor CRD in $(kubectl get crd -o name | grep logging\\.banzaicloud\\.io); do\n  kcd \"$CRD\"\ndone\n\n# Delete monitoring CRDs\nfor CRD in $(kubectl get crd -o name | grep monitoring\\.coreos\\.com); do\n  kcd \"$CRD\"\ndone\n\n# Delete OPA CRDs\nfor CRD in $(kubectl get crd -o name | grep gatekeeper\\.sh); do\n  kcd \"$CRD\"\ndone\n\n# Delete Istio CRDs\nfor CRD in 
$(kubectl get crd -o name | grep istio\\.io); do\n  kcd \"$CRD\"\ndone\n\n# Delete cluster-api CRDs\nfor CRD in $(kubectl get crd -o name | grep cluster\\.x-k8s\\.io); do\n  kcd \"$CRD\"\ndone\n\n# Delete all cattle CRDs\n# Exclude helm.cattle.io and addons.k3s.cattle.io to not break RKE2 addons\nfor CRD in $(kubectl get crd -o name | grep cattle\\.io | grep -v helm\\.cattle\\.io | grep -v k3s\\.cattle\\.io); do\n  kcd \"$CRD\"\ndone\n"
  },
  {
    "path": "deploy/rancher-cleanup.yaml",
    "content": "---\napiVersion: v1\nkind: ServiceAccount\nmetadata:\n  name: cleanup-service-account\n  namespace: kube-system\n---\napiVersion: rbac.authorization.k8s.io/v1\nkind: ClusterRoleBinding\nmetadata:\n  name: cleanup-admin\nsubjects:\n- kind: ServiceAccount\n  name: cleanup-service-account\n  namespace: kube-system\nroleRef:\n  kind: ClusterRole\n  name: cluster-admin\n  apiGroup: rbac.authorization.k8s.io\n---\napiVersion: batch/v1\nkind: Job\nmetadata:\n  name: cleanup-job\n  namespace: kube-system\n  labels:\n    app: cleanup\nspec:\n  template:\n    spec:\n      containers:\n      - name: cleanup\n        image: rancher/rancher-cleanup:latest\n        args: [ \"force\" ]\n        imagePullPolicy: Always\n      serviceAccountName: cleanup-service-account\n      restartPolicy: Never\n  backoffLimit: 4\n"
  },
  {
    "path": "deploy/verify.yaml",
    "content": "---\napiVersion: v1\nkind: ServiceAccount\nmetadata:\n  name: cleanup-service-account\n  namespace: kube-system\n---\napiVersion: rbac.authorization.k8s.io/v1\nkind: ClusterRoleBinding\nmetadata:\n  name: cleanup-admin\nsubjects:\n- kind: ServiceAccount\n  name: cleanup-service-account\n  namespace: kube-system\nroleRef:\n  kind: ClusterRole\n  name: cluster-admin\n  apiGroup: rbac.authorization.k8s.io\n---\napiVersion: batch/v1\nkind: Job\nmetadata:\n  name: verify-job\n  namespace: kube-system\n  labels:\n    app: verify\nspec:\n  template:\n    spec:\n      containers:\n      - name: verify\n        image: rancher/rancher-cleanup:latest\n        command: [ \"verify.sh\" ]\n        imagePullPolicy: Always\n      serviceAccountName: cleanup-service-account\n      restartPolicy: Never\n  backoffLimit: 0\n"
  },
  {
    "path": "scripts/github-release",
    "content": "#!/bin/bash\n\n# This script requires the following environment variables to be set:\n# TAG: the remote tag that you want the release to be based off of\n# GH_REPO: the github repo to create the release on, in <owner>/<repo> format\n\nset -e\n\nif [ -z \"$TAG\" ]; then\n  echo \"Must specify TAG environment variable\"\n  exit 1\nfi\nif [ -z \"$GH_REPO\" ]; then\n  echo \"Must specify GH_REPO environment variable\"\n  exit 1\nfi\n\n# create checksum file\nCHECKSUM_FILE='checksums.txt'\nsha256sum cleanup.sh verify.sh > $CHECKSUM_FILE\n\nif echo \"$TAG\" | grep -Eq \"rc|alpha\"; then\n  gh release create --draft --verify-tag --generate-notes --prerelease --latest=false \"$TAG\" cleanup.sh verify.sh $CHECKSUM_FILE\nelse\n  gh release create --draft --verify-tag --generate-notes \"$TAG\" cleanup.sh verify.sh $CHECKSUM_FILE\nfi\n"
  },
  {
    "path": "verify.sh",
    "content": "#!/bin/bash\n# Overridden on package\nSCRIPT_VERSION=\"unreleased\"\necho \"Running verify.sh version ${SCRIPT_VERSION}\"\n\nkcg()\n{\n  kubectl get --ignore-not-found=true \"$@\"\n}\n\nkcg -n cattle-system deploy,ds\nkcg -n kube-system configmap cattle-controllers\nkcg mutatingwebhookconfigurations -o name | grep cattle\\.io\nkcg mutatingwebhookconfigurations -o name | grep rancher-monitoring\nkcg mutatingwebhookconfigurations -o name | grep istio\nkcg mutatingwebhookconfigurations -o name | grep mutating-webhook-configuration\n\nkcg validatingwebhookconfigurations -o name | grep cattle\\.io\nkcg validatingwebhookconfigurations -o name | grep rancher-monitoring\nkcg validatingwebhookconfigurations -o name | grep gatekeeper\nkcg validatingwebhookconfigurations -o name | grep istio\nkcg validatingwebhookconfigurations -o name | grep validating-webhook-configuration\n\nkcg apiservice -o name | grep cattle\\.io | grep -v k3s\\.cattle\\.io | grep -v helm\\.cattle\\.io\nkcg apiservice -o name | grep istio\nkcg apiservice -o name | grep gatekeeper\nkcg apiservice -o name | grep custom\\.metrics\\.k8s\\.io\nkcg apiservice -o name | grep elemental\n\nkcg clusterrolebinding -l cattle.io/creator=norman\nkcg clusterrolebinding --no-headers -o custom-columns=NAME:.metadata.name | grep ^cattle-\nkcg clusterrolebinding --no-headers -o custom-columns=NAME:.metadata.name | grep rancher \nkcg clusterrolebinding --no-headers -o custom-columns=NAME:.metadata.name | grep ^fleet-\nkcg clusterrolebinding --no-headers -o custom-columns=NAME:.metadata.name | grep ^gitjob\nkcg clusterrolebinding --no-headers -o custom-columns=NAME:.metadata.name | grep ^pod-impersonation-helm\nkcg clusterrolebinding --no-headers -o custom-columns=NAME:.metadata.name | grep ^gatekeeper\nkcg clusterrolebinding --no-headers -o custom-columns=NAME:.metadata.name | grep ^cis\nkcg clusterrolebinding --no-headers -o custom-columns=NAME:.metadata.name | grep ^istio\nkcg clusterrolebinding 
--no-headers -o custom-columns=NAME:.metadata.name | grep ^elemental\n\nkcg clusterroles -l cattle.io/creator=norman\nkcg clusterroles --no-headers -o custom-columns=NAME:.metadata.name | grep ^cattle-\nkcg clusterroles --no-headers -o custom-columns=NAME:.metadata.name | grep rancher\nkcg clusterroles --no-headers -o custom-columns=NAME:.metadata.name | grep ^fleet\nkcg clusterroles --no-headers -o custom-columns=NAME:.metadata.name | grep ^gitjob\nkcg clusterroles --no-headers -o custom-columns=NAME:.metadata.name | grep ^pod-impersonation-helm\nkcg clusterroles --no-headers -o custom-columns=NAME:.metadata.name | grep ^logging-\nkcg clusterroles --no-headers -o custom-columns=NAME:.metadata.name | grep ^monitoring-\nkcg clusterroles --no-headers -o custom-columns=NAME:.metadata.name | grep ^gatekeeper\nkcg clusterroles --no-headers -o custom-columns=NAME:.metadata.name | grep ^cis\nkcg clusterroles --no-headers -o custom-columns=NAME:.metadata.name | grep ^istio\nkcg clusterroles --no-headers -o custom-columns=NAME:.metadata.name | grep ^elemental\n\n\n# Pod security policies\n#Check if psps are available on the target cluster\nkubectl get podsecuritypolicy > /dev/null 2>&1\n\n# Check the exit code and only run if there are psps available on the cluster\nif [ $? 
-eq 0 ]; then\n  echo \"Checking for PSPs\"\n  kcg podsecuritypolicy -o name -l app.kubernetes.io/name=rancher-logging\n  kcg podsecuritypolicy.policy/rancher-logging-rke-aggregator\n\n  kcg podsecuritypolicy -o name -l release=rancher-monitoring\n  kcg podsecuritypolicy -o name -l app=rancher-monitoring-crd-manager\n  kcg podsecuritypolicy -o name -l app=rancher-monitoring-patch-sa\n  kcg podsecuritypolicy -o name -l app.kubernetes.io/instance=rancher-monitoring\n\n  kcg podsecuritypolicy -o name -l release=rancher-gatekeeper\n  kcg podsecuritypolicy -o name -l app=rancher-gatekeeper-crd-manager\n\n  kcg podsecuritypolicy -o name -l app.kubernetes.io/name=rancher-backup\n  kcg podsecuritypolicy -o name | grep istio-installer\n  kcg podsecuritypolicy -o name | grep istio-psp\n  kcg podsecuritypolicy -o name | grep kiali-psp\n  kcg podsecuritypolicy -o name | grep psp-istio-cni\nelse \n  echo \"Kubernetes version v1.25 or higher, skipping PSP check\"\nfi\n\nkcg namespace -o name | grep \"^cattle\"\nkcg namespace -o name | grep \"rancher-operator-system\"\nkcg namespace -o name | grep \"cis-operator-system\"\nkcg namespace -o name | grep \"^c-\"\nkcg namespace -o name | grep \"^p-\"\nkcg namespace -o name | grep \"^user-\"\nkcg namespace -o name | grep \"^u-\"\nkcg namespace -o name | grep \"fleet\"\nkcg namespace -o name | grep \"istio\"\nkcg namespace -o name | grep \"elemental\"\n\nkcg \"$(kubectl api-resources --namespaced=true --verbs=delete -o name | grep logging\\.banzaicloud\\.io | tr \"\\n\" \",\" | sed -e 's/,$//')\" -A --no-headers -o custom-columns=NAME:.metadata.name,NAMESPACE:.metadata.namespace,KIND:.kind,APIVERSION:.apiVersion 2>/dev/null\nkcg \"$(kubectl api-resources --namespaced=true --verbs=delete -o name | grep -v events\\.events\\.k8s\\.io | grep -v ^events$ | tr \"\\n\" \",\" | sed -e 's/,$//')\" -A --no-headers -o custom-columns=NAME:.metadata.name,NAMESPACE:.metadata.namespace,KIND:.kind,APIVERSION:.apiVersion | grep rancher-monitoring 
2>/dev/null\nkcg \"$(kubectl api-resources --namespaced=true --verbs=delete -o name | grep monitoring\\.coreos\\.com | tr \"\\n\" \",\" | sed -e 's/,$//')\" -A --no-headers -o custom-columns=NAME:.metadata.name,NAMESPACE:.metadata.namespace,KIND:.kind,APIVERSION:.apiVersion 2>/dev/null\nkcg \"$(kubectl api-resources --namespaced=true --verbs=delete -o name | grep gatekeeper\\.sh | tr \"\\n\" \",\" | sed -e 's/,$//')\" -A --no-headers -o custom-columns=NAME:.metadata.name,NAMESPACE:.metadata.namespace,KIND:.kind,APIVERSION:.apiVersion 2>/dev/null\nkcg \"$(kubectl api-resources --namespaced=true --verbs=delete -o name | grep cluster\\.x-k8s\\.io | tr \"\\n\" \",\" | sed -e 's/,$//')\" -A --no-headers -o custom-columns=NAME:.metadata.name,NAMESPACE:.metadata.namespace,KIND:.kind,APIVERSION:.apiVersion 2>/dev/null\n\nkubectl api-resources --namespaced=false --verbs=delete -o name| grep logging\\.banzaicloud\\.io | tr \"\\n\" \",\" | sed -e 's/,$//'\nkubectl api-resources --namespaced=false --verbs=delete -o name| grep gatekeeper\\.sh | tr \"\\n\" \",\" | sed -e 's/,$//'\n\nkcg crd | grep cattle\\.io | grep -v helm\\.cattle\\.io | grep -v k3s\\.cattle\\.io\nkcg crd | grep logging\\.banzaicloud\\.io\nkcg crd | grep monitoring\\.coreos\\.com\nkcg crd | grep gatekeeper\\.sh\nkcg crd | grep istio\\.io\nkcg crd | grep cluster\\.x-k8s\\.io\n\nkcg configmap -A | grep istio-ca-root-cert\n\nkubectl api-resources --namespaced=true --verbs=delete -o name | grep cattle\\.io | grep -v helm\\.cattle\\.io | grep -v k3s\\.cattle\\.io | tr \"\\n\" \",\" | sed -e 's/,$//'\nkubectl api-resources --namespaced=false --verbs=delete -o name| grep cattle\\.io | tr \"\\n\" \",\" | sed -e 's/,$//'\n"
  }
]