Repository: rancher/rancher-cleanup
Branch: main
Commit: f0d0d19cf820
Files: 14
Total size: 37.5 KB
Directory structure:
gitextract_nbeir257/
├── .github/
│ ├── CODEOWNERS
│ ├── renovate.json
│ └── workflows/
│ ├── fossa.yml
│ ├── pull-request.yaml
│ ├── release.yaml
│ └── renovate.yml
├── .gitignore
├── Dockerfile
├── README.md
├── cleanup.sh
├── deploy/
│ ├── rancher-cleanup.yaml
│ └── verify.yaml
├── scripts/
│ └── github-release
└── verify.sh
================================================
FILE CONTENTS
================================================
================================================
FILE: .github/CODEOWNERS
================================================
* @rancher/orbs
================================================
FILE: .github/renovate.json
================================================
{
  "extends": ["github>rancher/renovate-config#release"],
  "baseBranches": ["main"],
  "packageRules": [
    {
      "matchDepNames": [
        "kubernetes/kubernetes"
      ],
      "allowedVersions": "<1.26.0"
    }
  ]
}
================================================
FILE: .github/workflows/fossa.yml
================================================
name: FOSSA Scanning

on:
  push:
    branches: ["main", "master", "release/**"]
  workflow_dispatch:

permissions:
  contents: read
  # id-token is needed by read-vault-secrets (OIDC auth against vault)
  id-token: write

jobs:
  fossa-scanning:
    runs-on: ubuntu-latest
    timeout-minutes: 30
    steps:
      - name: Checkout
        uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6
      # The FOSSA token is shared between all repos in Rancher's GH org. It can be
      # used directly and there is no need to request specific access to EIO.
      - name: Read FOSSA token
        uses: rancher-eio/read-vault-secrets@main
        with:
          secrets: |
            secret/data/github/org/rancher/fossa/push token | FOSSA_API_KEY_PUSH_ONLY
      - name: FOSSA scan
        uses: fossas/fossa-action@main
        with:
          api-key: ${{ env.FOSSA_API_KEY_PUSH_ONLY }}
          # Only runs the scan and do not provide/returns any results back to the
          # pipeline.
          run-tests: false
================================================
FILE: .github/workflows/pull-request.yaml
================================================
name: Pull request checks

on: [pull_request]

jobs:
  shellcheck:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@34e114876b0b11c390a56381ad16ebd13914f8d5 # v4
      - name: Run shellcheck
        # -y is required: a non-interactive runner cannot answer apt's
        # confirmation prompt, which would abort the install.
        run: |
          sudo apt-get install -y shellcheck
          shellcheck -S warning *.sh
  build:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@34e114876b0b11c390a56381ad16ebd13914f8d5 # v4
      - name: Build docker image
        run: docker build -t ${{ github.repository_owner }}/rancher-cleanup:test .
================================================
FILE: .github/workflows/release.yaml
================================================
name: Release

on:
  push:
    tags:
      - 'v*'
  workflow_dispatch:
    inputs:
      imageTag:
        description: Base Image Tag (before the :)
        type: string
        required: false
        default: rancher/rancher-cleanup

jobs:
  shellcheck:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@34e114876b0b11c390a56381ad16ebd13914f8d5 # v4
      - name: Run shellcheck
        # -y is required: a non-interactive runner cannot answer apt's
        # confirmation prompt, which would abort the install.
        run: |
          sudo apt-get install -y shellcheck
          shellcheck -S warning *.sh
  test-build:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@34e114876b0b11c390a56381ad16ebd13914f8d5 # v4
      - name: Build docker image
        run: docker build -t ${{ github.repository_owner }}/rancher-cleanup:test .
  release:
    permissions:
      # id-token for vault OIDC + attestation, contents to create the release
      id-token: write
      contents: write
    runs-on: ubuntu-latest
    needs:
      - shellcheck
      - test-build
    steps:
      - uses: actions/checkout@34e114876b0b11c390a56381ad16ebd13914f8d5 # v4
      - name: Set up QEMU
        uses: docker/setup-qemu-action@c7c53464625b32c7a7e944ae62b3e17d2b600130 # v3
      - name: Set up Docker Buildx
        uses: docker/setup-buildx-action@8d2750c68a42422c14e847fe6c8ac0403b4cbd6f # v3
      - name: Get dockerhub username and password from vault
        if: ${{ github.repository == 'rancher/rancher-cleanup' }}
        uses: rancher-eio/read-vault-secrets@0da85151ad1f19ed7986c41587e45aac1ace74b6 # v3
        with:
          secrets: |
            secret/data/github/repo/${{ github.repository }}/dockerhub/${{ github.repository_owner }}/credentials username | DOCKER_USERNAME ;
            secret/data/github/repo/${{ github.repository }}/dockerhub/${{ github.repository_owner }}/credentials password | DOCKER_PASSWORD
      - name: Log into Docker Hub
        uses: docker/login-action@c94ce9fb468520275223c153574b00df6fe4bcc9 # v3
        with:
          # Fall back to repository secrets when the vault step did not run
          username: ${{ env.DOCKER_USERNAME || secrets.DOCKER_USERNAME }}
          password: ${{ env.DOCKER_PASSWORD || secrets.DOCKER_PASSWORD }}
      - name: Create github release
        run: scripts/github-release
        env:
          TAG: ${{ github.ref_name }}
          GH_REPO: ${{ github.repository }}
          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
      - name: Build and push docker image (prerelease)
        if: ${{ contains(github.ref_name, 'rc') || contains(github.ref_name, 'alpha') }}
        uses: docker/build-push-action@ca052bb54ab0790a636c9b5f226502c73d547a25 # v5
        env:
          IMAGE_TAG: ${{ inputs.imageTag || github.repository }}
        with:
          platforms: linux/amd64
          push: true
          tags: |
            ${{ env.IMAGE_TAG }}:${{ github.ref_name }}
            ${{ env.IMAGE_TAG }}:${{ github.ref_name }}-amd64
      - name: Build and push docker image (full release)
        if: ${{ !contains(github.ref_name, 'rc') && !contains(github.ref_name, 'alpha') }}
        uses: docker/build-push-action@ca052bb54ab0790a636c9b5f226502c73d547a25 # v5
        env:
          IMAGE_TAG: ${{ inputs.imageTag || github.repository }}
        with:
          platforms: linux/amd64
          push: true
          tags: |
            ${{ env.IMAGE_TAG }}:${{ github.ref_name }}
            ${{ env.IMAGE_TAG }}:${{ github.ref_name }}-amd64
            ${{ env.IMAGE_TAG }}:latest
            ${{ env.IMAGE_TAG }}:latest-amd64
      - name: Attest build provenance
        uses: actions/attest@59d89421af93a897026c735860bf21b6eb4f7b26 # v4.1.0
        with:
          # checksums.txt is written by scripts/github-release in the step above
          subject-checksums: checksums.txt
      - name: Publish github release
        shell: bash
        run: gh release edit ${{ github.ref_name }} --draft=false
        env:
          GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
================================================
FILE: .github/workflows/renovate.yml
================================================
name: Renovate

on:
  workflow_dispatch:
    inputs:
      logLevel:
        description: "Override default log level"
        required: false
        default: "info"
        type: string
      overrideSchedule:
        description: "Override all schedules"
        required: false
        default: "false"
        type: string
  # Run twice in the early morning (UTC) for initial and follow up steps (create pull request and merge)
  schedule:
    - cron: '30 4,6 * * *'

jobs:
  call-workflow:
    uses: rancher/renovate-config/.github/workflows/renovate.yml@c88cbe41a49d02648b9bf83aa5a64902151323fa # release
    with:
      logLevel: ${{ inputs.logLevel || 'info' }}
      overrideSchedule: ${{ github.event.inputs.overrideSchedule == 'true' && '{''schedule'':null}' || '' }}
    secrets: inherit
================================================
FILE: .gitignore
================================================
/.dapper
/.cache
/bin
/dist
*.swp
.idea
/backup
/backup-restore-operator
backup-restore-operator
/secretExamples
test-backup-location-local/
.DS_Store
/backups
sha256sum-amd64.txt
================================================
FILE: Dockerfile
================================================
# Minimal image: SUSE BCI base plus a checksum-verified kubectl and the
# cleanup/verify scripts. cleanup.sh is the default entrypoint; the verify
# Job overrides the command with verify.sh.
FROM registry.suse.com/bci/bci-base:15.7
# kubectl release to bundle and the sha256 of its linux/amd64 binary;
# these two values must be bumped together.
ENV KUBECTL_VERSION=v1.30.14
ENV KUBECTL_SUM_AMD64=7ccac981ece0098284d8961973295f5124d78eab7b89ba5023f35591baa16271
WORKDIR /usr/local/bin
# Download kubectl and fail the build if the checksum does not match.
RUN set -eux; \
curl -LO "https://dl.k8s.io/release/${KUBECTL_VERSION}/bin/linux/amd64/kubectl"; \
echo "${KUBECTL_SUM_AMD64} kubectl" | sha256sum -c -; \
chmod +x kubectl
COPY cleanup.sh verify.sh /usr/local/bin/
RUN chmod +x /usr/local/bin/cleanup.sh /usr/local/bin/verify.sh
ENTRYPOINT ["/usr/local/bin/cleanup.sh"]
================================================
FILE: README.md
================================================
# Rancher resource cleanup script
**Warning**
```
THIS WILL DELETE ALL RESOURCES CREATED BY RANCHER
MAKE SURE YOU HAVE CREATED AND TESTED YOUR BACKUPS
THIS IS A NON REVERSIBLE ACTION
```
This script will delete all Kubernetes resources belonging to/created by Rancher (including installed tools like logging/monitoring/opa gatekeeper/etc). Note: this does not remove any Longhorn resources.
## Using the cleanup script
### Run as a Kubernetes Job
* Deploy the job using `kubectl create -f deploy/rancher-cleanup.yaml`
* Watch logs using `kubectl -n kube-system logs -l job-name=cleanup-job -f`
## Verify
* Deploy the job using `kubectl create -f deploy/verify.yaml`
* Watch logs using `kubectl -n kube-system logs -l job-name=verify-job -f`, output should be empty (besides deprecation warnings)
* Check completed logs using `kubectl -n kube-system logs -l job-name=verify-job -f | grep -v "is deprecated"`, this will exclude deprecation warnings.
## Developing
### How to Make a Release
Releases are done via github actions, and triggered by pushing a
tag to the remote that starts with `v`. There are two types of
releases: "pre" and "full" release. To make a prerelease, push a
tag that contains the string `rc` or `alpha` (for example, `v1.2.3-rc1`
or `v1.2.3-alpha1`). To make a full release, push a tag that does
not contain either of these strings (for example, `v1.2.3`).
================================================
FILE: cleanup.sh
================================================
#!/bin/bash
# cleanup.sh: delete every Kubernetes resource created by Rancher and the
# tools it installs. Destructive and non-reversible; see README.md.
# Usage: cleanup.sh [force]   ("force" skips the interactive confirmation)
# Overridden on package
SCRIPT_VERSION="unreleased"
echo "Running cleanup.sh version ${SCRIPT_VERSION}"
# Warning
echo "==================== WARNING ===================="
echo "THIS WILL DELETE ALL RESOURCES CREATED BY RANCHER"
echo "MAKE SURE YOU HAVE CREATED AND TESTED YOUR BACKUPS"
echo "THIS IS A NON REVERSIBLE ACTION"
echo "==================== WARNING ===================="
# Linux only for now
if [ "$(uname -s)" != "Linux" ]; then
echo "Must be run on Linux"
exit 1
fi
# Check kubectl existence
if ! type kubectl >/dev/null 2>&1; then
echo "kubectl not found in PATH, make sure kubectl is available"
exit 1
fi
# Check timeout existence (used to bound every kubectl delete attempt)
if ! type timeout >/dev/null 2>&1; then
echo "timeout not found in PATH, make sure timeout is available"
exit 1
fi
# Test connectivity
if ! kubectl get nodes >/dev/null 2>&1; then
echo "'kubectl get nodes' exited non-zero, make sure environment variable KUBECONFIG is set to a working kubeconfig file"
exit 1
fi
echo "=> Printing cluster info for confirmation"
kubectl cluster-info
kubectl get nodes -o wide
# Ask for confirmation unless "force" was passed (the Kubernetes Job in
# deploy/rancher-cleanup.yaml passes "force").
if [ "$1" != "force" ]; then
echo "Do you want to continue (y/n)?"
read -r answer
if [ "$answer" != "y" ]; then
exit 1
fi
fi
# kcd: "kubectl delete" with a bounded retry. Each of the (at most) four
# attempts is capped at 21 seconds so a resource stuck on a finalizer cannot
# hang the whole script. Arguments are expanded into the delete command line.
kcd()
{
  for i in 1 2 3 4; do
    if timeout 21 sh -c 'kubectl delete --ignore-not-found=true --grace-period=15 --timeout=20s '"$*"''; then
      break
    fi
  done
}
# kcpf: "kubectl patch finalizers" - clear metadata.finalizers on a resource
# so its deletion cannot block on finalizer processing. All arguments are
# forwarded to kubectl get/patch (e.g. kcpf -n <ns> <kind> <name>).
kcpf()
{
FINALIZERS=$(kubectl get -o jsonpath="{.metadata.finalizers}" "$@")
# Only patch when finalizers are present ("x" prefix guards the empty case)
if [ "x${FINALIZERS}" != "x" ]; then
echo "Finalizers before for ${*}: ${FINALIZERS}"
kubectl patch -p '{"metadata":{"finalizers":null}}' --type=merge "$@"
echo "Finalizers after for ${*}: $(kubectl get -o jsonpath="{.metadata.finalizers}" "${@}")"
fi
}
# kcdns: delete namespace $1. First clears metadata.finalizers (via kcpf),
# then clears spec.finalizers - which cannot be patched directly and must be
# rewritten through the namespace /finalize subresource - and finally retries
# the delete up to 4 times with a 21s cap per attempt (same scheme as kcd).
kcdns()
{
if kubectl get namespace "$1"; then
kcpf namespace "$1"
FINALIZERS=$(kubectl get -o jsonpath="{.spec.finalizers}" namespace "$1")
if [ "x${FINALIZERS}" != "x" ]; then
echo "Finalizers before for namespace ${1}: ${FINALIZERS}"
# Flatten the JSON to one line, blank out the finalizers array, and PUT the
# result to the /finalize endpoint.
kubectl get -o json namespace "$1" | tr -d "\n" | sed "s/\"finalizers\": \[[^]]\+\]/\"finalizers\": []/" | kubectl replace --raw /api/v1/namespaces/$1/finalize -f -
echo "Finalizers after for namespace ${1}: $(kubectl get -o jsonpath="{.spec.finalizers}" namespace ${1})"
fi
i="0"
while [ $i -lt 4 ]; do
if timeout 21 sh -c 'kubectl delete --ignore-not-found=true --grace-period=15 --timeout=20s namespace '"$1"''; then
break
fi
i=$((i+1))
done
fi
}
# printapiversion: print the API group part of an apiVersion string
# ("apps/v1" -> "apps"). Core-group resources ("v1") have no group, so an
# empty line is printed for inputs without a slash.
printapiversion()
{
  case "$1" in
    */*) echo "${1%%/*}" ;;
    *) echo "" ;;
  esac
}
# Trace every command from here on so job logs show exactly what was deleted.
set -x
# Namespaces with resources that probably have finalizers/dependencies (needs manual traverse to patch and delete else it will hang)
CATTLE_NAMESPACES="local cattle-system cattle-impersonation-system cattle-global-data cattle-global-nt cattle-provisioning-capi-system cattle-turtles-system cattle-capi-system"
TOOLS_NAMESPACES="istio-system cattle-resources-system cis-operator-system cattle-dashboards cattle-gatekeeper-system cattle-alerting cattle-logging cattle-pipeline cattle-prometheus rancher-operator-system cattle-monitoring-system cattle-logging-system cattle-elemental-system"
FLEET_NAMESPACES="cattle-fleet-clusters-system cattle-fleet-local-system cattle-fleet-system fleet-default fleet-local fleet-system"
# Delete rancher install to not have anything running that (re)creates resources
kcd "-n cattle-system deploy,ds --all"
kubectl -n cattle-system wait --for delete pod --selector=app=rancher
# Delete the only resource not in cattle namespaces
kcd "-n kube-system configmap cattle-controllers"
# Delete any blocking webhooks from preventing requests
if kubectl get mutatingwebhookconfigurations -o name | grep -q cattle\.io; then
kcd "$(kubectl get mutatingwebhookconfigurations -o name | grep cattle\.io)"
fi
if kubectl get validatingwebhookconfigurations -o name | grep -q cattle\.io; then
kcd "$(kubectl get validatingwebhookconfigurations -o name | grep cattle\.io)"
fi
# Delete any monitoring webhooks
if kubectl get mutatingwebhookconfigurations -o name | grep -q rancher-monitoring; then
kcd "$(kubectl get mutatingwebhookconfigurations -o name | grep rancher-monitoring)"
fi
if kubectl get validatingwebhookconfigurations -o name | grep -q rancher-monitoring; then
kcd "$(kubectl get validatingwebhookconfigurations -o name | grep rancher-monitoring)"
fi
# Delete any gatekeeper webhooks
if kubectl get validatingwebhookconfigurations -o name | grep -q gatekeeper; then
kcd "$(kubectl get validatingwebhookconfigurations -o name | grep gatekeeper)"
fi
# Delete any istio webhooks
if kubectl get mutatingwebhookconfigurations -o name | grep -q istio; then
kcd "$(kubectl get mutatingwebhookconfigurations -o name | grep istio)"
fi
if kubectl get validatingwebhookconfigurations -o name | grep -q istio; then
kcd "$(kubectl get validatingwebhookconfigurations -o name | grep istio)"
fi
# Delete any capi webhooks
if kubectl get mutatingwebhookconfigurations -o name | grep -q capi; then
kcd "$(kubectl get mutatingwebhookconfigurations -o name | grep capi)"
fi
if kubectl get validatingwebhookconfigurations -o name | grep -q capi; then
kcd "$(kubectl get validatingwebhookconfigurations -o name | grep capi)"
fi
# Cluster api
if [ -n "$(kubectl get validatingwebhookconfiguration.admissionregistration.k8s.io/validating-webhook-configuration)" ]; then
kcd validatingwebhookconfiguration.admissionregistration.k8s.io/validating-webhook-configuration
fi
if [ -n "$(kubectl get mutatingwebhookconfiguration.admissionregistration.k8s.io/mutating-webhook-configuration)" ]; then
kcd mutatingwebhookconfiguration.admissionregistration.k8s.io/mutating-webhook-configuration
fi
# Delete generic k8s resources either labeled with norman or resource name starting with "cattle|rancher|fleet"
# ClusterRole/ClusterRoleBinding

# cleanup_cluster_rbac KIND PATTERN
# Strip finalizers from, then delete, every cluster-scoped object of KIND
# (plural resource name) whose name matches the grep PATTERN.
cleanup_cluster_rbac()
{
  kubectl get "$1" --no-headers -o custom-columns=NAME:.metadata.name | grep "$2" | while read -r NAME; do
    kcpf "$1" "$NAME"
    kcd "$1 ""$NAME"""
  done
}

# cleanup_cluster_rbac_labeled KIND LABELSELECTOR
# Same as above, but select objects by label instead of by name pattern.
cleanup_cluster_rbac_labeled()
{
  kubectl get "$1" -l "$2" --no-headers -o custom-columns=NAME:.metadata.name | while read -r NAME; do
    kcpf "$1" "$NAME"
    kcd "$1 ""$NAME"""
  done
}

# ClusterRoleBindings: norman-labeled plus all Rancher/tool name prefixes
cleanup_cluster_rbac_labeled clusterrolebindings cattle.io/creator=norman
for PATTERN in '^cattle-' rancher '^fleet-' '^gitjob' '^pod-impersonation-helm-' '^gatekeeper' '^cis' '^istio' '^elemental'; do
  cleanup_cluster_rbac clusterrolebindings "$PATTERN"
done

# ClusterRoles: norman-labeled plus all Rancher/tool name prefixes
cleanup_cluster_rbac_labeled clusterroles cattle.io/creator=norman
for PATTERN in '^cattle-' rancher '^fleet' '^gitjob' '^pod-impersonation-helm' '^logging-' '^monitoring-' '^gatekeeper' '^cis' '^istio' '^elemental'; do
  cleanup_cluster_rbac clusterroles "$PATTERN"
done
# Bulk delete data CRDs
# Saves time in the loop below where we patch/delete individual resources
# (rkeaddons.management.cattle.io was previously listed twice; deduplicated)
DATACRDS="settings.management.cattle.io authconfigs.management.cattle.io features.management.cattle.io rkeaddons.management.cattle.io rkek8sserviceoptions.management.cattle.io rkek8ssystemimages.management.cattle.io catalogtemplateversions.management.cattle.io catalogtemplates.management.cattle.io tokens.management.cattle.io elemental.cattle.io"
for CRD in $DATACRDS; do
  kcd "crd $CRD"
done
# Delete apiservice
# Note: elemental apiservices are looked up with "-o name" like the others;
# the previous bare "kubectl get apiservice elemental-operator" emitted table
# columns (NAME/SERVICE/...) which were fed to kcd as bogus resource names.
for APISERVICE in $(kubectl get apiservice -o name | grep cattle | grep -v k3s\.cattle\.io | grep -v helm\.cattle\.io) $(kubectl get apiservice -o name | grep gatekeeper\.sh) $(kubectl get apiservice -o name | grep istio\.io) $(kubectl get apiservice -o name | grep elemental) apiservice\.apiregistration\.k8s\.io\/v1beta1\.custom\.metrics\.k8s\.io; do
  kcd "$APISERVICE"
done
# Pod security policies
# PodSecurityPolicy was removed in Kubernetes v1.25, so only attempt removal
# when the API is still served. BUG FIX: the previous check used
# "[ $? -ne 0 ]", which ran PSP removal only when "kubectl get
# podsecuritypolicy" FAILED (i.e. when the API does not exist) and printed
# the "skipping" message on clusters that actually have PSPs. The condition
# must trigger on success (exit code 0).
if kubectl get podsecuritypolicy > /dev/null 2>&1; then
  echo "Removing PSPs"
  # Rancher logging
  for PSP in $(kubectl get podsecuritypolicy -o name -l app.kubernetes.io/name=rancher-logging) podsecuritypolicy.policy/rancher-logging-rke-aggregator; do
    kcd "$PSP"
  done
  # Rancher monitoring
  for PSP in $(kubectl get podsecuritypolicy -o name -l release=rancher-monitoring) $(kubectl get podsecuritypolicy -o name -l app=rancher-monitoring-crd-manager) $(kubectl get podsecuritypolicy -o name -l app=rancher-monitoring-patch-sa) $(kubectl get podsecuritypolicy -o name -l app.kubernetes.io/instance=rancher-monitoring); do
    kcd "$PSP"
  done
  # Rancher OPA
  for PSP in $(kubectl get podsecuritypolicy -o name -l release=rancher-gatekeeper) $(kubectl get podsecuritypolicy -o name -l app=rancher-gatekeeper-crd-manager); do
    kcd "$PSP"
  done
  # Backup restore operator
  for PSP in $(kubectl get podsecuritypolicy -o name -l app.kubernetes.io/name=rancher-backup); do
    kcd "$PSP"
  done
  # Istio
  for PSP in istio-installer istio-psp kiali-psp psp-istio-cni; do
    kcd "podsecuritypolicy $PSP"
  done
else
  echo "Kubernetes version v1.25 or higher, skipping PSP removal"
fi
# Get all namespaced resources and delete in loop
# Exclude helm.cattle.io and k3s.cattle.io to not break K3S/RKE2 addons
# Pattern used repeatedly below: build a comma-separated list of deletable
# resource types, list every instance cluster-wide, then patch finalizers
# (kcpf) and delete (kcd) each one individually.
kubectl get "$(kubectl api-resources --namespaced=true --verbs=delete -o name| grep cattle\.io | grep -v helm\.cattle\.io | grep -v k3s\.cattle\.io | tr "\n" "," | sed -e 's/,$//')" -A --no-headers -o custom-columns=NAME:.metadata.name,NAMESPACE:.metadata.namespace,KIND:.kind,APIVERSION:.apiVersion | while read -r NAME NAMESPACE KIND APIVERSION; do
kcpf -n "$NAMESPACE" "${KIND}.$(printapiversion "$APIVERSION")" "$NAME"
kcd "-n ""$NAMESPACE"" ${KIND}.$(printapiversion "$APIVERSION") ""$NAME"""
done
# Logging
kubectl get "$(kubectl api-resources --namespaced=true --verbs=delete -o name| grep logging\.banzaicloud\.io | tr "\n" "," | sed -e 's/,$//')" -A --no-headers -o custom-columns=NAME:.metadata.name,NAMESPACE:.metadata.namespace,KIND:.kind,APIVERSION:.apiVersion | while read -r NAME NAMESPACE KIND APIVERSION; do
kcpf -n "$NAMESPACE" "${KIND}.$(printapiversion "$APIVERSION")" "$NAME"
kcd "-n ""$NAMESPACE"" ${KIND}.$(printapiversion "$APIVERSION") ""$NAME"""
done
# Sweep every deletable resource type (events excluded) for objects whose
# line mentions rancher-monitoring, in any namespace.
kubectl get "$(kubectl api-resources --namespaced=true --verbs=delete -o name | grep -v events\.events\.k8s\.io | grep -v ^events$ | tr "\n" "," | sed -e 's/,$//')" -A --no-headers -o custom-columns=NAME:.metadata.name,NAMESPACE:.metadata.namespace,KIND:.kind,APIVERSION:.apiVersion | grep rancher-monitoring | while read -r NAME NAMESPACE KIND APIVERSION; do
kcpf -n "$NAMESPACE" "${KIND}.$(printapiversion "$APIVERSION")" "$NAME"
kcd "-n ""$NAMESPACE"" ${KIND}.$(printapiversion "$APIVERSION") ""$NAME"""
done
# Monitoring
kubectl get "$(kubectl api-resources --namespaced=true --verbs=delete -o name| grep monitoring\.coreos\.com | tr "\n" "," | sed -e 's/,$//')" -A --no-headers -o custom-columns=NAME:.metadata.name,NAMESPACE:.metadata.namespace,KIND:.kind,APIVERSION:.apiVersion | while read -r NAME NAMESPACE KIND APIVERSION; do
kcpf -n "$NAMESPACE" "${KIND}.$(printapiversion "$APIVERSION")" "$NAME"
kcd "-n ""$NAMESPACE"" ${KIND}.$(printapiversion "$APIVERSION") ""$NAME"""
done
# Gatekeeper
kubectl get "$(kubectl api-resources --namespaced=true --verbs=delete -o name| grep gatekeeper\.sh | tr "\n" "," | sed -e 's/,$//')" -A --no-headers -o custom-columns=NAME:.metadata.name,NAMESPACE:.metadata.namespace,KIND:.kind,APIVERSION:.apiVersion | while read -r NAME NAMESPACE KIND APIVERSION; do
kcpf -n "$NAMESPACE" "${KIND}.$(printapiversion "$APIVERSION")" "$NAME"
kcd "-n ""$NAMESPACE"" ${KIND}.$(printapiversion "$APIVERSION") ""$NAME"""
done
# Cluster-api
kubectl get "$(kubectl api-resources --namespaced=true --verbs=delete -o name| grep cluster\.x-k8s\.io | tr "\n" "," | sed -e 's/,$//')" -A --no-headers -o custom-columns=NAME:.metadata.name,NAMESPACE:.metadata.namespace,KIND:.kind,APIVERSION:.apiVersion | while read -r NAME NAMESPACE KIND APIVERSION; do
kcpf -n "$NAMESPACE" "${KIND}.$(printapiversion "$APIVERSION")" "$NAME"
kcd "-n ""$NAMESPACE"" ${KIND}.$(printapiversion "$APIVERSION") ""$NAME"""
done
# Get all non-namespaced resources and delete in loop
kubectl get "$(kubectl api-resources --namespaced=false --verbs=delete -o name| grep cattle\.io | tr "\n" "," | sed -e 's/,$//')" -A --no-headers -o name | while read -r NAME; do
kcpf "$NAME"
kcd "$NAME"
done
# Logging
kubectl get "$(kubectl api-resources --namespaced=false --verbs=delete -o name| grep logging\.banzaicloud\.io | tr "\n" "," | sed -e 's/,$//')" -A --no-headers -o name | while read -r NAME; do
kcpf "$NAME"
kcd "$NAME"
done
# Gatekeeper
kubectl get "$(kubectl api-resources --namespaced=false --verbs=delete -o name| grep gatekeeper\.sh | tr "\n" "," | sed -e 's/,$//')" -A --no-headers -o name | while read -r NAME; do
kcpf "$NAME"
kcd "$NAME"
done
# Delete istio certs
# Istio copies its root-cert configmap into every namespace, so sweep them all.
for NS in $(kubectl get ns --no-headers -o custom-columns=NAME:.metadata.name); do
kcd "-n ${NS} configmap istio-ca-root-cert"
done
# Delete all cattle namespaces, including project namespaces (p-),cluster (c-),cluster-fleet and user (user-) namespaces
# For each namespace: first patch+delete every deletable resource inside it
# (events excluded), then remove the namespace itself via kcdns, which also
# clears namespace finalizers so the delete cannot hang.
for NS in $TOOLS_NAMESPACES $FLEET_NAMESPACES $CATTLE_NAMESPACES; do
kubectl get "$(kubectl api-resources --namespaced=true --verbs=delete -o name| grep -v events\.events\.k8s\.io | grep -v ^events$ | tr "\n" "," | sed -e 's/,$//')" -n "$NS" --no-headers -o custom-columns=NAME:.metadata.name,NAMESPACE:.metadata.namespace,KIND:.kind,APIVERSION:.apiVersion | while read -r NAME NAMESPACE KIND APIVERSION; do
kcpf -n "$NAMESPACE" "${KIND}.$(printapiversion "$APIVERSION")" "$NAME"
kcd "-n ""$NAMESPACE"" ${KIND}.$(printapiversion "$APIVERSION") ""$NAME"""
done
kcdns "$NS"
done
# Fleet cluster namespaces (cluster-fleet-*)
for NS in $(kubectl get namespace --no-headers -o custom-columns=NAME:.metadata.name | grep "^cluster-fleet"); do
kubectl get "$(kubectl api-resources --namespaced=true --verbs=delete -o name| grep -v events\.events\.k8s\.io | grep -v ^events$ | tr "\n" "," | sed -e 's/,$//')" -n "$NS" --no-headers -o custom-columns=NAME:.metadata.name,NAMESPACE:.metadata.namespace,KIND:.kind,APIVERSION:.apiVersion | while read -r NAME NAMESPACE KIND APIVERSION; do
kcpf -n "$NAMESPACE" "${KIND}.$(printapiversion "$APIVERSION")" "$NAME"
kcd "-n ""$NAMESPACE"" ${KIND}.$(printapiversion "$APIVERSION") ""$NAME"""
done
kcdns "$NS"
done
# Rancher project namespaces (p-*)
for NS in $(kubectl get namespace --no-headers -o custom-columns=NAME:.metadata.name | grep "^p-"); do
kubectl get "$(kubectl api-resources --namespaced=true --verbs=delete -o name| grep -v events\.events\.k8s\.io | grep -v ^events$ | tr "\n" "," | sed -e 's/,$//')" -n "$NS" --no-headers -o custom-columns=NAME:.metadata.name,NAMESPACE:.metadata.namespace,KIND:.kind,APIVERSION:.apiVersion | while read -r NAME NAMESPACE KIND APIVERSION; do
kcpf -n "$NAMESPACE" "${KIND}.$(printapiversion "$APIVERSION")" "$NAME"
kcd "-n ""$NAMESPACE"" ${KIND}.$(printapiversion "$APIVERSION") ""$NAME"""
done
kcdns "$NS"
done
# Rancher cluster namespaces (c-*)
for NS in $(kubectl get namespace --no-headers -o custom-columns=NAME:.metadata.name | grep "^c-"); do
kubectl get "$(kubectl api-resources --namespaced=true --verbs=delete -o name| grep -v events\.events\.k8s\.io | grep -v ^events$ | tr "\n" "," | sed -e 's/,$//')" -n "$NS" --no-headers -o custom-columns=NAME:.metadata.name,NAMESPACE:.metadata.namespace,KIND:.kind,APIVERSION:.apiVersion | while read -r NAME NAMESPACE KIND APIVERSION; do
kcpf -n "$NAMESPACE" "${KIND}.$(printapiversion "$APIVERSION")" "$NAME"
kcd "-n ""$NAMESPACE"" ${KIND}.$(printapiversion "$APIVERSION") ""$NAME"""
done
kcdns "$NS"
done
# Rancher user namespaces (user-*)
for NS in $(kubectl get namespace --no-headers -o custom-columns=NAME:.metadata.name | grep "^user-"); do
kubectl get "$(kubectl api-resources --namespaced=true --verbs=delete -o name| grep -v events\.events\.k8s\.io | grep -v ^events$ | tr "\n" "," | sed -e 's/,$//')" -n "$NS" --no-headers -o custom-columns=NAME:.metadata.name,NAMESPACE:.metadata.namespace,KIND:.kind,APIVERSION:.apiVersion | while read -r NAME NAMESPACE KIND APIVERSION; do
kcpf -n "$NAMESPACE" "${KIND}.$(printapiversion "$APIVERSION")" "$NAME"
kcd "-n ""$NAMESPACE"" ${KIND}.$(printapiversion "$APIVERSION") ""$NAME"""
done
kcdns "$NS"
done
# Rancher user namespaces (u-*)
for NS in $(kubectl get namespace --no-headers -o custom-columns=NAME:.metadata.name | grep "^u-"); do
kubectl get "$(kubectl api-resources --namespaced=true --verbs=delete -o name| grep -v events\.events\.k8s\.io | grep -v ^events$ | tr "\n" "," | sed -e 's/,$//')" -n "$NS" --no-headers -o custom-columns=NAME:.metadata.name,NAMESPACE:.metadata.namespace,KIND:.kind,APIVERSION:.apiVersion | while read -r NAME NAMESPACE KIND APIVERSION; do
kcpf -n "$NAMESPACE" "${KIND}.$(printapiversion "$APIVERSION")" "$NAME"
kcd "-n ""$NAMESPACE"" ${KIND}.$(printapiversion "$APIVERSION") ""$NAME"""
done
kcdns "$NS"
done
# Final pass: remove the CRDs themselves, now that all their instances have
# been patched and deleted above.
# Delete logging CRDs
for CRD in $(kubectl get crd -o name | grep logging\.banzaicloud\.io); do
kcd "$CRD"
done
# Delete monitoring CRDs
for CRD in $(kubectl get crd -o name | grep monitoring\.coreos\.com); do
kcd "$CRD"
done
# Delete OPA CRDs
for CRD in $(kubectl get crd -o name | grep gatekeeper\.sh); do
kcd "$CRD"
done
# Delete Istio CRDs
for CRD in $(kubectl get crd -o name | grep istio\.io); do
kcd "$CRD"
done
# Delete cluster-api CRDs
for CRD in $(kubectl get crd -o name | grep cluster\.x-k8s\.io); do
kcd "$CRD"
done
# Delete all cattle CRDs
# Exclude helm.cattle.io and addons.k3s.cattle.io to not break RKE2 addons
for CRD in $(kubectl get crd -o name | grep cattle\.io | grep -v helm\.cattle\.io | grep -v k3s\.cattle\.io); do
kcd "$CRD"
done
================================================
FILE: deploy/rancher-cleanup.yaml
================================================
---
# Service account the cleanup Job runs as
apiVersion: v1
kind: ServiceAccount
metadata:
  name: cleanup-service-account
  namespace: kube-system
---
# The cleanup job deletes resources of every Rancher-related kind across all
# namespaces, so its service account is bound to cluster-admin.
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: cleanup-admin
subjects:
  - kind: ServiceAccount
    name: cleanup-service-account
    namespace: kube-system
roleRef:
  kind: ClusterRole
  name: cluster-admin
  apiGroup: rbac.authorization.k8s.io
---
apiVersion: batch/v1
kind: Job
metadata:
  name: cleanup-job
  namespace: kube-system
  labels:
    app: cleanup
spec:
  template:
    spec:
      containers:
        - name: cleanup
          image: rancher/rancher-cleanup:latest
          # "force" skips cleanup.sh's interactive confirmation prompt
          args: ["force"]
          imagePullPolicy: Always
      serviceAccountName: cleanup-service-account
      restartPolicy: Never
  backoffLimit: 4
================================================
FILE: deploy/verify.yaml
================================================
---
# Service account the verify Job runs as (same name as the cleanup manifest;
# apply either file, the objects are identical)
apiVersion: v1
kind: ServiceAccount
metadata:
  name: cleanup-service-account
  namespace: kube-system
---
# Verification lists resources of every Rancher-related kind across all
# namespaces, so the service account is bound to cluster-admin.
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: cleanup-admin
subjects:
  - kind: ServiceAccount
    name: cleanup-service-account
    namespace: kube-system
roleRef:
  kind: ClusterRole
  name: cluster-admin
  apiGroup: rbac.authorization.k8s.io
---
apiVersion: batch/v1
kind: Job
metadata:
  name: verify-job
  namespace: kube-system
  labels:
    app: verify
spec:
  template:
    spec:
      containers:
        - name: verify
          image: rancher/rancher-cleanup:latest
          # Override the image's cleanup.sh entrypoint with the verify script
          command: ["verify.sh"]
          imagePullPolicy: Always
      serviceAccountName: cleanup-service-account
      restartPolicy: Never
  backoffLimit: 0
================================================
FILE: scripts/github-release
================================================
#!/bin/bash
# Create a draft GitHub release for an existing tag and attach cleanup.sh,
# verify.sh, and a sha256 checksum file covering both scripts.
#
# This script requires the following environment variables to be set:
# TAG: the remote tag that you want the release to be based off of
# GH_REPO: the github repo to create the release on, in <owner>/<repo> format
set -e
if [ -z "$TAG" ]; then
  echo "Must specify TAG environment variable"
  exit 1
fi
if [ -z "$GH_REPO" ]; then
  echo "Must specify GH_REPO environment variable"
  exit 1
fi
# create checksum file
CHECKSUM_FILE='checksums.txt'
# Quote expansions of $CHECKSUM_FILE (shellcheck SC2086) so the script stays
# correct even if the filename is ever changed to one containing spaces.
sha256sum cleanup.sh verify.sh > "$CHECKSUM_FILE"
# Release candidates and alphas are published as pre-releases and must not
# be marked as the latest release.
if echo "$TAG" | grep -Eq "rc|alpha"; then
  gh release create --draft --verify-tag --generate-notes --prerelease --latest=false "$TAG" cleanup.sh verify.sh "$CHECKSUM_FILE"
else
  gh release create --draft --verify-tag --generate-notes "$TAG" cleanup.sh verify.sh "$CHECKSUM_FILE"
fi
================================================
FILE: verify.sh
================================================
#!/bin/bash
# Overridden on package
SCRIPT_VERSION="unreleased"
echo "Running verify.sh version ${SCRIPT_VERSION}"
# kcg: "kubectl get" wrapper that suppresses NotFound errors, so the checks
# below print output only for resources that still exist after cleanup.
kcg()
{
  kubectl get --ignore-not-found=true "$@"
}
# Rancher workloads in cattle-system and the cattle-controllers configmap
kcg -n cattle-system deploy,ds
kcg -n kube-system configmap cattle-controllers
# Mutating webhook configurations matching Rancher, Monitoring, and Istio
kcg mutatingwebhookconfigurations -o name | grep cattle\.io
kcg mutatingwebhookconfigurations -o name | grep rancher-monitoring
kcg mutatingwebhookconfigurations -o name | grep istio
kcg mutatingwebhookconfigurations -o name | grep mutating-webhook-configuration
# Validating webhook configurations matching Rancher, Monitoring,
# Gatekeeper, and Istio
kcg validatingwebhookconfigurations -o name | grep cattle\.io
kcg validatingwebhookconfigurations -o name | grep rancher-monitoring
kcg validatingwebhookconfigurations -o name | grep gatekeeper
kcg validatingwebhookconfigurations -o name | grep istio
kcg validatingwebhookconfigurations -o name | grep validating-webhook-configuration
# API services; k3s.cattle.io and helm.cattle.io are excluded (they belong
# to K3s/RKE2, not Rancher — same exclusion as the CRD checks below)
kcg apiservice -o name | grep cattle\.io | grep -v k3s\.cattle\.io | grep -v helm\.cattle\.io
kcg apiservice -o name | grep istio
kcg apiservice -o name | grep gatekeeper
kcg apiservice -o name | grep custom\.metrics\.k8s\.io
kcg apiservice -o name | grep elemental
# ClusterRoleBindings created by Rancher (cattle.io/creator=norman label)
# or whose names match Rancher / chart prefixes
kcg clusterrolebinding -l cattle.io/creator=norman
kcg clusterrolebinding --no-headers -o custom-columns=NAME:.metadata.name | grep ^cattle-
kcg clusterrolebinding --no-headers -o custom-columns=NAME:.metadata.name | grep rancher
kcg clusterrolebinding --no-headers -o custom-columns=NAME:.metadata.name | grep ^fleet-
kcg clusterrolebinding --no-headers -o custom-columns=NAME:.metadata.name | grep ^gitjob
kcg clusterrolebinding --no-headers -o custom-columns=NAME:.metadata.name | grep ^pod-impersonation-helm
kcg clusterrolebinding --no-headers -o custom-columns=NAME:.metadata.name | grep ^gatekeeper
kcg clusterrolebinding --no-headers -o custom-columns=NAME:.metadata.name | grep ^cis
kcg clusterrolebinding --no-headers -o custom-columns=NAME:.metadata.name | grep ^istio
kcg clusterrolebinding --no-headers -o custom-columns=NAME:.metadata.name | grep ^elemental
# ClusterRoles created by Rancher or matching the same name patterns
kcg clusterroles -l cattle.io/creator=norman
kcg clusterroles --no-headers -o custom-columns=NAME:.metadata.name | grep ^cattle-
kcg clusterroles --no-headers -o custom-columns=NAME:.metadata.name | grep rancher
kcg clusterroles --no-headers -o custom-columns=NAME:.metadata.name | grep ^fleet
kcg clusterroles --no-headers -o custom-columns=NAME:.metadata.name | grep ^gitjob
kcg clusterroles --no-headers -o custom-columns=NAME:.metadata.name | grep ^pod-impersonation-helm
kcg clusterroles --no-headers -o custom-columns=NAME:.metadata.name | grep ^logging-
kcg clusterroles --no-headers -o custom-columns=NAME:.metadata.name | grep ^monitoring-
kcg clusterroles --no-headers -o custom-columns=NAME:.metadata.name | grep ^gatekeeper
kcg clusterroles --no-headers -o custom-columns=NAME:.metadata.name | grep ^cis
kcg clusterroles --no-headers -o custom-columns=NAME:.metadata.name | grep ^istio
kcg clusterroles --no-headers -o custom-columns=NAME:.metadata.name | grep ^elemental
# Pod security policies
# PodSecurityPolicy was removed from the Kubernetes API in v1.25, so probe
# for the resource type first and only run the PSP checks when it exists.
# Fix: the previous code tested `[ $? -ne 0 ]`, which inverted the intended
# logic — the PSP checks ran exactly when the PSP API was ABSENT, and the
# "skipping" message printed when PSPs were available.
if kubectl get podsecuritypolicy > /dev/null 2>&1; then
  echo "Checking for PSPs"
  kcg podsecuritypolicy -o name -l app.kubernetes.io/name=rancher-logging
  kcg podsecuritypolicy.policy/rancher-logging-rke-aggregator
  kcg podsecuritypolicy -o name -l release=rancher-monitoring
  kcg podsecuritypolicy -o name -l app=rancher-monitoring-crd-manager
  kcg podsecuritypolicy -o name -l app=rancher-monitoring-patch-sa
  kcg podsecuritypolicy -o name -l app.kubernetes.io/instance=rancher-monitoring
  kcg podsecuritypolicy -o name -l release=rancher-gatekeeper
  kcg podsecuritypolicy -o name -l app=rancher-gatekeeper-crd-manager
  kcg podsecuritypolicy -o name -l app.kubernetes.io/name=rancher-backup
  kcg podsecuritypolicy -o name | grep istio-installer
  kcg podsecuritypolicy -o name | grep istio-psp
  kcg podsecuritypolicy -o name | grep kiali-psp
  kcg podsecuritypolicy -o name | grep psp-istio-cni
else
  echo "Kubernetes version v1.25 or higher, skipping PSP check"
fi
# Namespaces matching Rancher patterns: cattle-*, per-cluster (c-*),
# per-project (p-*), per-user (user-*/u-*), plus fleet/istio/elemental
kcg namespace -o name | grep "^cattle"
kcg namespace -o name | grep "rancher-operator-system"
kcg namespace -o name | grep "cis-operator-system"
kcg namespace -o name | grep "^c-"
kcg namespace -o name | grep "^p-"
kcg namespace -o name | grep "^user-"
kcg namespace -o name | grep "^u-"
kcg namespace -o name | grep "fleet"
kcg namespace -o name | grep "istio"
kcg namespace -o name | grep "elemental"
# Remaining namespaced custom-resource instances across all namespaces for
# the Logging, Monitoring, Gatekeeper, and cluster-api API groups; the
# second line greps every deletable namespaced resource (except events) for
# anything named rancher-monitoring
kcg "$(kubectl api-resources --namespaced=true --verbs=delete -o name | grep logging\.banzaicloud\.io | tr "\n" "," | sed -e 's/,$//')" -A --no-headers -o custom-columns=NAME:.metadata.name,NAMESPACE:.metadata.namespace,KIND:.kind,APIVERSION:.apiVersion 2>/dev/null
kcg "$(kubectl api-resources --namespaced=true --verbs=delete -o name | grep -v events\.events\.k8s\.io | grep -v ^events$ | tr "\n" "," | sed -e 's/,$//')" -A --no-headers -o custom-columns=NAME:.metadata.name,NAMESPACE:.metadata.namespace,KIND:.kind,APIVERSION:.apiVersion | grep rancher-monitoring 2>/dev/null
kcg "$(kubectl api-resources --namespaced=true --verbs=delete -o name | grep monitoring\.coreos\.com | tr "\n" "," | sed -e 's/,$//')" -A --no-headers -o custom-columns=NAME:.metadata.name,NAMESPACE:.metadata.namespace,KIND:.kind,APIVERSION:.apiVersion 2>/dev/null
kcg "$(kubectl api-resources --namespaced=true --verbs=delete -o name | grep gatekeeper\.sh | tr "\n" "," | sed -e 's/,$//')" -A --no-headers -o custom-columns=NAME:.metadata.name,NAMESPACE:.metadata.namespace,KIND:.kind,APIVERSION:.apiVersion 2>/dev/null
kcg "$(kubectl api-resources --namespaced=true --verbs=delete -o name | grep cluster\.x-k8s\.io | tr "\n" "," | sed -e 's/,$//')" -A --no-headers -o custom-columns=NAME:.metadata.name,NAMESPACE:.metadata.namespace,KIND:.kind,APIVERSION:.apiVersion 2>/dev/null
# NOTE(review): the next two lines only print the comma-joined *names* of
# matching cluster-scoped resource types; unlike the checks above they do
# not list instances — confirm this is intended
kubectl api-resources --namespaced=false --verbs=delete -o name| grep logging\.banzaicloud\.io | tr "\n" "," | sed -e 's/,$//'
kubectl api-resources --namespaced=false --verbs=delete -o name| grep gatekeeper\.sh | tr "\n" "," | sed -e 's/,$//'
# Leftover CRDs; helm.cattle.io and k3s.cattle.io are excluded because they
# belong to K3s/RKE2 addons, not Rancher
kcg crd | grep cattle\.io | grep -v helm\.cattle\.io | grep -v k3s\.cattle\.io
kcg crd | grep logging\.banzaicloud\.io
kcg crd | grep monitoring\.coreos\.com
kcg crd | grep gatekeeper\.sh
kcg crd | grep istio\.io
kcg crd | grep cluster\.x-k8s\.io
# istio-ca-root-cert configmaps in any namespace
kcg configmap -A | grep istio-ca-root-cert
# NOTE(review): these also print matching resource *type* names rather than
# instances — confirm intended
kubectl api-resources --namespaced=true --verbs=delete -o name | grep cattle\.io | grep -v helm\.cattle\.io | grep -v k3s\.cattle\.io | tr "\n" "," | sed -e 's/,$//'
kubectl api-resources --namespaced=false --verbs=delete -o name| grep cattle\.io | tr "\n" "," | sed -e 's/,$//'
gitextract_nbeir257/
├── .github/
│   ├── CODEOWNERS
│   ├── renovate.json
│   └── workflows/
│       ├── fossa.yml
│       ├── pull-request.yaml
│       ├── release.yaml
│       └── renovate.yml
├── .gitignore
├── Dockerfile
├── README.md
├── cleanup.sh
├── deploy/
│   ├── rancher-cleanup.yaml
│   └── verify.yaml
├── scripts/
│   └── github-release
└── verify.sh
Condensed preview — 14 files, each showing path, character count, and a content snippet. Download the .json file or copy for the full structured content (41K chars).
[
{
"path": ".github/CODEOWNERS",
"chars": 16,
"preview": "* @rancher/orbs\n"
},
{
"path": ".github/renovate.json",
"chars": 226,
"preview": "{\n \"extends\": [\"github>rancher/renovate-config#release\"],\n \"baseBranches\": [\"main\"],\n \"packageRules\": [\n {\n \""
},
{
"path": ".github/workflows/fossa.yml",
"chars": 934,
"preview": "name: FOSSA Scanning\n\non:\n push:\n branches: [\"main\", \"master\", \"release/**\"]\n workflow_dispatch:\n\npermissions:\n co"
},
{
"path": ".github/workflows/pull-request.yaml",
"chars": 544,
"preview": "name: Pull request checks\n\non: [pull_request]\n\njobs:\n shellcheck:\n runs-on: ubuntu-latest\n steps:\n - uses: a"
},
{
"path": ".github/workflows/release.yaml",
"chars": 3720,
"preview": "name: Release\n\non:\n push:\n tags:\n - 'v*'\n workflow_dispatch:\n inputs:\n imageTag:\n description: "
},
{
"path": ".github/workflows/renovate.yml",
"chars": 800,
"preview": "name: Renovate\non:\n workflow_dispatch:\n inputs:\n logLevel:\n description: \"Override default log level\"\n "
},
{
"path": ".gitignore",
"chars": 182,
"preview": "/.dapper\n/.cache\n/bin\n/dist\n*.swp\n.idea\n./backup\n./backup-restore-operator\nbackup-restore-operator\n/secretExamples\ntest-"
},
{
"path": "Dockerfile",
"chars": 516,
"preview": "FROM registry.suse.com/bci/bci-base:15.7\n\nENV KUBECTL_VERSION=v1.30.14\nENV KUBECTL_SUM_AMD64=7ccac981ece0098284d89619732"
},
{
"path": "README.md",
"chars": 1399,
"preview": "# Rancher resource cleanup script\n\n**Warning**\n```\nTHIS WILL DELETE ALL RESOURCES CREATED BY RANCHER\nMAKE SURE YOU HAVE "
},
{
"path": "cleanup.sh",
"chars": 21041,
"preview": "#!/bin/bash\n# Overridden on package\nSCRIPT_VERSION=\"unreleased\"\necho \"Running cleanup.sh version ${SCRIPT_VERSION}\"\n\n# W"
},
{
"path": "deploy/rancher-cleanup.yaml",
"chars": 772,
"preview": "---\napiVersion: v1\nkind: ServiceAccount\nmetadata:\n name: cleanup-service-account\n namespace: kube-system\n---\napiVersio"
},
{
"path": "deploy/verify.yaml",
"chars": 776,
"preview": "---\napiVersion: v1\nkind: ServiceAccount\nmetadata:\n name: cleanup-service-account\n namespace: kube-system\n---\napiVersio"
},
{
"path": "scripts/github-release",
"chars": 793,
"preview": "#!/bin/bash\n\n# This script requires the following environment variables to be set:\n# TAG: the remote tag that you want t"
},
{
"path": "verify.sh",
"chars": 6711,
"preview": "#!/bin/bash\n# Overridden on package\nSCRIPT_VERSION=\"unreleased\"\necho \"Running verify.sh version ${SCRIPT_VERSION}\"\n\nkcg("
}
]
About this extraction
This page contains the full source code of the rancher/rancher-cleanup GitHub repository, extracted and formatted as plain text for AI agents and large language models (LLMs). The extraction includes 14 files (37.5 KB), approximately 11.7k tokens. Use this with OpenClaw, Claude, ChatGPT, Cursor, Windsurf, or any other AI tool that accepts text input. You can copy the full output to your clipboard or download it as a .txt file.
Extracted by GitExtract — free GitHub repo to text converter for AI. Built by Nikandr Surkov.