Repository: Azure/eraser
Branch: main
Commit: 20576a24c512
Files: 345
Total size: 1.0 MB
Directory structure:
gitextract_ygu3dx5m/
├── .dockerignore
├── .github/
│ ├── ISSUE_TEMPLATE/
│ │ ├── bug-report.yml
│ │ └── feature-request.yml
│ ├── PULL_REQUEST_TEMPLATE.md
│ ├── dependabot.yml
│ ├── semantic.yml
│ └── workflows/
│ ├── README.md
│ ├── build-id.yaml
│ ├── codeql.yaml
│ ├── dep-review.yaml
│ ├── deploy_docs.yaml
│ ├── e2e-build.yaml
│ ├── e2e-test.yaml
│ ├── patch-docs.yaml
│ ├── release-pr.yaml
│ ├── release.yaml
│ ├── scan-images.yaml
│ ├── scorecard.yml
│ ├── test.yaml
│ └── upgrade.yaml
├── .gitignore
├── .golangci.yaml
├── .trivyignore
├── CODEOWNERS
├── CODE_OF_CONDUCT.md
├── Dockerfile
├── LICENSE
├── MAINTAINERS.md
├── Makefile
├── PROJECT
├── README.md
├── api/
│ ├── group.go
│ ├── unversioned/
│ │ ├── config/
│ │ │ └── config.go
│ │ ├── doc.go
│ │ ├── eraserconfig_types.go
│ │ ├── groupversion_info.go
│ │ ├── imagejob_types.go
│ │ ├── imagelist_types.go
│ │ └── zz_generated.deepcopy.go
│ ├── v1/
│ │ ├── doc.go
│ │ ├── groupversion_info.go
│ │ ├── imagejob_types.go
│ │ ├── imagelist_types.go
│ │ ├── zz_generated.conversion.go
│ │ └── zz_generated.deepcopy.go
│ ├── v1alpha1/
│ │ ├── config/
│ │ │ └── config.go
│ │ ├── custom_conversions.go
│ │ ├── doc.go
│ │ ├── eraserconfig_types.go
│ │ ├── groupversion_info.go
│ │ ├── imagejob_types.go
│ │ ├── imagelist_types.go
│ │ ├── zz_generated.conversion.go
│ │ └── zz_generated.deepcopy.go
│ ├── v1alpha2/
│ │ ├── config/
│ │ │ └── config.go
│ │ ├── custom_conversions.go
│ │ ├── doc.go
│ │ ├── eraserconfig_types.go
│ │ ├── groupversion_info.go
│ │ ├── zz_generated.conversion.go
│ │ └── zz_generated.deepcopy.go
│ └── v1alpha3/
│ ├── config/
│ │ └── config.go
│ ├── doc.go
│ ├── eraserconfig_types.go
│ ├── groupversion_info.go
│ ├── runtime_spec_test.go
│ ├── zz_generated.conversion.go
│ └── zz_generated.deepcopy.go
├── build/
│ └── version.sh
├── config/
│ ├── crd/
│ │ ├── bases/
│ │ │ ├── _.yaml
│ │ │ ├── eraser.sh_imagejobs.yaml
│ │ │ └── eraser.sh_imagelists.yaml
│ │ ├── kustomization.yaml
│ │ ├── kustomizeconfig.yaml
│ │ └── patches/
│ │ ├── cainjection_in_eraserconfigs.yaml
│ │ ├── cainjection_in_imagelists.yaml
│ │ ├── webhook_in_eraserconfigs.yaml
│ │ └── webhook_in_imagelists.yaml
│ ├── default/
│ │ ├── kustomization.yaml
│ │ └── manager_auth_proxy_patch.yaml
│ ├── manager/
│ │ ├── controller_manager_config.yaml
│ │ ├── kustomization.yaml
│ │ ├── manager.yaml
│ │ └── patch.yaml
│ ├── prometheus/
│ │ ├── kustomization.yaml
│ │ └── monitor.yaml
│ └── rbac/
│ ├── auth_proxy_client_clusterrole.yaml
│ ├── auth_proxy_role.yaml
│ ├── auth_proxy_role_binding.yaml
│ ├── auth_proxy_service.yaml
│ ├── cluster_role_binding.yaml
│ ├── imagejob_pods_service.yaml
│ ├── imagelist_editor_role.yaml
│ ├── imagelist_viewer_role.yaml
│ ├── kustomization.yaml
│ ├── leader_election_role.yaml
│ ├── leader_election_role_binding.yaml
│ ├── role.yaml
│ ├── role_binding.yaml
│ └── service_account.yaml
├── controllers/
│ ├── configmap/
│ │ └── configmap.go
│ ├── controller.go
│ ├── imagecollector/
│ │ └── imagecollector_controller.go
│ ├── imagejob/
│ │ └── imagejob_controller.go
│ ├── imagelist/
│ │ └── imagelist_controller.go
│ ├── suite_test.go
│ └── util/
│ └── util.go
├── demo/
│ ├── README.md
│ ├── demo-magic.sh
│ ├── demo.sh
│ └── ds.yaml
├── deploy/
│ └── eraser.yaml
├── docs/
│ ├── README.md
│ ├── babel.config.js
│ ├── design/
│ │ └── README.md
│ ├── docs/
│ │ ├── architecture.md
│ │ ├── code-of-conduct.md
│ │ ├── contributing.md
│ │ ├── custom-scanner.md
│ │ ├── customization.md
│ │ ├── exclusion.md
│ │ ├── faq.md
│ │ ├── installation.md
│ │ ├── introduction.md
│ │ ├── manual-removal.md
│ │ ├── metrics.md
│ │ ├── quick-start.md
│ │ ├── release-management.md
│ │ ├── releasing.md
│ │ ├── setup.md
│ │ └── trivy.md
│ ├── docusaurus.config.js
│ ├── package.json
│ ├── sidebars.js
│ ├── src/
│ │ └── css/
│ │ └── custom.css
│ ├── static/
│ │ └── .nojekyll
│ ├── versioned_docs/
│ │ ├── version-v0.4.x/
│ │ │ ├── architecture.md
│ │ │ ├── code-of-conduct.md
│ │ │ ├── contributing.md
│ │ │ ├── custom-scanner.md
│ │ │ ├── customization.md
│ │ │ ├── exclusion.md
│ │ │ ├── faq.md
│ │ │ ├── installation.md
│ │ │ ├── introduction.md
│ │ │ ├── manual-removal.md
│ │ │ ├── quick-start.md
│ │ │ ├── releasing.md
│ │ │ ├── setup.md
│ │ │ └── trivy.md
│ │ ├── version-v0.5.x/
│ │ │ ├── architecture.md
│ │ │ ├── code-of-conduct.md
│ │ │ ├── contributing.md
│ │ │ ├── custom-scanner.md
│ │ │ ├── customization.md
│ │ │ ├── exclusion.md
│ │ │ ├── faq.md
│ │ │ ├── installation.md
│ │ │ ├── introduction.md
│ │ │ ├── manual-removal.md
│ │ │ ├── quick-start.md
│ │ │ ├── releasing.md
│ │ │ ├── setup.md
│ │ │ └── trivy.md
│ │ ├── version-v1.0.x/
│ │ │ ├── architecture.md
│ │ │ ├── code-of-conduct.md
│ │ │ ├── contributing.md
│ │ │ ├── custom-scanner.md
│ │ │ ├── customization.md
│ │ │ ├── exclusion.md
│ │ │ ├── faq.md
│ │ │ ├── installation.md
│ │ │ ├── introduction.md
│ │ │ ├── manual-removal.md
│ │ │ ├── metrics.md
│ │ │ ├── quick-start.md
│ │ │ ├── releasing.md
│ │ │ ├── setup.md
│ │ │ └── trivy.md
│ │ ├── version-v1.1.x/
│ │ │ ├── architecture.md
│ │ │ ├── code-of-conduct.md
│ │ │ ├── contributing.md
│ │ │ ├── custom-scanner.md
│ │ │ ├── customization.md
│ │ │ ├── exclusion.md
│ │ │ ├── faq.md
│ │ │ ├── installation.md
│ │ │ ├── introduction.md
│ │ │ ├── manual-removal.md
│ │ │ ├── metrics.md
│ │ │ ├── quick-start.md
│ │ │ ├── releasing.md
│ │ │ ├── setup.md
│ │ │ └── trivy.md
│ │ ├── version-v1.2.x/
│ │ │ ├── architecture.md
│ │ │ ├── code-of-conduct.md
│ │ │ ├── contributing.md
│ │ │ ├── custom-scanner.md
│ │ │ ├── customization.md
│ │ │ ├── exclusion.md
│ │ │ ├── faq.md
│ │ │ ├── installation.md
│ │ │ ├── introduction.md
│ │ │ ├── manual-removal.md
│ │ │ ├── metrics.md
│ │ │ ├── quick-start.md
│ │ │ ├── releasing.md
│ │ │ ├── setup.md
│ │ │ └── trivy.md
│ │ ├── version-v1.3.x/
│ │ │ ├── architecture.md
│ │ │ ├── code-of-conduct.md
│ │ │ ├── contributing.md
│ │ │ ├── custom-scanner.md
│ │ │ ├── customization.md
│ │ │ ├── exclusion.md
│ │ │ ├── faq.md
│ │ │ ├── installation.md
│ │ │ ├── introduction.md
│ │ │ ├── manual-removal.md
│ │ │ ├── metrics.md
│ │ │ ├── quick-start.md
│ │ │ ├── release-management.md
│ │ │ ├── releasing.md
│ │ │ ├── setup.md
│ │ │ └── trivy.md
│ │ └── version-v1.4.x/
│ │ ├── architecture.md
│ │ ├── code-of-conduct.md
│ │ ├── contributing.md
│ │ ├── custom-scanner.md
│ │ ├── customization.md
│ │ ├── exclusion.md
│ │ ├── faq.md
│ │ ├── installation.md
│ │ ├── introduction.md
│ │ ├── manual-removal.md
│ │ ├── metrics.md
│ │ ├── quick-start.md
│ │ ├── release-management.md
│ │ ├── releasing.md
│ │ ├── setup.md
│ │ └── trivy.md
│ ├── versioned_sidebars/
│ │ ├── version-v0.4.x-sidebars.json
│ │ ├── version-v0.5.x-sidebars.json
│ │ ├── version-v1.0.x-sidebars.json
│ │ ├── version-v1.1.x-sidebars.json
│ │ ├── version-v1.2.x-sidebars.json
│ │ ├── version-v1.3.x-sidebars.json
│ │ └── version-v1.4.x-sidebars.json
│ └── versions.json
├── go.mod
├── go.sum
├── hack/
│ ├── boilerplate.go.txt
│ ├── go-install.sh
│ └── rootless_docker.sh
├── main.go
├── manifest_staging/
│ └── deploy/
│ └── eraser.yaml
├── pkg/
│ ├── collector/
│ │ ├── collector.go
│ │ └── helpers.go
│ ├── cri/
│ │ ├── client.go
│ │ ├── client_v1.go
│ │ ├── client_v1alpha2.go
│ │ └── util.go
│ ├── logger/
│ │ └── zap.go
│ ├── metrics/
│ │ ├── metrics.go
│ │ └── metrics_test.go
│ ├── scanners/
│ │ ├── template/
│ │ │ └── scanner_template.go
│ │ └── trivy/
│ │ ├── helpers.go
│ │ ├── trivy.go
│ │ ├── trivy_test.go
│ │ ├── types.go
│ │ └── types_test.go
│ └── utils/
│ ├── flag.go
│ ├── pod_info.go
│ ├── security_context.go
│ ├── utils.go
│ └── utils_test.go
├── test/
│ └── e2e/
│ ├── kind-config-custom-runtime.yaml
│ ├── kind-config.yaml
│ ├── test-data/
│ │ ├── Dockerfile.busybox
│ │ ├── Dockerfile.customNode
│ │ ├── Dockerfile.dummyCollector
│ │ ├── eraser_v1_imagelist.yaml
│ │ ├── eraser_v1alpha1_imagelist.yaml
│ │ ├── eraser_v1alpha1_imagelist_updated.yaml
│ │ ├── helm-empty-values.yaml
│ │ ├── helm-test-config.yaml
│ │ ├── imagelist_alpine.yaml
│ │ └── otelcollector.yaml
│ ├── tests/
│ │ ├── collector_delete_deployment/
│ │ │ ├── eraser_test.go
│ │ │ └── main_test.go
│ │ ├── collector_delete_manager/
│ │ │ ├── eraser_test.go
│ │ │ └── main_test.go
│ │ ├── collector_disable_scan/
│ │ │ ├── eraser_test.go
│ │ │ └── main_test.go
│ │ ├── collector_ensure_scan/
│ │ │ ├── eraser_test.go
│ │ │ └── main_test.go
│ │ ├── collector_pipeline/
│ │ │ ├── eraser_test.go
│ │ │ └── main_test.go
│ │ ├── collector_runtime_config/
│ │ │ ├── eraser_test.go
│ │ │ └── main_test.go
│ │ ├── collector_skip_excluded/
│ │ │ ├── eraser_test.go
│ │ │ └── main_test.go
│ │ ├── configmap_update/
│ │ │ ├── eraser_test.go
│ │ │ └── main_test.go
│ │ ├── helm_pull_secret/
│ │ │ ├── eraser_test.go
│ │ │ └── main_test.go
│ │ ├── helm_pull_secret_imagelist/
│ │ │ ├── eraser_test.go
│ │ │ └── main_test.go
│ │ ├── imagelist_alias/
│ │ │ ├── eraser_test.go
│ │ │ └── main_test.go
│ │ ├── imagelist_change/
│ │ │ ├── eraser_test.go
│ │ │ └── main_test.go
│ │ ├── imagelist_exclusion_list/
│ │ │ ├── eraser_test.go
│ │ │ └── main_test.go
│ │ ├── imagelist_include_nodes/
│ │ │ ├── eraser_test.go
│ │ │ └── main_test.go
│ │ ├── imagelist_prune_images/
│ │ │ ├── eraser_test.go
│ │ │ └── main_test.go
│ │ ├── imagelist_rm_images/
│ │ │ ├── eraser_test.go
│ │ │ └── main_test.go
│ │ ├── imagelist_skip_nodes/
│ │ │ ├── eraser_test.go
│ │ │ └── main_test.go
│ │ ├── metrics_test_disable_scanner/
│ │ │ ├── eraser_test.go
│ │ │ └── main_test.go
│ │ ├── metrics_test_eraser/
│ │ │ ├── eraser_test.go
│ │ │ └── main_test.go
│ │ └── metrics_test_scanner/
│ │ ├── eraser_test.go
│ │ └── main_test.go
│ └── util/
│ ├── kubectl.go
│ ├── utils.go
│ └── utils_test.go
├── third_party/
│ └── open-policy-agent/
│ └── gatekeeper/
│ └── helmify/
│ ├── LICENSE
│ ├── README.md
│ ├── kustomization.yaml
│ ├── kustomize-for-helm.yaml
│ ├── main.go
│ ├── replacements.go
│ └── static/
│ ├── .helmignore
│ ├── Chart.yaml
│ ├── README.md
│ ├── templates/
│ │ ├── _helpers.tpl
│ │ └── configmap.yaml
│ └── values.yaml
└── version/
├── version.go
└── version_test.go
================================================
FILE CONTENTS
================================================
================================================
FILE: .dockerignore
================================================
# More info: https://docs.docker.com/engine/reference/builder/#dockerignore-file
# Ignore all files that are not Go source, module, or checksum files.
# NOTE(review): `!` negations only take effect after an ignore pattern (e.g. `**/*`) — confirm one precedes these in the full file.
!**/*.go
!**/*.mod
!**/*.sum
================================================
FILE: .github/ISSUE_TEMPLATE/bug-report.yml
================================================
name: Bug Report
description: Report a bug in Eraser
title: "[BUG] "
labels:
- "bug"
body:
- type: markdown
attributes:
value: |
Please search to see if an issue already exists for your bug before continuing.
> If you need to report a security issue please see https://github.com/eraser-dev/eraser/security/policy instead.
- type: input
attributes:
label: Version of Eraser
placeholder: Release version (e.g. v1.0.0) or `git describe --dirty` output if built from source
- type: textarea
attributes:
label: Expected Behavior
description: Briefly describe what you expect to happen.
- type: textarea
attributes:
label: Actual Behavior
description: Briefly describe what is actually happening.
- type: textarea
attributes:
label: Steps To Reproduce
description: Detailed steps to reproduce the behavior.
placeholder: |
1. In Kubernetes v1.27.0 ...
2. With this config...
3. Run '...'
4. See error...
- type: markdown
attributes:
value: |
Thanks for taking the time to fill out a bug report!
- type: checkboxes
id: idea
attributes:
label: "Are you willing to submit PRs to contribute to this bug fix?"
description: "This is absolutely not required, but we are happy to guide you in the contribution process especially when you already have a good proposal or understanding of how to implement it. Join us at the `#eraser` channel on the [Kubernetes Slack](https://kubernetes.slack.com/archives/C03Q8KV8YQ4) if you have any questions."
options:
- label: Yes, I am willing to implement it.
================================================
FILE: .github/ISSUE_TEMPLATE/feature-request.yml
================================================
name: Request
description: Request a new feature or propose an enhancement to Eraser
title: "[REQ] "
labels:
- "enhancement"
body:
- type: markdown
attributes:
value: |
Please search to see if an issue already exists for your request before continuing.
- type: dropdown
attributes:
label: What kind of request is this?
multiple: false
options:
- New feature
- Improvement of existing experience
- Other
- type: textarea
attributes:
label: What is your request or suggestion?
placeholder: |
e.g. I would like Eraser to add <this feature> so that I can use it in <my scenario>.
e.g. When using Eraser, the <workflow/component> has <this problem> and it would be better if it <behaved this way>.
- type: markdown
attributes:
value: |
Thanks for taking the time to fill out a request!
- type: checkboxes
id: idea
attributes:
label: "Are you willing to submit PRs to contribute to this feature request?"
description: "This is absolutely not required, but we are happy to guide you in the contribution process especially when you already have a good proposal or understanding of how to implement it. Join us at the `#eraser` channel on the [Kubernetes Slack](https://kubernetes.slack.com/archives/C03Q8KV8YQ4) if you have any questions."
options:
- label: Yes, I am willing to implement it.
================================================
FILE: .github/PULL_REQUEST_TEMPLATE.md
================================================
**What this PR does / why we need it**:
**Which issue(s) this PR fixes** *(optional, using `fixes #<issue number>(, fixes #<issue_number>, ...)` format, will close the issue(s) when the PR gets merged)*:
Fixes #
**Special notes for your reviewer**:
================================================
FILE: .github/dependabot.yml
================================================
version: 2
updates:
- package-ecosystem: "npm"
directory: "/docs"
schedule:
interval: "weekly"
commit-message:
prefix: "chore"
groups:
docusaurus:
patterns:
- "@docusaurus/*"
- package-ecosystem: "github-actions"
directory: "/"
schedule:
interval: "weekly"
commit-message:
prefix: "chore"
groups:
all:
patterns:
- "*"
- package-ecosystem: "gomod"
directory: "/"
schedule:
interval: "weekly"
commit-message:
prefix: "chore"
ignore:
- dependency-name: "*"
update-types:
- "version-update:semver-major"
- "version-update:semver-minor"
groups:
k8s:
patterns:
- "k8s.io/*"
exclude-patterns:
- "k8s.io/cri-api"
- package-ecosystem: docker
directory: /
schedule:
interval: weekly
- package-ecosystem: docker
directory: /build/tooling
schedule:
interval: weekly
================================================
FILE: .github/semantic.yml
================================================
titleOnly: true
types:
- build
- chore
- ci
- docs
- feat
- fix
- perf
- refactor
- revert
- style
- test
================================================
FILE: .github/workflows/README.md
================================================
# GitHub Workflows
This directory contains all of our workflows used in our GitHub CI/CD pipeline.
## Descriptions
### [Scan Images for Vulnerabilities (Trivy)](scan-images.yaml)
Our images are scheduled to be scanned for vulnerabilities using Trivy every Monday at 07:00 UTC.
#### Weekly Scans
By default, our images are built from the `main` branch, and any vulnerabilities caught are published in the [Github Security tab](https://github.com/eraser-dev/eraser/security).
#### Dispatching a Scan
We can do a manual dispatch of the workflow and specify the released version to scan, e.g. `v1.3.0-beta.0`. If left blank, the image will be built off of the branch the workflow is dispatched from.
If we want to publish those results to our [Github Security tab](https://github.com/eraser-dev/eraser/security), we need to toggle the `upload-results` input to `true`.
#### Scan Results
The scan results are automatically stored in the run artifacts. Those can be accessed by going into the workflow run, and under the run's **Summary** there is an **Artifacts** section storing all the images' scan results.
If the `upload-results` input is set to `true`, any vulnerabilities found will be published in the [Github Security tab](https://github.com/eraser-dev/eraser/security).
================================================
FILE: .github/workflows/build-id.yaml
================================================
name: Image build definitions for e2e tests
on:
workflow_call:
outputs:
build-id:
description: "random build id to keep things together"
value: ${{ jobs.generate-build-id.outputs.image-build-id }}
bucket-id:
description: "docker-images-"
value: ${{ jobs.generate-build-id.outputs.bucket-id }}
permissions:
contents: read
jobs:
generate-build-id:
name: "Generate Build ID"
runs-on: ubuntu-latest
steps:
- name: Harden Runner
uses: step-security/harden-runner@95d9a5deda9de15063e7595e9719c11c38c90ae2 # v2.13.2
with:
egress-policy: audit
- id: build-id
run: |
build_id="$(date +%s)"
echo build-id=$build_id | tee -a $GITHUB_OUTPUT
echo bucket-id=docker-images-$build_id | tee -a $GITHUB_OUTPUT
outputs:
image-build-id: ${{ steps.build-id.outputs.build-id }}
bucket-id: ${{ steps.build-id.outputs.bucket-id }}
================================================
FILE: .github/workflows/codeql.yaml
================================================
name: "CodeQL"
on:
push:
branches: [ main ]
schedule:
- cron: '0 7 * * 1' # Monday at 7:00 AM
permissions: read-all
jobs:
analyze:
name: Analyze
runs-on: ubuntu-latest
permissions:
actions: read
contents: read
security-events: write
strategy:
fail-fast: false
matrix:
language: [ 'go' ]
steps:
- name: Harden Runner
uses: step-security/harden-runner@95d9a5deda9de15063e7595e9719c11c38c90ae2
with:
egress-policy: audit
- name: Checkout repository
uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3
- name: Initialize CodeQL
uses: github/codeql-action/init@fdbfb4d2750291e159f0156def62b853c2798ca2
with:
languages: ${{ matrix.language }}
- name: Autobuild
uses: github/codeql-action/autobuild@fdbfb4d2750291e159f0156def62b853c2798ca2
- name: Perform CodeQL Analysis
uses: github/codeql-action/analyze@fdbfb4d2750291e159f0156def62b853c2798ca2
================================================
FILE: .github/workflows/dep-review.yaml
================================================
name: 'Dependency Review'
on: [pull_request]
permissions:
contents: read
jobs:
dependency-review:
runs-on: ubuntu-latest
steps:
- name: Harden Runner
uses: step-security/harden-runner@95d9a5deda9de15063e7595e9719c11c38c90ae2
with:
egress-policy: audit
- name: 'Checkout Repository'
uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3
- name: 'Dependency Review'
uses: actions/dependency-review-action@0659a74c94536054bfa5aeb92241f70d680cc78e
================================================
FILE: .github/workflows/deploy_docs.yaml
================================================
name: Generate docs website to GitHub Pages
on:
push:
branches:
- main
paths:
- '.github/workflows/deploy_docs.yaml'
- 'docs/**'
pull_request:
branches:
- main
paths:
- '.github/workflows/deploy_docs.yaml'
- 'docs/**'
permissions:
contents: read
jobs:
deploy:
name: Generate docs website to GitHub Pages
runs-on: ubuntu-latest
permissions:
contents: write
defaults:
run:
working-directory: docs
steps:
- uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0
- name: Harden Runner
uses: step-security/harden-runner@95d9a5deda9de15063e7595e9719c11c38c90ae2
with:
egress-policy: audit
- name: Setup Node
uses: actions/setup-node@2028fbc5c25fe9cf00d9f06a71cc4710d4507903 # v6.0.0
with:
node-version: 20.x
- name: Get yarn cache
id: yarn-cache
run: echo "dir=$(yarn cache dir)" >> $GITHUB_OUTPUT
- name: Cache dependencies
uses: actions/cache@0057852bfaa89a56745cba8c7296529d2fc39830 # v4.3.0
with:
path: ${{ steps.yarn-cache.outputs.dir }}
key: ${{ runner.os }}-website-${{ hashFiles('**/yarn.lock') }}
restore-keys: |
${{ runner.os }}-website-
- run: yarn install --frozen-lockfile
- run: yarn build
- name: Deploy to GitHub Pages
if: github.ref == 'refs/heads/main' && github.event_name == 'push' && github.repository == 'eraser-dev/eraser'
uses: peaceiris/actions-gh-pages@4f9cc6602d3f66b9c108549d475ec49e8ef4d45e # v4.0.0
with:
github_token: ${{ secrets.GITHUB_TOKEN }}
publish_dir: ./docs/build
destination_dir: ./docs
================================================
FILE: .github/workflows/e2e-build.yaml
================================================
name: Image build definitions for e2e tests
on:
workflow_call:
inputs:
bucket-id:
required: true
type: string
jobs:
build-remover:
name: "Build remover image for e2e tests"
runs-on: ubuntu-latest
timeout-minutes: 10
steps:
- name: Harden Runner
uses: step-security/harden-runner@95d9a5deda9de15063e7595e9719c11c38c90ae2
with:
egress-policy: audit
- name: Set up Go
uses: actions/setup-go@4dc6199c7b1a012772edbd06daecab0f50c9053c # v6.1.0
with:
go-version: "1.25"
check-latest: true
- name: Setup buildx instance
uses: docker/setup-buildx-action@e468171a9de216ec08956ac3ada2f0791b6bd435 # v3.11.1
with:
use: true
- uses: actions/cache@0057852bfaa89a56745cba8c7296529d2fc39830 # v4.3.0
with:
key: ${{ runner.OS }}-go-${{ hashFiles('**/go.sum') }}
restore-keys: |
${{ runner.os }}-go-
path: |
~/go/pkg/mod
~/.cache/go-build
- uses: crazy-max/ghaction-github-runtime@3cb05d89e1f492524af3d41a1c98c83bc3025124 # v3.1.0
- name: Check out code into the Go module directory
uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0
- run: 'echo ${{ inputs.bucket-id }}'
- name: Set env
run: |
echo REMOVER_REPO=remover >> $GITHUB_ENV
echo REMOVER_TAG=test >> $GITHUB_ENV
- name: Build remover
run: 'make docker-build-remover OUTPUT_TYPE=type=oci,dest=./${REMOVER_REPO}_${REMOVER_TAG}.tar,name=${REMOVER_REPO}:${REMOVER_TAG}'
- name: Upload Build Artifacts
uses: actions/upload-artifact@330a01c490aca151604b8cf639adc76d48f6c5d4 # v5.0.0
with:
name: ${{ inputs.bucket-id }}-remover
path: remover_test.tar
overwrite: true
build-trivy-scanner:
name: "Build trivy-scanner image for e2e tests"
runs-on: ubuntu-latest
timeout-minutes: 10
steps:
- name: Harden Runner
uses: step-security/harden-runner@95d9a5deda9de15063e7595e9719c11c38c90ae2
with:
egress-policy: audit
- name: Set up Go
uses: actions/setup-go@4dc6199c7b1a012772edbd06daecab0f50c9053c # v6.1.0
with:
go-version: "1.25"
check-latest: true
- name: Setup buildx instance
uses: docker/setup-buildx-action@e468171a9de216ec08956ac3ada2f0791b6bd435 # v3.11.1
with:
use: true
- uses: actions/cache@0057852bfaa89a56745cba8c7296529d2fc39830 # v4.3.0
with:
key: ${{ runner.OS }}-go-${{ hashFiles('**/go.sum') }}
restore-keys: |
${{ runner.os }}-go-
path: |
~/go/pkg/mod
~/.cache/go-build
- uses: crazy-max/ghaction-github-runtime@3cb05d89e1f492524af3d41a1c98c83bc3025124 # v3.1.0
- name: Check out code into the Go module directory
uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0
- name: Set env
run: |
echo TRIVY_SCANNER_REPO=scanner >> $GITHUB_ENV
echo TRIVY_SCANNER_TAG=test >> $GITHUB_ENV
- name: Build trivy-scanner
run: 'make docker-build-trivy-scanner OUTPUT_TYPE=type=oci,dest=./${TRIVY_SCANNER_REPO}_${TRIVY_SCANNER_TAG}.tar,name=${TRIVY_SCANNER_REPO}:${TRIVY_SCANNER_TAG}'
- name: Upload Build Artifacts
uses: actions/upload-artifact@330a01c490aca151604b8cf639adc76d48f6c5d4 # v5.0.0
with:
name: ${{ inputs.bucket-id }}-scanner
path: scanner_test.tar
overwrite: true
build-manager:
name: "Build manager image for e2e tests"
runs-on: ubuntu-latest
timeout-minutes: 10
steps:
- name: Harden Runner
uses: step-security/harden-runner@95d9a5deda9de15063e7595e9719c11c38c90ae2
with:
egress-policy: audit
- name: Set up Go
uses: actions/setup-go@4dc6199c7b1a012772edbd06daecab0f50c9053c # v6.1.0
with:
go-version: "1.25"
check-latest: true
- name: Setup buildx instance
uses: docker/setup-buildx-action@e468171a9de216ec08956ac3ada2f0791b6bd435 # v3.11.1
with:
use: true
- uses: actions/cache@0057852bfaa89a56745cba8c7296529d2fc39830 # v4.3.0
with:
key: ${{ runner.OS }}-go-${{ hashFiles('**/go.sum') }}
restore-keys: |
${{ runner.os }}-go-
path: |
~/go/pkg/mod
~/.cache/go-build
- uses: crazy-max/ghaction-github-runtime@3cb05d89e1f492524af3d41a1c98c83bc3025124 # v3.1.0
- name: Check out code into the Go module directory
uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0
- name: Set env
run: |
echo MANAGER_REPO=manager >> $GITHUB_ENV
echo MANAGER_TAG=test >> $GITHUB_ENV
- name: Build manager
run: 'make docker-build-manager OUTPUT_TYPE=type=oci,dest=./${MANAGER_REPO}_${MANAGER_TAG}.tar,name=${MANAGER_REPO}:${MANAGER_TAG}'
- name: Upload Build Artifacts
uses: actions/upload-artifact@330a01c490aca151604b8cf639adc76d48f6c5d4 # v5.0.0
with:
name: ${{ inputs.bucket-id }}-manager
path: manager_test.tar
overwrite: true
build-collector:
name: "Build collector image for e2e tests"
runs-on: ubuntu-latest
timeout-minutes: 10
steps:
- name: Harden Runner
uses: step-security/harden-runner@95d9a5deda9de15063e7595e9719c11c38c90ae2
with:
egress-policy: audit
- name: Set up Go
uses: actions/setup-go@4dc6199c7b1a012772edbd06daecab0f50c9053c # v6.1.0
with:
go-version: "1.25"
check-latest: true
- name: Setup buildx instance
uses: docker/setup-buildx-action@e468171a9de216ec08956ac3ada2f0791b6bd435 # v3.11.1
with:
use: true
- uses: actions/cache@0057852bfaa89a56745cba8c7296529d2fc39830 # v4.3.0
with:
key: ${{ runner.OS }}-go-${{ hashFiles('**/go.sum') }}
restore-keys: |
${{ runner.os }}-go-
path: |
~/go/pkg/mod
~/.cache/go-build
- uses: crazy-max/ghaction-github-runtime@3cb05d89e1f492524af3d41a1c98c83bc3025124 # v3.1.0
- name: Check out code into the Go module directory
uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0
- name: Set env
run: |
echo COLLECTOR_REPO=collector >> $GITHUB_ENV
echo COLLECTOR_TAG=test >> $GITHUB_ENV
- name: Build collector
run: 'make docker-build-collector OUTPUT_TYPE=type=oci,dest=./${COLLECTOR_REPO}_${COLLECTOR_TAG}.tar,name=${COLLECTOR_REPO}:${COLLECTOR_TAG}'
- name: Upload Build Artifacts
uses: actions/upload-artifact@330a01c490aca151604b8cf639adc76d48f6c5d4 # v5.0.0
with:
name: ${{ inputs.bucket-id }}-collector
path: collector_test.tar
overwrite: true
================================================
FILE: .github/workflows/e2e-test.yaml
================================================
name: Run E2E tests
on:
workflow_call:
inputs:
upgrade-test:
required: false
type: string
bucket-id:
required: true
type: string
permissions:
contents: read
jobs:
build-e2e-test-list:
name: "Build E2E Test List"
runs-on: ubuntu-latest
steps:
- name: Harden Runner
uses: step-security/harden-runner@95d9a5deda9de15063e7595e9719c11c38c90ae2 # v2.13.2
with:
egress-policy: audit
- name: Check out code into the Go module directory
uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0
- id: set-test-matrix
run: |
readarray -d '' test_dirs < <(find ./test/e2e/tests -mindepth 1 -type d -print0)
json_array="$(printf "%s\n" "${test_dirs[@]}" | jq -R . | jq -cs)"
echo "e2e-tests=${json_array}" >> $GITHUB_OUTPUT
outputs:
e2e-tests: ${{ steps.set-test-matrix.outputs.e2e-tests }}
e2e-test:
name: "E2E Tests"
runs-on: ubuntu-latest
timeout-minutes: 20
needs:
- build-e2e-test-list
permissions:
contents: write
strategy:
fail-fast: false
matrix:
KUBERNETES_VERSION: ["1.27.13", "1.28.9", "1.29.4", "1.30.2"]
E2E_TEST: ${{ fromJson(needs.build-e2e-test-list.outputs.e2e-tests) }}
steps:
- name: Harden Runner
uses: step-security/harden-runner@95d9a5deda9de15063e7595e9719c11c38c90ae2
with:
egress-policy: audit
- name: Check out code into the Go module directory
uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0
- name: Fetch Build Artifacts
uses: actions/download-artifact@018cc2cf5baa6db3ef3c5f8a56943fffe632ef53 # v6.0.0
with:
pattern: ${{ inputs.bucket-id }}-*
path: ${{ github.workspace }}/images
merge-multiple: true
- name: Set up Go
uses: actions/setup-go@4dc6199c7b1a012772edbd06daecab0f50c9053c # v6.1.0
with:
go-version: "1.25"
check-latest: true
- name: Set env
run: |
ARCHIVE_DIR=${{ github.workspace }}/images
REMOVER_REPO=remover
MANAGER_REPO=manager
COLLECTOR_REPO=collector
TRIVY_SCANNER_REPO=scanner
REMOVER_TAG=test
MANAGER_TAG=test
COLLECTOR_TAG=test
TRIVY_SCANNER_TAG=test
echo REMOVER_REPO=$REMOVER_REPO >> $GITHUB_ENV
echo MANAGER_REPO=$MANAGER_REPO >> $GITHUB_ENV
echo COLLECTOR_REPO=$COLLECTOR_REPO >> $GITHUB_ENV
echo TRIVY_SCANNER_REPO=$TRIVY_SCANNER_REPO >> $GITHUB_ENV
echo REMOVER_TAG=$REMOVER_TAG >> $GITHUB_ENV
echo MANAGER_TAG=$MANAGER_TAG >> $GITHUB_ENV
echo COLLECTOR_TAG=$COLLECTOR_TAG >> $GITHUB_ENV
echo TRIVY_SCANNER_TAG=$TRIVY_SCANNER_TAG >> $GITHUB_ENV
echo ARCHIVE_DIR=$ARCHIVE_DIR >> $GITHUB_ENV
echo REMOVER_TARBALL_PATH=$ARCHIVE_DIR/${REMOVER_REPO}_${REMOVER_TAG}.tar >> $GITHUB_ENV
echo MANAGER_TARBALL_PATH=$ARCHIVE_DIR/${MANAGER_REPO}_${MANAGER_TAG}.tar >> $GITHUB_ENV
echo COLLECTOR_TARBALL_PATH=$ARCHIVE_DIR/${COLLECTOR_REPO}_${COLLECTOR_TAG}.tar >> $GITHUB_ENV
echo SCANNER_TARBALL_PATH=$ARCHIVE_DIR/${TRIVY_SCANNER_REPO}_${TRIVY_SCANNER_TAG}.tar >> $GITHUB_ENV
if [[ -n "${{ inputs.upgrade-test }}" ]]; then
echo HELM_UPGRADE_TEST=1 >> $GITHUB_ENV
fi
- name: Run e2e test
run: |
make e2e-test \
KUBERNETES_VERSION=${{ matrix.KUBERNETES_VERSION }} \
E2E_TESTS=${{ matrix.E2E_TEST }}
- name: Remove slash from E2E_TEST
run: |
E2E_TEST=${{ matrix.E2E_TEST }}
E2E_TEST=${E2E_TEST//\//_}
echo "E2E_TEST=${E2E_TEST}" >> $GITHUB_ENV
- name: Upload artifacts
uses: actions/upload-artifact@330a01c490aca151604b8cf639adc76d48f6c5d4 # v5.0.0
if: always()
with:
name: test_logs_${{ matrix.KUBERNETES_VERSION }}_${{ env.E2E_TEST }}
path: ${{ github.workspace }}/test_logs/
retention-days: 1
overwrite: true
================================================
FILE: .github/workflows/patch-docs.yaml
================================================
name: patch_docs
on:
push:
tags:
- 'v[0-9]+.[0-9]+.[1-9]' # run this workflow when a new patch version is published (single-digit patch)
- 'v[0-9]+.[0-9]+.[0-9][0-9]+' # multi-digit patch versions (e.g. .10) — the old `[1-9]+` pattern missed any patch containing a 0
permissions:
contents: write
pull-requests: write
jobs:
patch-docs:
runs-on: ubuntu-22.04
steps:
- name: Harden Runner
uses: step-security/harden-runner@95d9a5deda9de15063e7595e9719c11c38c90ae2 # v2.13.2
with:
egress-policy: audit
- name: Set release version and target branch for vNext
if: github.event_name == 'push'
run: |
TAG="${GITHUB_REF#refs/tags/v}" # strip the literal prefix; `tr -d` would delete the character SET r/e/f/s/t/a/g/v anywhere in the tag
MAJOR_VERSION="$(echo "${TAG}" | cut -d '.' -f1)"
echo "MAJOR_VERSION=${MAJOR_VERSION}" >> ${GITHUB_ENV}
MINOR_VERSION="$(echo "${TAG}" | cut -d '.' -f2)"
echo "MINOR_VERSION=${MINOR_VERSION}" >> ${GITHUB_ENV}
PATCH_VERSION="$(echo "${TAG}" | cut -d '.' -f3)"
echo "PATCH_VERSION=${PATCH_VERSION}" >> ${GITHUB_ENV}
echo "TAG=${TAG}" >> ${GITHUB_ENV}
- uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3
with:
fetch-depth: 0
- name: Create release branch if needed # patched docs are always being merged to the main branch
run: |
git checkout main
- name: Create patch version docs
run: make patch-version-docs NEWVERSION=v${MAJOR_VERSION}.${MINOR_VERSION}.x TAG=v${TAG} OLDVERSION=v${MAJOR_VERSION}.${MINOR_VERSION}.$((PATCH_VERSION-1))
- name: Create release pull request
uses: peter-evans/create-pull-request@84ae59a2cdc2258d6fa0732dd66352dddae2a412 # v7.0.9
with:
commit-message: "chore: Patch docs for ${{ env.TAG }} release"
title: "chore: Patch docs for ${{ env.TAG }} release"
branch: "patch-docs-${{ env.TAG }}"
base: "main"
signoff: true
labels: |
release-pr
================================================
FILE: .github/workflows/release-pr.yaml
================================================
name: create_release_pull_request
on:
push:
tags:
- 'v[0-9]+.[0-9]+.0' # run this workflow when a new minor version is published
workflow_dispatch:
inputs:
release_version:
description: 'Which version are we creating a release pull request for?'
required: true
permissions:
contents: write
pull-requests: write
jobs:
create-release-pull-request:
runs-on: ubuntu-latest
steps:
- name: Harden Runner
uses: step-security/harden-runner@95d9a5deda9de15063e7595e9719c11c38c90ae2 # v2.13.2
with:
egress-policy: audit
- name: Set up Go
uses: actions/setup-go@4dc6199c7b1a012772edbd06daecab0f50c9053c # v6.1.0
with:
go-version: "1.25"
check-latest: true
- name: Set release version and target branch for vNext
if: github.event_name == 'push'
run: |
TAG="${GITHUB_REF#refs/tags/v}" # strip the literal prefix; 'tr -d' would delete every r/e/f/s/t/a/g/v character anywhere in the tag (breaking e.g. v1.5.0-beta.0)
MAJOR_VERSION="$(echo "${TAG}" | cut -d '.' -f1)"
echo "MAJOR_VERSION=${MAJOR_VERSION}" >> ${GITHUB_ENV}
MINOR_VERSION="$(echo "${TAG}" | cut -d '.' -f2)"
echo "MINOR_VERSION=${MINOR_VERSION}" >> ${GITHUB_ENV}
# increment the minor version by 1 for vNext
echo "NEWVERSION=v${MAJOR_VERSION}.$((MINOR_VERSION+1)).0-beta.0" >> ${GITHUB_ENV}
# pre-release is always being merged to the main branch
echo "TARGET_BRANCH=main" >> ${GITHUB_ENV}
echo "TAG=${TAG}" >> ${GITHUB_ENV}
- name: Set release version and target branch from input
if: github.event_name == 'workflow_dispatch'
run: |
NEWVERSION="${{ github.event.inputs.release_version }}"
echo "${NEWVERSION}" | grep -E '^v[0-9]+\.[0-9]+\.[0-9]+(-(beta|rc)\.[0-9]+)?$' || (echo "release_version should be in the format vX.Y.Z, vX.Y.Z-beta.A, or vX.Y.Z-rc.B" && exit 1)
echo "NEWVERSION=${NEWVERSION}" >> ${GITHUB_ENV}
echo "TAG=${NEWVERSION}" >> ${GITHUB_ENV}
MAJOR_VERSION="$(echo "${NEWVERSION}" | cut -d '.' -f1 | tr -d 'v')"
MINOR_VERSION="$(echo "${NEWVERSION}" | cut -d '.' -f2)"
# non-beta releases should always be merged to release branches
echo "TARGET_BRANCH=release-${MAJOR_VERSION}.${MINOR_VERSION}" >> ${GITHUB_ENV}
# beta releases should always be merged to main
if [[ "${NEWVERSION}" =~ "beta" ]]; then
echo "TARGET_BRANCH=main" >> ${GITHUB_ENV}
fi
- uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3
with:
fetch-depth: 0
- name: Create release branch if needed
run: |
git checkout "${TARGET_BRANCH}" && exit 0
# Create and push release branch if it doesn't exist
git checkout -b "${TARGET_BRANCH}"
git push --set-upstream origin "${TARGET_BRANCH}"
- run: make release-manifest promote-staging-manifest
- if: github.event_name == 'push'
run: make version-docs NEWVERSION=v${MAJOR_VERSION}.${MINOR_VERSION}.x TAG=v${TAG}
- name: Create release pull request
uses: peter-evans/create-pull-request@84ae59a2cdc2258d6fa0732dd66352dddae2a412 # v7.0.9
with:
commit-message: "chore: Prepare ${{ env.NEWVERSION }} release"
title: "chore: Prepare ${{ env.NEWVERSION }} release"
branch: "release-${{ env.NEWVERSION }}"
base: "${{ env.TARGET_BRANCH }}"
signoff: true
================================================
FILE: .github/workflows/release.yaml
================================================
name: release
on:
push:
# Sequence of patterns matched against refs/tags
tags:
- 'v*' # Push events to matching v*, i.e. v1.0, v20.15.10
env:
REGISTRY: ghcr.io
permissions:
contents: write
packages: write
jobs:
build-publish-release:
name: "release"
runs-on: ubuntu-latest
timeout-minutes: 60
steps:
- name: Harden Runner
uses: step-security/harden-runner@95d9a5deda9de15063e7595e9719c11c38c90ae2
with:
egress-policy: audit
- name: Check out code into the Go module directory
uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0
- name: Setup buildx instance
uses: docker/setup-buildx-action@e468171a9de216ec08956ac3ada2f0791b6bd435 # v3.11.1
with:
use: true
- uses: actions/cache@0057852bfaa89a56745cba8c7296529d2fc39830 # v4.3.0
with:
key: ${{ runner.OS }}-go-${{ hashFiles('**/go.sum') }}
restore-keys: |
${{ runner.os }}-go-
path: |
~/go/pkg/mod
~/.cache/go-build
- uses: crazy-max/ghaction-github-runtime@3cb05d89e1f492524af3d41a1c98c83bc3025124 # v3.1.0
- name: Get tag
run: |
echo "TAG=${GITHUB_REF#refs/tags/}" >> $GITHUB_ENV
- name: Log in to the GHCR
uses: docker/login-action@5e57cd118135c172c3672efd75eb46360885c0ef # v3.6.0
with:
registry: ${{ env.REGISTRY }}
username: ${{ github.actor }}
password: ${{ secrets.GITHUB_TOKEN }}
- name: Build eraser-manager
run: make docker-build-manager \
CACHE_FROM=type=gha,scope=eraser-manager \
CACHE_TO=type=gha,scope=eraser-manager,mode=max \
PLATFORM="linux/amd64,linux/arm64" \
OUTPUT_TYPE=type=registry \
GENERATE_ATTESTATIONS=true \
MANAGER_IMG=${{ env.REGISTRY }}/${GITHUB_REPOSITORY_OWNER}/eraser-manager:${TAG}
- name: Build remover
run: make docker-build-remover \
CACHE_FROM=type=gha,scope=eraser-node \
CACHE_TO=type=gha,scope=eraser-node,mode=max \
PLATFORM="linux/amd64,linux/arm64" \
OUTPUT_TYPE=type=registry \
GENERATE_ATTESTATIONS=true \
REMOVER_IMG=${{ env.REGISTRY }}/${GITHUB_REPOSITORY_OWNER}/remover:${TAG}
- name: Build collector
run: make docker-build-collector \
CACHE_FROM=type=gha,scope=collector \
CACHE_TO=type=gha,scope=collector,mode=max \
PLATFORM="linux/amd64,linux/arm64" \
OUTPUT_TYPE=type=registry \
GENERATE_ATTESTATIONS=true \
COLLECTOR_IMG=${{ env.REGISTRY }}/${GITHUB_REPOSITORY_OWNER}/collector:${TAG}
- name: Build Trivy scanner
run: make docker-build-trivy-scanner \
CACHE_FROM=type=gha,scope=trivy-scanner \
CACHE_TO=type=gha,scope=trivy-scanner,mode=max \
PLATFORM="linux/amd64,linux/arm64" \
OUTPUT_TYPE=type=registry \
GENERATE_ATTESTATIONS=true \
TRIVY_SCANNER_IMG=${{ env.REGISTRY }}/${GITHUB_REPOSITORY_OWNER}/eraser-trivy-scanner:${TAG}
- name: Create GitHub release
uses: marvinpinto/action-automatic-releases@919008cf3f741b179569b7a6fb4d8860689ab7f0 # v1.2.1
with:
repo_token: "${{ secrets.GITHUB_TOKEN }}"
prerelease: false
- name: Publish Helm chart
uses: stefanprodan/helm-gh-pages@0ad2bb377311d61ac04ad9eb6f252fb68e207260 # v1.7.0
with:
token: ${{ secrets.GITHUB_TOKEN }}
charts_dir: charts
target_dir: charts
linting: off
================================================
FILE: .github/workflows/scan-images.yaml
================================================
name: Scan Images for Vulnerabilities (Trivy)
run-name: Scan ${{ inputs.version == '' && github.ref_name || inputs.version }} images for vulnerabilities ${{ github.event_name == 'schedule' && '(scheduled)' || '' }}
on:
schedule:
- cron: "0 7 * * 1" # Run every Monday at 7:00 AM UTC
workflow_dispatch:
inputs:
version:
description: "Version of Eraser to run Trivy scans against. Leave empty to scan images built from the branch the action is running against."
type: string
required: false
default: ""
upload-results:
description: "Upload results to Github Security?"
type: boolean
required: true
default: false
permissions: read-all
env:
# Scanning released versions require the project `eraser-dev` as part of the registry name.
REGISTRY: ghcr.io/${{ github.event.inputs.version == '' && 'eraser-test' || 'eraser-dev' }}
TAG: ${{ github.event.inputs.version == '' && 'test' || github.event.inputs.version }}
jobs:
scan_vulnerabilities:
name: Scan ${{ matrix.data.image }} for vulnerabilities
runs-on: ubuntu-latest
timeout-minutes: 15
strategy:
matrix:
data:
- {image: remover, build_cmd: docker-build-remover, repo_environment_var: REMOVER_REPO}
- {image: eraser-manager, build_cmd: docker-build-manager, repo_environment_var: MANAGER_REPO}
- {image: collector, build_cmd: docker-build-collector, repo_environment_var: COLLECTOR_REPO}
- {image: eraser-trivy-scanner, build_cmd: docker-build-trivy-scanner, repo_environment_var: TRIVY_SCANNER_REPO}
steps:
- name: Harden Runner
uses: step-security/harden-runner@95d9a5deda9de15063e7595e9719c11c38c90ae2 # v2.13.2
with:
egress-policy: audit
- name: Check out code
if: github.event_name == 'schedule' || github.event.inputs.version == ''
uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0
- name: Build image
if: github.event_name == 'schedule' || github.event.inputs.version == ''
run: |
make ${{ matrix.data.build_cmd }} VERSION=${{ env.TAG }} ${{ matrix.data.repo_environment_var }}=${{ env.REGISTRY }}/${{ matrix.data.image }}
- name: Scan for vulnerabilities
uses: aquasecurity/trivy-action@b6643a29fecd7f34b3597bc6acb0a98b03d33ff8 # 0.33.1
with:
image-ref: ${{ env.REGISTRY }}/${{ matrix.data.image }}:${{ env.TAG }}
vuln-type: 'os,library'
ignore-unfixed: true
format: 'sarif'
output: ${{ matrix.data.image }}-results.sarif
- uses: actions/upload-artifact@330a01c490aca151604b8cf639adc76d48f6c5d4 # v5.0.0
with:
name: ${{ matrix.data.image }} Scan Results
path: ${{ matrix.data.image }}-results.sarif
overwrite: true
upload_vulnerabilities:
name: Upload ${{ matrix.image }} results to GitHub Security
runs-on: ubuntu-latest
needs: scan_vulnerabilities
if: github.event_name == 'schedule' || (github.event_name == 'workflow_dispatch' && github.event.inputs.upload-results == 'true')
permissions:
actions: read
contents: read
security-events: write
strategy:
matrix:
image: [remover, eraser-manager, collector, eraser-trivy-scanner]
steps:
- name: Harden Runner
uses: step-security/harden-runner@95d9a5deda9de15063e7595e9719c11c38c90ae2 # v2.13.2
with:
egress-policy: audit
- uses: actions/download-artifact@018cc2cf5baa6db3ef3c5f8a56943fffe632ef53 # v6.0.0
with:
name: ${{ matrix.image }} Scan Results
path: ${{ matrix.image }}-results.sarif
merge-multiple: true
- name: Upload results to GitHub Security
uses: github/codeql-action/upload-sarif@fdbfb4d2750291e159f0156def62b853c2798ca2 # v2.14.4
with:
sarif_file: ${{ matrix.image }}-results.sarif
================================================
FILE: .github/workflows/scorecard.yml
================================================
name: Scorecard supply-chain security
on:
# For Branch-Protection check. Only the default branch is supported. See
# https://github.com/ossf/scorecard/blob/main/docs/checks.md#branch-protection
branch_protection_rule:
# To guarantee Maintained check is occasionally updated. See
# https://github.com/ossf/scorecard/blob/main/docs/checks.md#maintained
schedule:
- cron: '0 17 * * 1'
push:
branches: [ "main" ]
# Declare default permissions as read only.
permissions: read-all
jobs:
analysis:
name: Scorecard analysis
runs-on: ubuntu-latest
permissions:
# Needed to upload the results to code-scanning dashboard.
security-events: write
# Needed to publish results and get a badge (see publish_results below).
id-token: write
steps:
- name: Harden Runner
uses: step-security/harden-runner@95d9a5deda9de15063e7595e9719c11c38c90ae2 # v2.13.2
with:
egress-policy: audit
- name: "Checkout code"
uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0
with:
persist-credentials: false
- name: "Run analysis"
uses: ossf/scorecard-action@4eaacf0543bb3f2c246792bd56e8cdeffafb205a # v2.4.3
with:
results_file: results.sarif
results_format: sarif
# (Optional) "write" PAT token. Uncomment the `repo_token` line below if:
# - you want to enable the Branch-Protection check on a *public* repository, or
# - you are installing Scorecard on a *private* repository
# To create the PAT, follow the steps in https://github.com/ossf/scorecard-action#authentication-with-pat.
# repo_token: ${{ secrets.SCORECARD_TOKEN }}
# Public repositories:
# - Publish results to OpenSSF REST API for easy access by consumers
# - Allows the repository to include the Scorecard badge.
# - See https://github.com/ossf/scorecard-action#publishing-results.
# For private repositories:
# - `publish_results` will always be set to `false`, regardless
# of the value entered here.
publish_results: true
# Upload the results as artifacts (optional). Commenting out will disable uploads of run results in SARIF
# format to the repository Actions tab.
- name: "Upload artifact"
uses: actions/upload-artifact@330a01c490aca151604b8cf639adc76d48f6c5d4 # v5.0.0
with:
name: SARIF file
path: results.sarif
retention-days: 5
overwrite: true
# Upload the results to GitHub's code scanning dashboard.
- name: "Upload to code-scanning"
uses: github/codeql-action/upload-sarif@fdbfb4d2750291e159f0156def62b853c2798ca2 # v2.14.4
with:
sarif_file: results.sarif
================================================
FILE: .github/workflows/test.yaml
================================================
name: test
on:
push:
paths-ignore:
- "**.md"
- "hack/**"
- "docs/**"
pull_request:
paths-ignore:
- "**.md"
- "hack/**"
- "docs/**"
env:
REGISTRY: ghcr.io
permissions: read-all
jobs:
generate-bucket-id:
name: "Generate build id for storage"
uses: ./.github/workflows/build-id.yaml
build-images:
name: "Build images for e2e tests"
uses: ./.github/workflows/e2e-build.yaml
needs:
- generate-bucket-id
with:
bucket-id: ${{ needs.generate-bucket-id.outputs.bucket-id }}
e2e-test:
name: "Run e2e tests"
uses: ./.github/workflows/e2e-test.yaml
permissions:
contents: write
needs:
- build-images
- generate-bucket-id
with:
bucket-id: ${{ needs.generate-bucket-id.outputs.bucket-id }}
lint:
name: "Lint"
runs-on: ubuntu-latest
timeout-minutes: 40
steps:
- name: Harden Runner
uses: step-security/harden-runner@95d9a5deda9de15063e7595e9719c11c38c90ae2
with:
egress-policy: audit
- uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0
- name: Set up Go
uses: actions/setup-go@4dc6199c7b1a012772edbd06daecab0f50c9053c # v6.1.0
with:
go-version: "1.25"
check-latest: true
- name: lint manager
uses: golangci/golangci-lint-action@e7fa5ac41e1cf5b7d48e45e42232ce7ada589601 # v9.1.0
with:
version: latest
args: --timeout=10m
- name: lint remover
uses: golangci/golangci-lint-action@e7fa5ac41e1cf5b7d48e45e42232ce7ada589601 # v9.1.0
with:
version: latest
working-directory: pkg/remover
skip-pkg-cache: true
args: --timeout=10m
- name: lint collector
uses: golangci/golangci-lint-action@e7fa5ac41e1cf5b7d48e45e42232ce7ada589601 # v9.1.0
with:
version: latest
working-directory: pkg/collector
skip-pkg-cache: true
args: --timeout=10m
- name: lint trivy scanner
uses: golangci/golangci-lint-action@e7fa5ac41e1cf5b7d48e45e42232ce7ada589601 # v9.1.0
with:
version: latest
working-directory: pkg/scanners/trivy
skip-pkg-cache: true
args: --timeout=10m
unit-test:
name: "Unit Tests"
runs-on: ubuntu-latest
timeout-minutes: 40
steps:
- name: Harden Runner
uses: step-security/harden-runner@95d9a5deda9de15063e7595e9719c11c38c90ae2
with:
egress-policy: audit
- name: Set up Go
uses: actions/setup-go@4dc6199c7b1a012772edbd06daecab0f50c9053c # v6.1.0
with:
go-version: "1.25"
check-latest: true
- uses: actions/cache@0057852bfaa89a56745cba8c7296529d2fc39830 # v4.3.0
with:
key: ${{ runner.OS }}-go-${{ hashFiles('**/go.sum') }}
restore-keys: |
${{ runner.os }}-go-
path: |
~/go/pkg/mod
~/.cache/go-build
- name: Check out code into the Go module directory
uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0
- name: Unit test
run: make test
- name: Codecov upload
uses: codecov/codecov-action@5a1091511ad55cbe89839c7260b706298ca349f7
with:
flags: unittests
file: ./cover.out
fail_ci_if_error: false
check-manifest:
name: "Check codegen and manifest"
runs-on: ubuntu-latest
timeout-minutes: 10
steps:
- name: Harden Runner
uses: step-security/harden-runner@95d9a5deda9de15063e7595e9719c11c38c90ae2
with:
egress-policy: audit
- name: Check out code into the Go module directory
uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0
- name: Set up Go
uses: actions/setup-go@4dc6199c7b1a012772edbd06daecab0f50c9053c # v6.1.0
with:
go-version: "1.25"
check-latest: true
- name: Check go.mod and manifests
run: |
go mod tidy
git diff --exit-code
make generate manifests
git diff --exit-code
scan_vulnerabilities:
name: "[Trivy] Scan for vulnerabilities"
runs-on: ubuntu-latest
timeout-minutes: 15
permissions:
contents: read
steps:
- name: Harden Runner
uses: step-security/harden-runner@95d9a5deda9de15063e7595e9719c11c38c90ae2
with:
egress-policy: audit
- name: Check out code into the Go module directory
uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0
- name: Get repo
run: |
echo "REPO=$(echo $GITHUB_REPOSITORY | awk '{print tolower($0)}')" >> $GITHUB_ENV
- name: Build eraser-manager
run: |
make docker-build-manager MANAGER_REPO=${{ env.REGISTRY }}/${REPO}-manager MANAGER_TAG=test
- name: Build remover
run: |
make docker-build-remover REMOVER_REPO=${{ env.REGISTRY }}/remover REMOVER_TAG=test
- name: Build collector
run: |
make docker-build-collector COLLECTOR_REPO=${{ env.REGISTRY }}/collector COLLECTOR_TAG=test
- name: Build trivy scanner
run: |
make docker-build-trivy-scanner TRIVY_SCANNER_REPO=${{ env.REGISTRY }}/${REPO}-trivy-scanner TRIVY_SCANNER_TAG=test
- name: Run trivy for remover
uses: aquasecurity/trivy-action@b6643a29fecd7f34b3597bc6acb0a98b03d33ff8
with:
image-ref: ${{ env.REGISTRY }}/remover:test
exit-code: "1"
ignore-unfixed: true
vuln-type: "os,library"
- name: Run trivy for eraser-manager
uses: aquasecurity/trivy-action@b6643a29fecd7f34b3597bc6acb0a98b03d33ff8
with:
image-ref: ${{ env.REGISTRY }}/${{ env.REPO }}-manager:test
exit-code: "1"
ignore-unfixed: true
vuln-type: "os,library"
- name: Run trivy for collector
uses: aquasecurity/trivy-action@b6643a29fecd7f34b3597bc6acb0a98b03d33ff8
with:
image-ref: ${{ env.REGISTRY }}/collector:test
exit-code: "1"
ignore-unfixed: true
vuln-type: "os,library"
- name: Run trivy for trivy-scanner
uses: aquasecurity/trivy-action@b6643a29fecd7f34b3597bc6acb0a98b03d33ff8
with:
image-ref: ${{ env.REGISTRY }}/${{ env.REPO }}-trivy-scanner:test
exit-code: "1"
ignore-unfixed: true
vuln-type: "os,library"
================================================
FILE: .github/workflows/upgrade.yaml
================================================
name: upgrade
on:
push:
paths:
- "manifest_staging/charts/**"
- ".github/workflows/upgrade.yaml"
pull_request:
paths:
- "manifest_staging/charts/**"
- ".github/workflows/upgrade.yaml"
env:
REGISTRY: ghcr.io
permissions: read-all
jobs:
generate-bucket-id:
name: "Generate build id for storage"
uses: ./.github/workflows/build-id.yaml
build-images:
name: "Build images for e2e tests"
uses: ./.github/workflows/e2e-build.yaml
needs:
- generate-bucket-id
with:
bucket-id: ${{ needs.generate-bucket-id.outputs.bucket-id }}
e2e-test:
name: "Run e2e tests"
uses: ./.github/workflows/e2e-test.yaml
permissions:
contents: write
needs:
- build-images
- generate-bucket-id
with:
upgrade-test: "1"
bucket-id: ${{ needs.generate-bucket-id.outputs.bucket-id }}
================================================
FILE: .gitignore
================================================
# Binaries for programs and plugins
*.exe
*.exe~
*.dll
*.so
*.dylib
bin
testbin/*
.eraser
/pkg/eraser/eraser
# Test binary, build with `go test -c`
*.test
# Output of the go coverage tool, specifically when used with LiteIDE
*.out
# Kubernetes Generated files - skip generated files, except for vendored files
!vendor/**/zz_generated.*
# editor and IDE paraphernalia
.idea
*.swp
*.swo
*~
# history files
.history
.vscode/
# macOS
.DS_Store
# docs site
node_modules/
.docusaurus/
/docs/build/
!/build/
/build/*
!/build/tooling/
!/build/version.sh
# e2e test log outputs
test/e2e/tests/eraser_logs/
eraser
remover
================================================
FILE: .golangci.yaml
================================================
version: "2"
run:
go: "1.25"
linters:
default: none
enable:
- errcheck
- copyloopvar # replacement for exportloopref
- forcetypeassert
- gocritic
- goconst
- godot
- gosec
- govet
- ineffassign
- misspell
# - revive # replacement for golint
- staticcheck # includes gosimple and staticcheck
- unused
- whitespace
settings:
gocritic:
enabled-tags:
- performance
gosec:
excludes:
- G108
lll:
line-length: 200
misspell:
locale: US
exclusions:
paths:
- "docs/build/assets/files/.*\\.go"
formatters:
enable:
- gofmt
- gofumpt
- goimports
================================================
FILE: .trivyignore
================================================
GHSA-6xv5-86q9-7xr8
================================================
FILE: CODEOWNERS
================================================
# Global approvers
* @ashnamehrotra @pmengelbert @sozercan
================================================
FILE: CODE_OF_CONDUCT.md
================================================
# CNCF Code of Conduct
This project has adopted the [CNCF Community Code of Conduct](https://github.com/cncf/foundation/blob/main/code-of-conduct.md).
Resources:
- [CNCF Code of Conduct](https://github.com/cncf/foundation/blob/main/code-of-conduct.md)
- [Reporting](https://github.com/cncf/foundation/blob/main/code-of-conduct.md)
================================================
FILE: Dockerfile
================================================
# syntax=docker/dockerfile:1.6
# Default Trivy binary image, overwritten by Makefile
ARG TRIVY_BINARY_IMG="ghcr.io/aquasecurity/trivy:0.67.2"
ARG BUILDKIT_SBOM_SCAN_STAGE=builder,manager-build,collector-build,remover-build,trivy-scanner-build
FROM --platform=$TARGETPLATFORM $TRIVY_BINARY_IMG AS trivy-binary
# Build the manager binary
FROM --platform=$BUILDPLATFORM golang:1.25-bookworm AS builder
WORKDIR /workspace
# Copy the Go Modules manifests
COPY go.mod go.mod
COPY go.sum go.sum
# cache deps before building and copying source so that we don't need to re-download as much
# and so that source changes don't invalidate our downloaded layer
ENV GOCACHE=/root/gocache
ENV CGO_ENABLED=0
RUN \
--mount=type=cache,target=${GOCACHE} \
--mount=type=cache,target=/go/pkg/mod \
go mod download
COPY . .
ARG LDFLAGS
ARG TARGETOS
ARG TARGETARCH
FROM builder AS manager-build
RUN \
--mount=type=cache,target=${GOCACHE} \
--mount=type=cache,target=/go/pkg/mod \
GOOS=${TARGETOS} GOARCH=${TARGETARCH} go build ${LDFLAGS:+-ldflags "$LDFLAGS"} -o out/manager main.go
FROM builder AS collector-build
RUN \
--mount=type=cache,target=${GOCACHE} \
--mount=type=cache,target=/go/pkg/mod \
GOOS=${TARGETOS} GOARCH=${TARGETARCH} go build ${LDFLAGS:+-ldflags "$LDFLAGS"} -o out/collector ./pkg/collector
FROM builder AS remover-build
RUN \
--mount=type=cache,target=${GOCACHE} \
--mount=type=cache,target=/go/pkg/mod \
GOOS=${TARGETOS} GOARCH=${TARGETARCH} go build ${LDFLAGS:+-ldflags "$LDFLAGS"} -o out/remover ./pkg/remover
FROM builder AS trivy-scanner-build
RUN \
--mount=type=cache,target=${GOCACHE} \
--mount=type=cache,target=/go/pkg/mod \
GOOS=${TARGETOS} GOARCH=${TARGETARCH} go build ${LDFLAGS:+-ldflags "$LDFLAGS"} -o out/trivy-scanner ./pkg/scanners/trivy
FROM --platform=$TARGETPLATFORM gcr.io/distroless/static:nonroot AS manager
WORKDIR /
COPY --from=manager-build /workspace/out/manager .
USER 65532:65532
ENTRYPOINT ["/manager"]
FROM --platform=$TARGETPLATFORM gcr.io/distroless/static:latest AS collector
COPY --from=collector-build /workspace/out/collector /
ENTRYPOINT ["/collector"]
FROM --platform=$TARGETPLATFORM gcr.io/distroless/static:latest AS remover
COPY --from=remover-build /workspace/out/remover /
ENTRYPOINT ["/remover"]
FROM --platform=$TARGETPLATFORM gcr.io/distroless/static:latest AS trivy-scanner
COPY --from=trivy-scanner-build /workspace/out/trivy-scanner /
COPY --from=trivy-binary /usr/local/bin/trivy /
WORKDIR /var/lib/trivy
ENTRYPOINT ["/trivy-scanner"]
FROM gcr.io/distroless/static-debian12:nonroot AS non-vulnerable
COPY --from=builder /tmp /tmp
================================================
FILE: LICENSE
================================================
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "[]"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright 2023 The Linux Foundation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
================================================
FILE: MAINTAINERS.md
================================================
# Maintainers
Maintainers:
- Sertaç Özercan (@sozercan)
- Ashna Mehrotra (@ashnamehrotra)
- Peter Engelbert (@pmengelbert)
- Brian Goff (@cpuguy83)
================================================
FILE: Makefile
================================================
VERSION := v1.5.0-beta.0
MANAGER_TAG ?= ${VERSION}
TRIVY_SCANNER_TAG ?= ${VERSION}
COLLECTOR_TAG ?= ${VERSION}
REMOVER_TAG ?= ${VERSION}
# Image URL to use all building/pushing image targets
TRIVY_SCANNER_REPO ?= ghcr.io/eraser-dev/eraser-trivy-scanner
TRIVY_SCANNER_IMG ?= ${TRIVY_SCANNER_REPO}:${TRIVY_SCANNER_TAG}
TRIVY_BINARY_REPO ?= ghcr.io/aquasecurity/trivy
TRIVY_BINARY_TAG ?= 0.67.2
TRIVY_BINARY_IMG ?= ${TRIVY_BINARY_REPO}:${TRIVY_BINARY_TAG}
MANAGER_REPO ?= ghcr.io/eraser-dev/eraser-manager
MANAGER_IMG ?= ${MANAGER_REPO}:${MANAGER_TAG}
REMOVER_REPO ?= ghcr.io/eraser-dev/remover
REMOVER_IMG ?= ${REMOVER_REPO}:${REMOVER_TAG}
COLLECTOR_REPO ?= ghcr.io/eraser-dev/collector
COLLECTOR_IMG ?= ${COLLECTOR_REPO}:${COLLECTOR_TAG}
VULNERABLE_IMG ?= docker.io/library/alpine:3.7.3
EOL_IMG ?= docker.io/library/alpine:3.6
BUSYBOX_BASE_IMG ?= busybox:1.36.0
NON_VULNERABLE_IMG ?= ghcr.io/eraser-dev/non-vulnerable:latest
E2E_TESTS ?= $(shell find ./test/e2e/tests/ -mindepth 1 -type d)
API_VERSIONS ?= ./api/v1alpha1,./api/v1,./api/v1alpha2,./api/v1alpha3
HELM_UPGRADE_TEST ?=
TEST_LOGDIR ?= $(PWD)/test_logs
REMOVER_TARBALL_PATH ?=
MANAGER_TARBALL_PATH ?=
COLLECTOR_TARBALL_PATH ?=
SCANNER_TARBALL_PATH ?=
KUSTOMIZE_VERSION ?= 3.8.9
KUBERNETES_VERSION ?= 1.29.2
NODE_VERSION ?= 20-bullseye-slim
ENVTEST_K8S_VERSION ?= 1.25
GOLANGCI_LINT_VERSION := 1.43.0
PLATFORM ?= linux
# build variables
LDFLAGS ?= $(shell build/version.sh "${VERSION}")
ERASER_LDFLAGS ?= -extldflags=-static $(LDFLAGS) -w
TRIVY_SCANNER_LDFLAGS ?= $(ERASER_LDFLAGS) -X 'main.trivyVersion=v$(TRIVY_BINARY_TAG)'
# Get the currently used golang install path (in GOPATH/bin, unless GOBIN is set)
ifeq (,$(shell go env GOBIN))
GOBIN=$(shell go env GOPATH)/bin
else
GOBIN=$(shell go env GOBIN)
endif
ifdef CACHE_TO
_CACHE_TO := --cache-to $(CACHE_TO)
endif
ifdef CACHE_FROM
_CACHE_FROM := --cache-from $(CACHE_FROM)
endif
ifdef GENERATE_ATTESTATIONS
_ATTESTATIONS := --attest type=sbom --attest type=provenance,mode=max
endif
IDFLAGS=
ifeq (false,$(shell hack/rootless_docker.sh))
IDFLAGS=-u $(shell id -u):$(shell id -g)
endif
OUTPUT_TYPE ?= type=docker
TOOLS_DIR := hack/tools
TOOLS_BIN_DIR := $(abspath $(TOOLS_DIR)/bin)
GO_INSTALL := ./hack/go-install.sh
GOLANGCI_LINT_BIN := golangci-lint
GOLANGCI_LINT := $(TOOLS_BIN_DIR)/$(GOLANGCI_LINT_BIN)-v$(GOLANGCI_LINT_VERSION)
TEST_COUNT ?= 1
TIMEOUT ?= 1800s
$(GOLANGCI_LINT):
GOBIN=$(TOOLS_BIN_DIR) $(GO_INSTALL) github.com/golangci/golangci-lint/cmd/golangci-lint $(GOLANGCI_LINT_BIN) v$(GOLANGCI_LINT_VERSION)
# Setting SHELL to bash allows bash commands to be executed by recipes.
# This is a requirement for 'setup-envtest.sh' in the test target.
# Options are set to exit when a recipe line exits non-zero or a piped command fails.
SHELL = /usr/bin/env bash -o pipefail
.SHELLFLAGS = -ec
all: build
##@ General
# The help target prints out all targets with their descriptions organized
# beneath their categories. The categories are represented by '##@' and the
# target descriptions by '##'. The awk command is responsible for reading the
# entire set of makefiles included in this invocation, looking for lines of the
# file as xyz: ## something, and then pretty-format the target and help. Then,
# if there's a line with ##@ something, that gets pretty-printed as a category.
# More info on the usage of ANSI control characters for terminal formatting:
# https://en.wikipedia.org/wiki/ANSI_escape_code#SGR_parameters
# More info on the awk command:
# http://linuxcommand.org/lc3_adv_awk.php
help: ## Display this help.
@awk 'BEGIN {FS = ":.*##"; printf "\nUsage:\n make \033[36m\033[0m\n"} /^[a-zA-Z_0-9-]+:.*?##/ { printf " \033[36m%-15s\033[0m %s\n", $$1, $$2 } /^##@/ { printf "\n\033[1m%s\033[0m\n", substr($$0, 5) } ' $(MAKEFILE_LIST)
##@ Linting
.PHONY: lint
lint: $(GOLANGCI_LINT) ## Runs go linting.
$(GOLANGCI_LINT) run -v
##@ Development
#kustomize_
manifests: __manifest_kustomize __helm_kustomize __controller-gen ## Generates k8s yaml for eraser deployment.
$(CONTROLLER_GEN) \
crd \
rbac:roleName=manager-role \
webhook \
paths="./..." \
output:crd:artifacts:config=config/crd/bases
rm -rf manifest_staging
mkdir -p manifest_staging/deploy
mkdir -p manifest_staging/charts/eraser
$(MANIFEST_KUSTOMIZE) build /eraser/config/default -o /eraser/manifest_staging/deploy/eraser.yaml
$(HELM_KUSTOMIZE) build \
--load_restrictor LoadRestrictionsNone /eraser/third_party/open-policy-agent/gatekeeper/helmify | \
go run third_party/open-policy-agent/gatekeeper/helmify/*.go
# Generate code containing DeepCopy, DeepCopyInto, and DeepCopyObject method
# implementations. Also generate conversions between structs of different API versions.
generate: __conversion-gen __controller-gen
$(CONTROLLER_GEN) object:headerFile="hack/boilerplate.go.txt" paths="./api/..."
$(CONVERSION_GEN) \
--output-base=/eraser \
--input-dirs=$(API_VERSIONS) \
--go-header-file=./hack/boilerplate.go.txt \
--output-file-base=zz_generated.conversion
fmt: ## Run go fmt against code.
go fmt ./...
vet: ## Run go vet against code.
go vet ./...
test: manifests generate fmt vet envtest ## Run unit tests.
KUBEBUILDER_ASSETS="$(shell $(ENVTEST) use $(ENVTEST_K8S_VERSION) -p path)" go test ./... -coverprofile cover.out
busybox-img:
docker build -t busybox-e2e-test:latest \
-f test/e2e/test-data/Dockerfile.busybox \
--build-arg IMG=$(BUSYBOX_BASE_IMG) test/e2e/test-data
BUSYBOX_IMG=busybox-e2e-test:latest
collector-dummy-img:
docker build -t $(COLLECTOR_REPO):dummy \
-f test/e2e/test-data/Dockerfile.dummyCollector \
test/e2e/test-data
COLLECTOR_IMAGE_DUMMY=$(COLLECTOR_REPO):dummy
vulnerable-img:
docker pull $(VULNERABLE_IMG)
eol-img:
docker pull $(EOL_IMG)
non-vulnerable-img:
docker buildx build \
$(_CACHE_FROM) $(_CACHE_TO) \
--build-arg LDFLAGS="$(LDFLAGS)" \
--platform="$(PLATFORM)" \
--output=$(OUTPUT_TYPE) \
-t ${NON_VULNERABLE_IMG} \
--target non-vulnerable .
custom-node-v$(KUBERNETES_VERSION):
docker build -t custom-node:v$(KUBERNETES_VERSION) \
-f test/e2e/test-data/Dockerfile.customNode \
--build-arg KUBERNETES_VERSION=${KUBERNETES_VERSION} test/e2e/test-data
MODIFIED_NODE_IMAGE=custom-node:v$(KUBERNETES_VERSION)
e2e-test: vulnerable-img eol-img non-vulnerable-img busybox-img collector-dummy-img custom-node-v$(KUBERNETES_VERSION)
for test in $(E2E_TESTS); do \
CGO_ENABLED=0 \
PROJECT_ABSOLUTE_PATH=$(CURDIR) \
REMOVER_TARBALL_PATH=${REMOVER_TARBALL_PATH} \
MANAGER_TARBALL_PATH=${MANAGER_TARBALL_PATH} \
COLLECTOR_TARBALL_PATH=${COLLECTOR_TARBALL_PATH} \
SCANNER_TARBALL_PATH=${SCANNER_TARBALL_PATH} \
HELM_UPGRADE_TEST=${HELM_UPGRADE_TEST} \
REMOVER_IMAGE=${REMOVER_IMG} \
MANAGER_IMAGE=${MANAGER_IMG} \
COLLECTOR_IMAGE=${COLLECTOR_IMG} \
SCANNER_IMAGE=${TRIVY_SCANNER_IMG} \
BUSYBOX_IMAGE=${BUSYBOX_IMG} \
COLLECTOR_IMAGE_DUMMY=${COLLECTOR_IMAGE_DUMMY} \
VULNERABLE_IMAGE=${VULNERABLE_IMG} \
NON_VULNERABLE_IMAGE=${NON_VULNERABLE_IMG} \
EOL_IMAGE=${EOL_IMG} \
NODE_VERSION=kindest/node:v${KUBERNETES_VERSION} \
MODIFIED_NODE_IMAGE=${MODIFIED_NODE_IMAGE} \
TEST_LOGDIR=${TEST_LOGDIR} \
go test -count=$(TEST_COUNT) -timeout=$(TIMEOUT) $(TESTFLAGS) -tags=e2e -v $$test ; \
done
##@ Build
build: generate fmt vet ## Build manager binary.
go build -o bin/manager -ldflags "$(LDFLAGS)" main.go
run: manifests generate fmt vet ## Run a controller from your host.
go run ./main.go
docker-build-manager: ## Build docker image with the manager.
docker buildx build \
$(_CACHE_FROM) $(_CACHE_TO) \
$(_ATTESTATIONS) \
--build-arg LDFLAGS="$(LDFLAGS)" \
--platform="$(PLATFORM)" \
--output=$(OUTPUT_TYPE) \
-t ${MANAGER_IMG} \
--target manager .
docker-build-trivy-scanner: ## Build docker image for trivy-scanner image.
docker buildx build \
$(_CACHE_FROM) $(_CACHE_TO) \
$(_ATTESTATIONS) \
--build-arg TRIVY_BINARY_IMG="$(TRIVY_BINARY_IMG)" \
--build-arg LDFLAGS="$(TRIVY_SCANNER_LDFLAGS)" \
--platform="$(PLATFORM)" \
--output=$(OUTPUT_TYPE) \
-t ${TRIVY_SCANNER_IMG} \
--target trivy-scanner .
docker-build-remover: ## Build docker image for remover image.
docker buildx build \
$(_CACHE_FROM) $(_CACHE_TO) \
$(_ATTESTATIONS) \
--build-arg LDFLAGS="$(ERASER_LDFLAGS)" \
--platform="$(PLATFORM)" \
--output=$(OUTPUT_TYPE) \
-t ${REMOVER_IMG} \
--target remover .
docker-build-collector:
docker buildx build \
$(_CACHE_FROM) $(_CACHE_TO) \
$(_ATTESTATIONS) \
--build-arg LDFLAGS="$(LDFLAGS)" \
--platform="$(PLATFORM)" \
--output=$(OUTPUT_TYPE) \
-t ${COLLECTOR_IMG} \
--target collector .
##@ Deployment
install: __manifest_kustomize ## Install CRDs into the K8s cluster specified in ~/.kube/config.
$(MANIFEST_KUSTOMIZE) build /eraser/config/crd | kubectl apply -f -
uninstall: __manifest_kustomize ## Uninstall CRDs from the K8s cluster specified in ~/.kube/config.
$(MANIFEST_KUSTOMIZE) build /eraser/config/crd | kubectl delete -f -
deploy: __manifest_kustomize ## Deploy controller to the K8s cluster specified in ~/.kube/config.
$(MANIFEST_KUSTOMIZE) build /eraser/config/default | kubectl apply -f -
undeploy: __manifest_kustomize ## Undeploy controller from the K8s cluster specified in ~/.kube/config.
$(MANIFEST_KUSTOMIZE) build /eraser/config/default | kubectl delete -f -
##@ Release
release-manifest: ## Generates manifests for a release.
@sed -i -e 's/^VERSION := .*/VERSION := ${NEWVERSION}/' ./Makefile
@sed -i'' -e 's@image: $(REPOSITORY):.*@image: $(REPOSITORY):'"$(NEWVERSION)"'@' ./config/manager/manager.yaml
@sed -i "s/appVersion: .*/appVersion: ${NEWVERSION}/" ./third_party/open-policy-agent/gatekeeper/helmify/static/Chart.yaml
@sed -i "s/version: .*/version: $$(echo ${NEWVERSION} | cut -c2-)/" ./third_party/open-policy-agent/gatekeeper/helmify/static/Chart.yaml
@sed -Ei 's/(tag:\s*).*/\1"$(NEWVERSION)"/' ./third_party/open-policy-agent/gatekeeper/helmify/static/values.yaml
@sed -i 's/Current release version: `.*`/Current release version: `'"${NEWVERSION}"'`/' ./third_party/open-policy-agent/gatekeeper/helmify/static/README.md
@sed -i 's/https:\/\/raw\.githubusercontent\.com\/eraser-dev\/eraser\/master\/deploy\/eraser\.yaml.*/https:\/\/raw\.githubusercontent\.com\/eraser-dev\/eraser\/${NEWVERSION}\/deploy\/eraser\.yaml/' ./docs/docs/installation.md
export
$(MAKE) manifests
promote-staging-manifest: ## Promotes the k8s deployment yaml files to release.
@rm -rf deploy
@cp -r manifest_staging/deploy .
@rm -rf charts
@cp -r manifest_staging/charts .
ENVTEST = $(shell pwd)/bin/setup-envtest
.PHONY: envtest
envtest: __tooling-image bin/setup-envtest
bin/setup-envtest:
docker run --rm -v $(shell pwd)/bin:/go/bin -e GO111MODULE=on eraser-tooling go install sigs.k8s.io/controller-runtime/tools/setup-envtest@v0.0.0-20240320141353-395cfc7486e6
__controller-gen: __tooling-image
CONTROLLER_GEN=docker run --rm -v $(shell pwd):/eraser eraser-tooling controller-gen
__conversion-gen: __tooling-image
CONVERSION_GEN=docker run --rm -v $(shell pwd):/eraser eraser-tooling conversion-gen
__manifest_kustomize: __kustomize-manifest-image
MANIFEST_KUSTOMIZE=docker run --rm -v $(shell pwd)/manifest_staging:/eraser/manifest_staging manifest-kustomize
__helm_kustomize: __kustomize-helm-image
HELM_KUSTOMIZE=docker run --rm -v $(shell pwd)/manifest_staging:/eraser/manifest_staging -v $(shell pwd)/third_party:/eraser/third_party helm-kustomize
__tooling-image:
docker build . \
-t eraser-tooling \
-f build/tooling/Dockerfile
__kustomize-helm-image:
docker build . \
-t helm-kustomize \
--build-arg KUSTOMIZE_VERSION=${KUSTOMIZE_VERSION} \
-f build/tooling/Dockerfile.helm
__kustomize-manifest-image:
docker build . \
-t manifest-kustomize \
--build-arg KUSTOMIZE_VERSION=${KUSTOMIZE_VERSION} \
--build-arg TRIVY_SCANNER_REPO=${TRIVY_SCANNER_REPO} \
--build-arg MANAGER_REPO=${MANAGER_REPO} \
--build-arg REMOVER_REPO=${REMOVER_REPO} \
--build-arg COLLECTOR_REPO=${COLLECTOR_REPO} \
--build-arg MANAGER_TAG=${MANAGER_TAG} \
--build-arg TRIVY_SCANNER_TAG=${TRIVY_SCANNER_TAG} \
--build-arg COLLECTOR_TAG=${COLLECTOR_TAG} \
--build-arg REMOVER_TAG=${REMOVER_TAG} \
-f build/tooling/Dockerfile.manifest
# Tags a new version for docs
.PHONY: version-docs
version-docs:
docker run --rm \
-v $(shell pwd)/docs:/docs \
-w /docs \
$(IDFLAGS) \
node:${NODE_VERSION} \
sh -c "yarn install --frozen-lockfile && yarn run docusaurus docs:version ${NEWVERSION}"
@sed -i 's/https:\/\/raw\.githubusercontent\.com\/eraser-dev\/eraser\/main\/deploy\/eraser\.yaml.*/https:\/\/raw\.githubusercontent\.com\/eraser-dev\/eraser\/${TAG}\/deploy\/eraser\.yaml/' ./docs/versioned_docs/version-${NEWVERSION}/installation.md
.PHONY: patch-version-docs
patch-version-docs:
@sed -i 's/https:\/\/raw\.githubusercontent\.com\/eraser-dev\/eraser\/${OLDVERSION}\/deploy\/eraser\.yaml.*/https:\/\/raw\.githubusercontent\.com\/eraser-dev\/eraser\/${TAG}\/deploy\/eraser\.yaml/' ./docs/versioned_docs/version-${NEWVERSION}/installation.md
================================================
FILE: PROJECT
================================================
domain: eraser-dev.io
layout:
- go.kubebuilder.io/v3
projectName: eraser
repo: github.com/eraser-dev/eraser
resources:
- api:
crdVersion: v1alpha1
namespaced: true
controller: true
domain: eraser-dev.io
group: eraser.sh
kind: ImageList
path: eraser.io/eraser/api/v1alpha1
version: v1alpha1
- controller: true
domain: eraser-dev.io
group: eraser.sh
kind: ImageCollector
version: v1alpha1
- domain: eraser-dev.io
group: eraser.sh
kind: ImageList
version: v1
- api:
crdVersion: v1
namespaced: true
domain: eraser.io
group: eraser.sh
kind: EraserConfig
path: github.com/eraser-dev/eraser/api/v1alpha1
version: v1alpha1
- domain: eraser.io
group: eraser.sh
kind: EraserConfig
version: v1alpha2
version: "3"
================================================
FILE: README.md
================================================
# Eraser: Cleaning up Images from Kubernetes Nodes

[](https://app.fossa.com/projects/git%2Bgithub.com%2Feraser-dev%2Feraser?ref=badge_shield)
[](https://www.bestpractices.dev/projects/7622)
[](https://api.securityscorecards.dev/projects/github.com/eraser-dev/eraser)
Eraser helps Kubernetes admins remove a list of non-running images from all Kubernetes nodes in a cluster.
## Getting started
You can find a quick start guide in the Eraser [documentation](https://eraser-dev.github.io/eraser/docs/quick-start).
## Demo

## Contributing
There are several ways to get involved:
- Join the [mailing list](https://groups.google.com/u/1/g/eraser-dev) to get notifications for releases, security announcements, etc.
- Join the [biweekly community meetings](https://docs.google.com/document/d/1Sj5u47K3WUGYNPmQHGFpb52auqZb1FxSlWAQnPADhWI/edit) to discuss development, issues, use cases, etc.
- Join the `#eraser` channel on the [Kubernetes Slack](https://kubernetes.slack.com/archives/C03Q8KV8YQ4)
- View the development setup instructions in the [documentation](https://eraser-dev.github.io/eraser/docs/setup)
This project welcomes contributions and suggestions.
This project has adopted the [CNCF Code of Conduct](https://github.com/cncf/foundation/blob/main/code-of-conduct.md).
## Support
### How to file issues and get help
This project uses GitHub Issues to track bugs and feature requests. Please search the [existing issues](https://github.com/eraser-dev/eraser/issues) before filing new issues to avoid duplicates. For new issues, file your bug or feature request as a new Issue.
The Eraser maintainers will respond to the best of their abilities.
================================================
FILE: api/group.go
================================================
/*
Copyright 2021.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package apis
================================================
FILE: api/unversioned/config/config.go
================================================
package config
import (
"fmt"
"sync"
"time"
v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/resource"
"github.com/eraser-dev/eraser/api/unversioned"
"github.com/eraser-dev/eraser/version"
)
// defaultScannerConfig is the YAML configuration handed to the trivy
// scanner component when the user supplies none. It is stored as a raw
// string because the scanner parses it itself; see Default() where it is
// wired into the Scanner container config.
var defaultScannerConfig = `
cacheDir: /var/lib/trivy
dbRepo: ghcr.io/aquasecurity/trivy-db
deleteFailedImages: true
deleteEOLImages: true
vulnerabilities:
  ignoreUnfixed: false
  types:
    - os
    - library
securityChecks: # need to be documented; determined by trivy, not us
  - vuln
severities:
  - CRITICAL
  - HIGH
  - MEDIUM
  - LOW
ignoredStatuses:
`
// Manager guards shared access to an EraserConfig. All reads and
// updates go through the mutex, so concurrent callers never observe a
// partially written configuration. Construct with NewManager.
type Manager struct {
	mtx sync.Mutex
	cfg *unversioned.EraserConfig
}
// Read returns a copy of the configuration currently held by the
// Manager. An error is returned when the Manager was created with a
// nil configuration. Note the copy is shallow: slice and map fields
// still share backing storage with the stored config.
func (m *Manager) Read() (unversioned.EraserConfig, error) {
	m.mtx.Lock()
	defer m.mtx.Unlock()

	if m.cfg != nil {
		return *m.cfg, nil
	}
	return unversioned.EraserConfig{}, fmt.Errorf("ConfigManager configuration is nil, aborting")
}
// Update overwrites the managed configuration with the contents of
// newC. Both the stored configuration and newC must be non-nil;
// otherwise an error is returned and nothing changes.
func (m *Manager) Update(newC *unversioned.EraserConfig) error {
	m.mtx.Lock()
	defer m.mtx.Unlock()

	switch {
	case m.cfg == nil:
		return fmt.Errorf("ConfigManager configuration is nil, aborting")
	case newC == nil:
		return fmt.Errorf("new configuration is nil, aborting")
	}

	// Copy the value, not the pointer, so callers keeping the original
	// *EraserConfig handed to NewManager see the update.
	*m.cfg = *newC
	return nil
}
// NewManager wraps cfg in a Manager ready for concurrent use. The
// zero-value mutex needs no explicit initialization.
func NewManager(cfg *unversioned.EraserConfig) *Manager {
	m := Manager{cfg: cfg}
	return &m
}
const (
	// noDelay performs cleanup immediately after job success.
	noDelay = unversioned.Duration(0)
	// oneDay is the default repeat interval and failure-cleanup delay.
	oneDay = unversioned.Duration(time.Hour * 24)
)
// Default returns the built-in EraserConfig used when the operator is
// deployed without user-supplied configuration: containerd runtime,
// daily scheduling starting immediately, and the optional collector and
// scanner components disabled. Image repositories/tags come from the
// build-time version package (see repo()).
func Default() *unversioned.EraserConfig {
	return &unversioned.EraserConfig{
		Manager: unversioned.ManagerConfig{
			Runtime: unversioned.RuntimeSpec{
				Name:    unversioned.RuntimeContainerd,
				Address: "unix:///run/containerd/containerd.sock",
			},
			// Tracing is off unless an OTLP endpoint is configured.
			OTLPEndpoint: "",
			LogLevel:     "info",
			Scheduling: unversioned.ScheduleConfig{
				RepeatInterval:   unversioned.Duration(oneDay),
				BeginImmediately: true,
			},
			// pprof endpoint, disabled by default.
			Profile: unversioned.ProfileConfig{
				Enabled: false,
				Port:    6060,
			},
			ImageJob: unversioned.ImageJobConfig{
				// 1.0 means every pod in the job must succeed.
				SuccessRatio: 1.0,
				Cleanup: unversioned.ImageJobCleanupConfig{
					DelayOnSuccess: noDelay,
					DelayOnFailure: oneDay,
				},
			},
			PullSecrets: []string{},
			// By default, exclude nodes labeled eraser.sh/cleanup.filter.
			NodeFilter: unversioned.NodeFilterConfig{
				Type: "exclude",
				Selectors: []string{
					"eraser.sh/cleanup.filter",
				},
			},
			AdditionalPodLabels: map[string]string{},
		},
		Components: unversioned.Components{
			Collector: unversioned.OptionalContainerConfig{
				Enabled: false,
				ContainerConfig: unversioned.ContainerConfig{
					Image: unversioned.RepoTag{
						Repo: repo("collector"),
						Tag:  version.BuildVersion,
					},
					Request: unversioned.ResourceRequirements{
						Mem: resource.MustParse("25Mi"),
						CPU: resource.MustParse("7m"),
					},
					Limit: unversioned.ResourceRequirements{
						Mem: resource.MustParse("500Mi"),
						// Empty Quantity: no CPU limit applied.
						CPU: resource.Quantity{},
					},
					Config: nil,
				},
			},
			Scanner: unversioned.OptionalContainerConfig{
				Enabled: false,
				ContainerConfig: unversioned.ContainerConfig{
					Image: unversioned.RepoTag{
						Repo: repo("eraser-trivy-scanner"),
						Tag:  version.BuildVersion,
					},
					// Scanning is resource-heavy compared to the other components.
					Request: unversioned.ResourceRequirements{
						Mem: resource.MustParse("500Mi"),
						CPU: resource.MustParse("1000m"),
					},
					Limit: unversioned.ResourceRequirements{
						Mem: resource.MustParse("2Gi"),
						CPU: resource.MustParse("1500m"),
					},
					Config:  &defaultScannerConfig,
					Volumes: []v1.Volume{},
				},
			},
			Remover: unversioned.ContainerConfig{
				Image: unversioned.RepoTag{
					Repo: repo("remover"),
					Tag:  version.BuildVersion,
				},
				Request: unversioned.ResourceRequirements{
					Mem: resource.MustParse("25Mi"),
					CPU: resource.MustParse("7m"),
				},
				Limit: unversioned.ResourceRequirements{
					Mem: resource.MustParse("30Mi"),
					// Empty Quantity: no CPU limit applied.
					CPU: resource.Quantity{},
				},
				Config: nil,
			},
		},
	}
}
// repo prefixes basename with the build-time default image repository.
// When no default repository was baked into the binary, the bare
// basename is returned unchanged.
func repo(basename string) string {
	if prefix := version.DefaultRepo; prefix != "" {
		return fmt.Sprintf("%s/%s", prefix, basename)
	}
	return basename
}
================================================
FILE: api/unversioned/doc.go
================================================
package unversioned
// +kubebuilder:object:generate=true
================================================
FILE: api/unversioned/eraserconfig_types.go
================================================
/*
Copyright 2021.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package unversioned
import (
"encoding/json"
"fmt"
"net/url"
"time"
corev1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
type (
	// Duration wraps time.Duration so configuration values can be
	// (un)marshaled as human-readable strings such as "24h".
	Duration time.Duration
	// Runtime names a supported container runtime.
	Runtime string
	// RuntimeSpec pairs a runtime name with its socket address
	// (e.g. "unix:///run/containerd/containerd.sock").
	RuntimeSpec struct {
		Name    Runtime `json:"name"`
		Address string  `json:"address"`
	}
)
const (
	// Supported runtime names; RuntimeNotProvided is the empty value
	// used when the user omits the runtime from configuration.
	RuntimeContainerd  Runtime = "containerd"
	RuntimeDockerShim  Runtime = "dockershim"
	RuntimeCrio        Runtime = "crio"
	RuntimeNotProvided Runtime = ""

	// Default unix socket paths for each runtime (without the scheme).
	ContainerdPath = "/run/containerd/containerd.sock"
	DockerPath     = "/run/dockershim.sock"
	CrioPath       = "/run/crio/crio.sock"
)
// ConvertRuntimeToRuntimeSpec maps a runtime name to a RuntimeSpec
// using that runtime's default unix socket path. Any name other than
// containerd, dockershim, or crio yields an error and a zero-value spec.
func ConvertRuntimeToRuntimeSpec(r Runtime) (RuntimeSpec, error) {
	defaultPaths := map[Runtime]string{
		RuntimeContainerd: ContainerdPath,
		RuntimeDockerShim: DockerPath,
		RuntimeCrio:       CrioPath,
	}

	path, ok := defaultPaths[r]
	if !ok {
		return RuntimeSpec{}, fmt.Errorf("invalid runtime: valid names are %s, %s, %s", RuntimeContainerd, RuntimeDockerShim, RuntimeCrio)
	}
	return RuntimeSpec{Name: r, Address: fmt.Sprintf("unix://%s", path)}, nil
}
// UnmarshalJSON parses a JSON string such as "1h30m" into a Duration
// using time.ParseDuration semantics.
func (td *Duration) UnmarshalJSON(b []byte) error {
	var raw string
	if err := json.Unmarshal(b, &raw); err != nil {
		return err
	}

	parsed, err := time.ParseDuration(raw)
	if err != nil {
		return err
	}

	*td = Duration(parsed)
	return nil
}
// MarshalJSON renders the Duration as a quoted Go duration string,
// e.g. `"24h0m0s"`. Duration strings never contain characters that
// need JSON escaping, so plain quoting is sufficient.
func (td *Duration) MarshalJSON() ([]byte, error) {
	quoted := `"` + time.Duration(*td).String() + `"`
	return []byte(quoted), nil
}
// UnmarshalJSON decodes and validates a RuntimeSpec. Validation rules:
//   - the name must be one of containerd/dockershim/crio, or empty;
//   - an explicit address must use a `tcp` or `unix` scheme;
//   - a missing address falls back to the runtime's default socket;
//   - an empty name with a non-empty address is rejected;
//   - an entirely empty spec defaults to containerd.
func (r *RuntimeSpec) UnmarshalJSON(b []byte) error {
	// Decode into a shadow struct without custom unmarshaling; calling
	// json.Unmarshal on RuntimeSpec directly would recurse into this method.
	type TempRuntimeSpec struct {
		Name    string `json:"name"`
		Address string `json:"address"`
	}
	var rs TempRuntimeSpec
	err := json.Unmarshal(b, &rs)
	if err != nil {
		return fmt.Errorf("error unmarshalling into TempRuntimeSpec %v %s", err, string(b))
	}
	switch rt := Runtime(rs.Name); rt {
	// make sure user provided Runtime is valid
	case RuntimeContainerd, RuntimeDockerShim, RuntimeCrio:
		if rs.Address != "" {
			// User supplied an explicit socket address: accept it only
			// when it parses as a URL with a tcp or unix scheme.
			u, err := url.Parse(rs.Address)
			if err != nil {
				return err
			}
			switch u.Scheme {
			case "tcp", "unix":
			default:
				return fmt.Errorf("invalid RuntimeAddress scheme: valid schemes for runtime socket address are `tcp` and `unix`")
			}
			r.Name = Runtime(rs.Name)
			r.Address = rs.Address
			return nil
		}
		// No address supplied: use the runtime's default socket path.
		converted, err := ConvertRuntimeToRuntimeSpec(rt)
		if err != nil {
			return err
		}
		*r = converted
	case RuntimeNotProvided:
		// An address without a runtime name is ambiguous — reject it.
		if rs.Address != "" {
			return fmt.Errorf("runtime name must be provided with address")
		}
		// Entirely empty spec: default to containerd.
		r.Name = RuntimeContainerd
		r.Address = fmt.Sprintf("unix://%s", ContainerdPath)
	default:
		return fmt.Errorf("invalid runtime: valid names are %s, %s, %s", RuntimeContainerd, RuntimeDockerShim, RuntimeCrio)
	}
	return nil
}
// EDIT THIS FILE! THIS IS SCAFFOLDING FOR YOU TO OWN!
// NOTE: json tags are required. Any new fields you add must have json tags for the fields to be serialized.
// OptionalContainerConfig is a ContainerConfig for a component that can
// be switched off entirely (collector, scanner).
type OptionalContainerConfig struct {
	Enabled         bool `json:"enabled,omitempty"`
	ContainerConfig `json:",inline"`
}

// ContainerConfig describes how one eraser component container is run:
// its image, resource requests/limits, optional free-form config blob,
// and extra volumes.
type ContainerConfig struct {
	Image   RepoTag              `json:"image,omitempty"`
	Request ResourceRequirements `json:"request,omitempty"`
	Limit   ResourceRequirements `json:"limit,omitempty"`
	// Config is passed through verbatim to the component, which parses
	// it itself (e.g. the scanner's YAML config).
	Config  *string         `json:"config,omitempty"`
	Volumes []corev1.Volume `json:"volumes,omitempty"`
}

// ManagerConfig holds settings for the controller-manager itself.
type ManagerConfig struct {
	Runtime             RuntimeSpec       `json:"runtime,omitempty"`
	OTLPEndpoint        string            `json:"otlpEndpoint,omitempty"`
	LogLevel            string            `json:"logLevel,omitempty"`
	Scheduling          ScheduleConfig    `json:"scheduling,omitempty"`
	Profile             ProfileConfig     `json:"profile,omitempty"`
	ImageJob            ImageJobConfig    `json:"imageJob,omitempty"`
	PullSecrets         []string          `json:"pullSecrets,omitempty"`
	NodeFilter          NodeFilterConfig  `json:"nodeFilter,omitempty"`
	PriorityClassName   string            `json:"priorityClassName,omitempty"`
	AdditionalPodLabels map[string]string `json:"additionalPodLabels,omitempty"`
}

// ScheduleConfig controls when image jobs are launched.
type ScheduleConfig struct {
	RepeatInterval   Duration `json:"repeatInterval,omitempty"`
	BeginImmediately bool     `json:"beginImmediately,omitempty"`
}

// ProfileConfig enables the manager's pprof endpoint.
type ProfileConfig struct {
	Enabled bool `json:"enabled,omitempty"`
	Port    int  `json:"port,omitempty"`
}

// ImageJobConfig tunes success criteria and cleanup of image jobs.
type ImageJobConfig struct {
	// SuccessRatio is the fraction of job pods that must succeed for
	// the job to be considered successful.
	SuccessRatio float64               `json:"successRatio,omitempty"`
	Cleanup      ImageJobCleanupConfig `json:"cleanup,omitempty"`
}

// ImageJobCleanupConfig sets how long finished jobs are kept around.
type ImageJobCleanupConfig struct {
	DelayOnSuccess Duration `json:"delayOnSuccess,omitempty"`
	DelayOnFailure Duration `json:"delayOnFailure,omitempty"`
}

// NodeFilterConfig selects which nodes jobs run on; Type is expected to
// be "include" or "exclude" (see the "exclude" default in the config
// package) applied to the label Selectors.
type NodeFilterConfig struct {
	Type      string   `json:"type,omitempty"`
	Selectors []string `json:"selectors,omitempty"`
}

// ResourceRequirements is a memory/CPU pair used for both requests and
// limits; an empty Quantity means the value is unset.
type ResourceRequirements struct {
	Mem resource.Quantity `json:"mem,omitempty"`
	CPU resource.Quantity `json:"cpu,omitempty"`
}

// RepoTag identifies a container image by repository and tag.
type RepoTag struct {
	Repo string `json:"repo,omitempty"`
	Tag  string `json:"tag,omitempty"`
}

// Components groups the per-component container configurations.
type Components struct {
	Collector OptionalContainerConfig `json:"collector,omitempty"`
	Scanner   OptionalContainerConfig `json:"scanner,omitempty"`
	Remover   ContainerConfig         `json:"remover,omitempty"`
}
//+kubebuilder:object:root=true

// EraserConfig is the Schema for the eraserconfigs API. It is the
// internal (hub) representation that each versioned API converts
// to and from.
type EraserConfig struct {
	metav1.TypeMeta `json:",inline"`
	Manager         ManagerConfig `json:"manager"`
	Components      Components    `json:"components"`
}

// Register the type with the package scheme builder.
func init() {
	SchemeBuilder.Register(&EraserConfig{})
}
================================================
FILE: api/unversioned/groupversion_info.go
================================================
/*
Copyright 2021.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Package unversioned contains the internal (hub) API Schema definitions for the eraser.sh API group
// +kubebuilder:object:generate=true
// +groupName=eraser.sh
package unversioned
import (
"k8s.io/apimachinery/pkg/runtime/schema"
"sigs.k8s.io/controller-runtime/pkg/scheme"
)
var (
	// GroupVersion is group version used to register these objects.
	// "unversioned" marks this package as the internal hub version.
	GroupVersion = schema.GroupVersion{Group: "eraser.sh", Version: "unversioned"}

	// SchemeBuilder is used to add go types to the GroupVersionKind scheme.
	SchemeBuilder = &scheme.Builder{GroupVersion: GroupVersion}

	// AddToScheme adds the types in this group-version to the given scheme.
	AddToScheme = SchemeBuilder.AddToScheme
)
================================================
FILE: api/unversioned/imagejob_types.go
================================================
/*
Copyright 2021.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// +kubebuilder:skip
package unversioned
import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
// Image describes a container image found on a node: its runtime image
// ID plus any associated names (tags) and digests.
type Image struct {
	ImageID string   `json:"image_id"`
	Names   []string `json:"names,omitempty"`
	Digests []string `json:"digests,omitempty"`
}
// EDIT THIS FILE! THIS IS SCAFFOLDING FOR YOU TO OWN!
// NOTE: json tags are required. Any new fields you add must have json tags for the fields to be serialized.
// JobPhase defines the phase of an ImageJob status.
type JobPhase string

// Valid phases of an ImageJob.
const (
	PhaseRunning   JobPhase = "Running"
	PhaseCompleted JobPhase = "Completed"
	PhaseFailed    JobPhase = "Failed"
)

// ImageJobStatus defines the observed state of ImageJob.
type ImageJobStatus struct {
	// number of pods that failed
	Failed int `json:"failed"`
	// number of pods that completed successfully
	Succeeded int `json:"succeeded"`
	// desired number of pods
	Desired int `json:"desired"`
	// number of nodes that were skipped e.g. because they are not a linux node
	Skipped int `json:"skipped"`
	// job running, successfully completed, or failed
	Phase JobPhase `json:"phase"`
	// Time to delay deletion until
	DeleteAfter *metav1.Time `json:"deleteAfter,omitempty"`
}

// ImageJob is the Schema for the imagejobs API. Note it carries only
// status; the work to perform comes from the associated ImageList.
type ImageJob struct {
	metav1.TypeMeta   `json:",inline"`
	metav1.ObjectMeta `json:"metadata,omitempty"`

	Status ImageJobStatus `json:"status,omitempty"`
}

// ImageJobList contains a list of ImageJob.
type ImageJobList struct {
	metav1.TypeMeta `json:",inline"`
	metav1.ListMeta `json:"metadata,omitempty"`
	Items           []ImageJob `json:"items"`
}
================================================
FILE: api/unversioned/imagelist_types.go
================================================
/*
Copyright 2021.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// +kubebuilder:skip
package unversioned
import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
// ImageListSpec defines the desired state of ImageList.
type ImageListSpec struct {
	// The list of non-compliant images to delete if non-running.
	Images []string `json:"images"`
}
// ImageListStatus defines the observed state of ImageList.
type ImageListStatus struct {
	// Information on when the job was completed.
	Timestamp *metav1.Time `json:"timestamp"`
	// Number of nodes that successfully ran the job.
	Success int64 `json:"success"`
	// Number of nodes that failed to run the job.
	Failed int64 `json:"failed"`
	// Number of nodes that were skipped due to a skip selector.
	Skipped int64 `json:"skipped"`
}
// ImageList is the Schema for the imagelists API.
type ImageList struct {
	metav1.TypeMeta   `json:",inline"`
	metav1.ObjectMeta `json:"metadata,omitempty"`
	Spec              ImageListSpec   `json:"spec,omitempty"`
	Status            ImageListStatus `json:"status,omitempty"`
}
// ImageListList contains a list of ImageList.
type ImageListList struct {
	metav1.TypeMeta `json:",inline"`
	metav1.ListMeta `json:"metadata,omitempty"`
	Items           []ImageList `json:"items"`
}
================================================
FILE: api/unversioned/zz_generated.deepcopy.go
================================================
//go:build !ignore_autogenerated
/*
Copyright 2021.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by controller-gen. DO NOT EDIT.
package unversioned
import (
"k8s.io/api/core/v1"
runtime "k8s.io/apimachinery/pkg/runtime"
)
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *Components) DeepCopyInto(out *Components) {
	*out = *in
	in.Collector.DeepCopyInto(&out.Collector)
	in.Scanner.DeepCopyInto(&out.Scanner)
	in.Remover.DeepCopyInto(&out.Remover)
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Components.
func (in *Components) DeepCopy() *Components {
	if in == nil {
		return nil
	}
	out := new(Components)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ContainerConfig) DeepCopyInto(out *ContainerConfig) {
	*out = *in
	out.Image = in.Image
	in.Request.DeepCopyInto(&out.Request)
	in.Limit.DeepCopyInto(&out.Limit)
	if in.Config != nil {
		// Shadow in/out so the pointer field is duplicated rather than aliased.
		in, out := &in.Config, &out.Config
		*out = new(string)
		**out = **in
	}
	if in.Volumes != nil {
		in, out := &in.Volumes, &out.Volumes
		*out = make([]v1.Volume, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ContainerConfig.
func (in *ContainerConfig) DeepCopy() *ContainerConfig {
	if in == nil {
		return nil
	}
	out := new(ContainerConfig)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *EraserConfig) DeepCopyInto(out *EraserConfig) {
	*out = *in
	out.TypeMeta = in.TypeMeta
	in.Manager.DeepCopyInto(&out.Manager)
	in.Components.DeepCopyInto(&out.Components)
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EraserConfig.
func (in *EraserConfig) DeepCopy() *EraserConfig {
	if in == nil {
		return nil
	}
	out := new(EraserConfig)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *EraserConfig) DeepCopyObject() runtime.Object {
	if c := in.DeepCopy(); c != nil {
		return c
	}
	return nil
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *Image) DeepCopyInto(out *Image) {
	*out = *in
	if in.Names != nil {
		in, out := &in.Names, &out.Names
		*out = make([]string, len(*in))
		copy(*out, *in)
	}
	if in.Digests != nil {
		in, out := &in.Digests, &out.Digests
		*out = make([]string, len(*in))
		copy(*out, *in)
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Image.
func (in *Image) DeepCopy() *Image {
	if in == nil {
		return nil
	}
	out := new(Image)
	in.DeepCopyInto(out)
	return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ImageJob) DeepCopyInto(out *ImageJob) {
	*out = *in
	out.TypeMeta = in.TypeMeta
	in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
	in.Status.DeepCopyInto(&out.Status)
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImageJob.
func (in *ImageJob) DeepCopy() *ImageJob {
	if in == nil {
		return nil
	}
	out := new(ImageJob)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
// ImageJobCleanupConfig contains only value fields, so a shallow copy suffices.
func (in *ImageJobCleanupConfig) DeepCopyInto(out *ImageJobCleanupConfig) {
	*out = *in
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImageJobCleanupConfig.
func (in *ImageJobCleanupConfig) DeepCopy() *ImageJobCleanupConfig {
	if in == nil {
		return nil
	}
	out := new(ImageJobCleanupConfig)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ImageJobConfig) DeepCopyInto(out *ImageJobConfig) {
	*out = *in
	out.Cleanup = in.Cleanup
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImageJobConfig.
func (in *ImageJobConfig) DeepCopy() *ImageJobConfig {
	if in == nil {
		return nil
	}
	out := new(ImageJobConfig)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ImageJobList) DeepCopyInto(out *ImageJobList) {
	*out = *in
	out.TypeMeta = in.TypeMeta
	in.ListMeta.DeepCopyInto(&out.ListMeta)
	if in.Items != nil {
		in, out := &in.Items, &out.Items
		*out = make([]ImageJob, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImageJobList.
func (in *ImageJobList) DeepCopy() *ImageJobList {
	if in == nil {
		return nil
	}
	out := new(ImageJobList)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ImageJobStatus) DeepCopyInto(out *ImageJobStatus) {
	*out = *in
	if in.DeleteAfter != nil {
		in, out := &in.DeleteAfter, &out.DeleteAfter
		*out = (*in).DeepCopy()
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImageJobStatus.
func (in *ImageJobStatus) DeepCopy() *ImageJobStatus {
	if in == nil {
		return nil
	}
	out := new(ImageJobStatus)
	in.DeepCopyInto(out)
	return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ImageList) DeepCopyInto(out *ImageList) {
	*out = *in
	out.TypeMeta = in.TypeMeta
	in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
	in.Spec.DeepCopyInto(&out.Spec)
	in.Status.DeepCopyInto(&out.Status)
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImageList.
func (in *ImageList) DeepCopy() *ImageList {
	if in == nil {
		return nil
	}
	out := new(ImageList)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ImageListList) DeepCopyInto(out *ImageListList) {
	*out = *in
	out.TypeMeta = in.TypeMeta
	in.ListMeta.DeepCopyInto(&out.ListMeta)
	if in.Items != nil {
		in, out := &in.Items, &out.Items
		*out = make([]ImageList, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImageListList.
func (in *ImageListList) DeepCopy() *ImageListList {
	if in == nil {
		return nil
	}
	out := new(ImageListList)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ImageListSpec) DeepCopyInto(out *ImageListSpec) {
	*out = *in
	if in.Images != nil {
		in, out := &in.Images, &out.Images
		*out = make([]string, len(*in))
		copy(*out, *in)
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImageListSpec.
func (in *ImageListSpec) DeepCopy() *ImageListSpec {
	if in == nil {
		return nil
	}
	out := new(ImageListSpec)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ImageListStatus) DeepCopyInto(out *ImageListStatus) {
	*out = *in
	if in.Timestamp != nil {
		in, out := &in.Timestamp, &out.Timestamp
		*out = (*in).DeepCopy()
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImageListStatus.
func (in *ImageListStatus) DeepCopy() *ImageListStatus {
	if in == nil {
		return nil
	}
	out := new(ImageListStatus)
	in.DeepCopyInto(out)
	return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ManagerConfig) DeepCopyInto(out *ManagerConfig) {
	*out = *in
	out.Runtime = in.Runtime
	out.Scheduling = in.Scheduling
	out.Profile = in.Profile
	out.ImageJob = in.ImageJob
	if in.PullSecrets != nil {
		in, out := &in.PullSecrets, &out.PullSecrets
		*out = make([]string, len(*in))
		copy(*out, *in)
	}
	in.NodeFilter.DeepCopyInto(&out.NodeFilter)
	if in.AdditionalPodLabels != nil {
		in, out := &in.AdditionalPodLabels, &out.AdditionalPodLabels
		*out = make(map[string]string, len(*in))
		for key, val := range *in {
			(*out)[key] = val
		}
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ManagerConfig.
func (in *ManagerConfig) DeepCopy() *ManagerConfig {
	if in == nil {
		return nil
	}
	out := new(ManagerConfig)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *NodeFilterConfig) DeepCopyInto(out *NodeFilterConfig) {
	*out = *in
	if in.Selectors != nil {
		in, out := &in.Selectors, &out.Selectors
		*out = make([]string, len(*in))
		copy(*out, *in)
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodeFilterConfig.
func (in *NodeFilterConfig) DeepCopy() *NodeFilterConfig {
	if in == nil {
		return nil
	}
	out := new(NodeFilterConfig)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *OptionalContainerConfig) DeepCopyInto(out *OptionalContainerConfig) {
	*out = *in
	in.ContainerConfig.DeepCopyInto(&out.ContainerConfig)
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OptionalContainerConfig.
func (in *OptionalContainerConfig) DeepCopy() *OptionalContainerConfig {
	if in == nil {
		return nil
	}
	out := new(OptionalContainerConfig)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
// ProfileConfig contains only value fields, so a shallow copy suffices.
func (in *ProfileConfig) DeepCopyInto(out *ProfileConfig) {
	*out = *in
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProfileConfig.
func (in *ProfileConfig) DeepCopy() *ProfileConfig {
	if in == nil {
		return nil
	}
	out := new(ProfileConfig)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *RepoTag) DeepCopyInto(out *RepoTag) {
	*out = *in
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RepoTag.
func (in *RepoTag) DeepCopy() *RepoTag {
	if in == nil {
		return nil
	}
	out := new(RepoTag)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ResourceRequirements) DeepCopyInto(out *ResourceRequirements) {
	*out = *in
	out.Mem = in.Mem.DeepCopy()
	out.CPU = in.CPU.DeepCopy()
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceRequirements.
func (in *ResourceRequirements) DeepCopy() *ResourceRequirements {
	if in == nil {
		return nil
	}
	out := new(ResourceRequirements)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *RuntimeSpec) DeepCopyInto(out *RuntimeSpec) {
	*out = *in
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RuntimeSpec.
func (in *RuntimeSpec) DeepCopy() *RuntimeSpec {
	if in == nil {
		return nil
	}
	out := new(RuntimeSpec)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ScheduleConfig) DeepCopyInto(out *ScheduleConfig) {
	*out = *in
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ScheduleConfig.
func (in *ScheduleConfig) DeepCopy() *ScheduleConfig {
	if in == nil {
		return nil
	}
	out := new(ScheduleConfig)
	in.DeepCopyInto(out)
	return out
}
================================================
FILE: api/v1/doc.go
================================================
// +k8s:conversion-gen=github.com/eraser-dev/eraser/api/unversioned
package v1
================================================
FILE: api/v1/groupversion_info.go
================================================
/*
Copyright 2021.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Package v1 contains API Schema definitions for the eraser.sh v1 API group
// +kubebuilder:object:generate=true
// +groupName=eraser.sh
package v1
import (
runtime "k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
"sigs.k8s.io/controller-runtime/pkg/scheme"
)
var (
	// GroupVersion is group version used to register these objects.
	GroupVersion = schema.GroupVersion{Group: "eraser.sh", Version: "v1"}

	// SchemeBuilder is used to add go types to the GroupVersionKind scheme.
	SchemeBuilder = &scheme.Builder{GroupVersion: GroupVersion}

	// localSchemeBuilder wraps SchemeBuilder.AddToScheme so additional
	// registration functions (e.g. the generated conversion registrations)
	// can be attached via localSchemeBuilder.Register.
	localSchemeBuilder = runtime.NewSchemeBuilder(SchemeBuilder.AddToScheme)

	// AddToScheme adds the types in this group-version to the given scheme.
	AddToScheme = SchemeBuilder.AddToScheme
)
================================================
FILE: api/v1/imagejob_types.go
================================================
/*
Copyright 2021.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1
import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
// Image identifies a container image by its runtime image ID, along with
// any known names and digests for that image.
type Image struct {
	// NOTE: the snake_case json tag differs from the camelCase tags used
	// elsewhere; it is kept as-is for wire compatibility.
	ImageID string   `json:"image_id"`
	Names   []string `json:"names,omitempty"`
	Digests []string `json:"digests,omitempty"`
}
// EDIT THIS FILE! THIS IS SCAFFOLDING FOR YOU TO OWN!
// NOTE: json tags are required. Any new fields you add must have json tags for the fields to be serialized.

// JobPhase defines the phase of an ImageJob status.
type JobPhase string

// Valid JobPhase values (see ImageJobStatus.Phase).
const (
	// PhaseRunning means the job is still in progress.
	PhaseRunning JobPhase = "Running"
	// PhaseCompleted means the job completed successfully.
	PhaseCompleted JobPhase = "Completed"
	// PhaseFailed means the job failed.
	PhaseFailed JobPhase = "Failed"
)
// ImageJobStatus defines the observed state of ImageJob.
// Field comments below become CRD field descriptions when the manifests
// are regenerated.
type ImageJobStatus struct {
	// number of pods that failed
	Failed int `json:"failed"`
	// number of pods that completed successfully
	Succeeded int `json:"succeeded"`
	// desired number of pods
	Desired int `json:"desired"`
	// number of nodes that were skipped e.g. because they are not a linux node
	Skipped int `json:"skipped"`
	// job running, successfully completed, or failed
	Phase JobPhase `json:"phase"`
	// Time to delay deletion until
	DeleteAfter *metav1.Time `json:"deleteAfter,omitempty"`
}
// +kubebuilder:object:root=true
// +kubebuilder:subresource:status
// +kubebuilder:resource:scope="Cluster"
// +kubebuilder:storageversion
// ImageJob is the Schema for the imagejobs API.
// Note that ImageJob carries only metadata and status; it has no spec field.
type ImageJob struct {
	metav1.TypeMeta   `json:",inline"`
	metav1.ObjectMeta `json:"metadata,omitempty"`
	Status            ImageJobStatus `json:"status,omitempty"`
}
// +kubebuilder:object:root=true
// +kubebuilder:storageversion
// ImageJobList contains a list of ImageJob.
type ImageJobList struct {
	metav1.TypeMeta `json:",inline"`
	metav1.ListMeta `json:"metadata,omitempty"`
	Items           []ImageJob `json:"items"`
}
// init makes the ImageJob kinds known to the v1 scheme builder.
func init() {
	SchemeBuilder.Register(&ImageJob{})
	SchemeBuilder.Register(&ImageJobList{})
}
================================================
FILE: api/v1/imagelist_types.go
================================================
/*
Copyright 2021.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1
import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
// ImageListSpec defines the desired state of ImageList.
type ImageListSpec struct {
	// The list of non-compliant images to delete if non-running.
	Images []string `json:"images"`
}
// ImageListStatus defines the observed state of ImageList.
// Field comments below become CRD field descriptions when the manifests
// are regenerated.
type ImageListStatus struct {
	// Information when the job was completed.
	Timestamp *metav1.Time `json:"timestamp"`
	// Number of nodes that successfully ran the job
	Success int64 `json:"success"`
	// Number of nodes that failed to run the job
	Failed int64 `json:"failed"`
	// Number of nodes that were skipped due to a skip selector
	Skipped int64 `json:"skipped"`
}
// +kubebuilder:object:root=true
// +kubebuilder:subresource:status
// +kubebuilder:resource:scope="Cluster"
// +kubebuilder:storageversion
// ImageList is the Schema for the imagelists API.
type ImageList struct {
	metav1.TypeMeta   `json:",inline"`
	metav1.ObjectMeta `json:"metadata,omitempty"`
	Spec              ImageListSpec   `json:"spec,omitempty"`
	Status            ImageListStatus `json:"status,omitempty"`
}
// +kubebuilder:object:root=true
// +kubebuilder:storageversion
// ImageListList contains a list of ImageList.
type ImageListList struct {
	metav1.TypeMeta `json:",inline"`
	metav1.ListMeta `json:"metadata,omitempty"`
	Items           []ImageList `json:"items"`
}
// init makes the ImageList kinds known to the v1 scheme builder.
func init() {
	SchemeBuilder.Register(&ImageList{})
	SchemeBuilder.Register(&ImageListList{})
}
================================================
FILE: api/v1/zz_generated.conversion.go
================================================
//go:build !ignore_autogenerated
// +build !ignore_autogenerated
/*
Copyright 2021.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by conversion-gen. DO NOT EDIT.
package v1
import (
unsafe "unsafe"
unversioned "github.com/eraser-dev/eraser/api/unversioned"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
conversion "k8s.io/apimachinery/pkg/conversion"
runtime "k8s.io/apimachinery/pkg/runtime"
)
// init queues the generated conversion registrations so they are added to
// any scheme built from this package's scheme builder.
func init() {
	localSchemeBuilder.Register(RegisterConversions)
}
// RegisterConversions adds conversion functions to the given scheme.
// Public to allow building arbitrary schemes.
// Each pair of registrations below wires both directions of conversion
// between a v1 type and its unversioned (internal) counterpart.
func RegisterConversions(s *runtime.Scheme) error {
	if err := s.AddGeneratedConversionFunc((*Image)(nil), (*unversioned.Image)(nil), func(a, b interface{}, scope conversion.Scope) error {
		return Convert_v1_Image_To_unversioned_Image(a.(*Image), b.(*unversioned.Image), scope)
	}); err != nil {
		return err
	}
	if err := s.AddGeneratedConversionFunc((*unversioned.Image)(nil), (*Image)(nil), func(a, b interface{}, scope conversion.Scope) error {
		return Convert_unversioned_Image_To_v1_Image(a.(*unversioned.Image), b.(*Image), scope)
	}); err != nil {
		return err
	}
	if err := s.AddGeneratedConversionFunc((*ImageJob)(nil), (*unversioned.ImageJob)(nil), func(a, b interface{}, scope conversion.Scope) error {
		return Convert_v1_ImageJob_To_unversioned_ImageJob(a.(*ImageJob), b.(*unversioned.ImageJob), scope)
	}); err != nil {
		return err
	}
	if err := s.AddGeneratedConversionFunc((*unversioned.ImageJob)(nil), (*ImageJob)(nil), func(a, b interface{}, scope conversion.Scope) error {
		return Convert_unversioned_ImageJob_To_v1_ImageJob(a.(*unversioned.ImageJob), b.(*ImageJob), scope)
	}); err != nil {
		return err
	}
	if err := s.AddGeneratedConversionFunc((*ImageJobList)(nil), (*unversioned.ImageJobList)(nil), func(a, b interface{}, scope conversion.Scope) error {
		return Convert_v1_ImageJobList_To_unversioned_ImageJobList(a.(*ImageJobList), b.(*unversioned.ImageJobList), scope)
	}); err != nil {
		return err
	}
	if err := s.AddGeneratedConversionFunc((*unversioned.ImageJobList)(nil), (*ImageJobList)(nil), func(a, b interface{}, scope conversion.Scope) error {
		return Convert_unversioned_ImageJobList_To_v1_ImageJobList(a.(*unversioned.ImageJobList), b.(*ImageJobList), scope)
	}); err != nil {
		return err
	}
	if err := s.AddGeneratedConversionFunc((*ImageJobStatus)(nil), (*unversioned.ImageJobStatus)(nil), func(a, b interface{}, scope conversion.Scope) error {
		return Convert_v1_ImageJobStatus_To_unversioned_ImageJobStatus(a.(*ImageJobStatus), b.(*unversioned.ImageJobStatus), scope)
	}); err != nil {
		return err
	}
	if err := s.AddGeneratedConversionFunc((*unversioned.ImageJobStatus)(nil), (*ImageJobStatus)(nil), func(a, b interface{}, scope conversion.Scope) error {
		return Convert_unversioned_ImageJobStatus_To_v1_ImageJobStatus(a.(*unversioned.ImageJobStatus), b.(*ImageJobStatus), scope)
	}); err != nil {
		return err
	}
	if err := s.AddGeneratedConversionFunc((*ImageList)(nil), (*unversioned.ImageList)(nil), func(a, b interface{}, scope conversion.Scope) error {
		return Convert_v1_ImageList_To_unversioned_ImageList(a.(*ImageList), b.(*unversioned.ImageList), scope)
	}); err != nil {
		return err
	}
	if err := s.AddGeneratedConversionFunc((*unversioned.ImageList)(nil), (*ImageList)(nil), func(a, b interface{}, scope conversion.Scope) error {
		return Convert_unversioned_ImageList_To_v1_ImageList(a.(*unversioned.ImageList), b.(*ImageList), scope)
	}); err != nil {
		return err
	}
	if err := s.AddGeneratedConversionFunc((*ImageListList)(nil), (*unversioned.ImageListList)(nil), func(a, b interface{}, scope conversion.Scope) error {
		return Convert_v1_ImageListList_To_unversioned_ImageListList(a.(*ImageListList), b.(*unversioned.ImageListList), scope)
	}); err != nil {
		return err
	}
	if err := s.AddGeneratedConversionFunc((*unversioned.ImageListList)(nil), (*ImageListList)(nil), func(a, b interface{}, scope conversion.Scope) error {
		return Convert_unversioned_ImageListList_To_v1_ImageListList(a.(*unversioned.ImageListList), b.(*ImageListList), scope)
	}); err != nil {
		return err
	}
	if err := s.AddGeneratedConversionFunc((*ImageListSpec)(nil), (*unversioned.ImageListSpec)(nil), func(a, b interface{}, scope conversion.Scope) error {
		return Convert_v1_ImageListSpec_To_unversioned_ImageListSpec(a.(*ImageListSpec), b.(*unversioned.ImageListSpec), scope)
	}); err != nil {
		return err
	}
	if err := s.AddGeneratedConversionFunc((*unversioned.ImageListSpec)(nil), (*ImageListSpec)(nil), func(a, b interface{}, scope conversion.Scope) error {
		return Convert_unversioned_ImageListSpec_To_v1_ImageListSpec(a.(*unversioned.ImageListSpec), b.(*ImageListSpec), scope)
	}); err != nil {
		return err
	}
	if err := s.AddGeneratedConversionFunc((*ImageListStatus)(nil), (*unversioned.ImageListStatus)(nil), func(a, b interface{}, scope conversion.Scope) error {
		return Convert_v1_ImageListStatus_To_unversioned_ImageListStatus(a.(*ImageListStatus), b.(*unversioned.ImageListStatus), scope)
	}); err != nil {
		return err
	}
	if err := s.AddGeneratedConversionFunc((*unversioned.ImageListStatus)(nil), (*ImageListStatus)(nil), func(a, b interface{}, scope conversion.Scope) error {
		return Convert_unversioned_ImageListStatus_To_v1_ImageListStatus(a.(*unversioned.ImageListStatus), b.(*ImageListStatus), scope)
	}); err != nil {
		return err
	}
	return nil
}
func autoConvert_v1_Image_To_unversioned_Image(in *Image, out *unversioned.Image, s conversion.Scope) error {
	out.ImageID = in.ImageID
	// unsafe casts reinterpret the slice headers in place (no copy); the
	// generated code relies on the element types having identical layouts.
	out.Names = *(*[]string)(unsafe.Pointer(&in.Names))
	out.Digests = *(*[]string)(unsafe.Pointer(&in.Digests))
	return nil
}

// Convert_v1_Image_To_unversioned_Image is an autogenerated conversion function.
func Convert_v1_Image_To_unversioned_Image(in *Image, out *unversioned.Image, s conversion.Scope) error {
	return autoConvert_v1_Image_To_unversioned_Image(in, out, s)
}

func autoConvert_unversioned_Image_To_v1_Image(in *unversioned.Image, out *Image, s conversion.Scope) error {
	out.ImageID = in.ImageID
	out.Names = *(*[]string)(unsafe.Pointer(&in.Names))
	out.Digests = *(*[]string)(unsafe.Pointer(&in.Digests))
	return nil
}

// Convert_unversioned_Image_To_v1_Image is an autogenerated conversion function.
func Convert_unversioned_Image_To_v1_Image(in *unversioned.Image, out *Image, s conversion.Scope) error {
	return autoConvert_unversioned_Image_To_v1_Image(in, out, s)
}

// NOTE: TypeMeta is not converted here; only ObjectMeta and Status are copied.
func autoConvert_v1_ImageJob_To_unversioned_ImageJob(in *ImageJob, out *unversioned.ImageJob, s conversion.Scope) error {
	out.ObjectMeta = in.ObjectMeta
	if err := Convert_v1_ImageJobStatus_To_unversioned_ImageJobStatus(&in.Status, &out.Status, s); err != nil {
		return err
	}
	return nil
}

// Convert_v1_ImageJob_To_unversioned_ImageJob is an autogenerated conversion function.
func Convert_v1_ImageJob_To_unversioned_ImageJob(in *ImageJob, out *unversioned.ImageJob, s conversion.Scope) error {
	return autoConvert_v1_ImageJob_To_unversioned_ImageJob(in, out, s)
}

func autoConvert_unversioned_ImageJob_To_v1_ImageJob(in *unversioned.ImageJob, out *ImageJob, s conversion.Scope) error {
	out.ObjectMeta = in.ObjectMeta
	if err := Convert_unversioned_ImageJobStatus_To_v1_ImageJobStatus(&in.Status, &out.Status, s); err != nil {
		return err
	}
	return nil
}

// Convert_unversioned_ImageJob_To_v1_ImageJob is an autogenerated conversion function.
func Convert_unversioned_ImageJob_To_v1_ImageJob(in *unversioned.ImageJob, out *ImageJob, s conversion.Scope) error {
	return autoConvert_unversioned_ImageJob_To_v1_ImageJob(in, out, s)
}

func autoConvert_v1_ImageJobList_To_unversioned_ImageJobList(in *ImageJobList, out *unversioned.ImageJobList, s conversion.Scope) error {
	out.ListMeta = in.ListMeta
	// Reinterpret the Items slice in place; the two ImageJob types are
	// treated as layout-identical by the generator.
	out.Items = *(*[]unversioned.ImageJob)(unsafe.Pointer(&in.Items))
	return nil
}

// Convert_v1_ImageJobList_To_unversioned_ImageJobList is an autogenerated conversion function.
func Convert_v1_ImageJobList_To_unversioned_ImageJobList(in *ImageJobList, out *unversioned.ImageJobList, s conversion.Scope) error {
	return autoConvert_v1_ImageJobList_To_unversioned_ImageJobList(in, out, s)
}

func autoConvert_unversioned_ImageJobList_To_v1_ImageJobList(in *unversioned.ImageJobList, out *ImageJobList, s conversion.Scope) error {
	out.ListMeta = in.ListMeta
	out.Items = *(*[]ImageJob)(unsafe.Pointer(&in.Items))
	return nil
}

// Convert_unversioned_ImageJobList_To_v1_ImageJobList is an autogenerated conversion function.
func Convert_unversioned_ImageJobList_To_v1_ImageJobList(in *unversioned.ImageJobList, out *ImageJobList, s conversion.Scope) error {
	return autoConvert_unversioned_ImageJobList_To_v1_ImageJobList(in, out, s)
}
func autoConvert_v1_ImageJobStatus_To_unversioned_ImageJobStatus(in *ImageJobStatus, out *unversioned.ImageJobStatus, s conversion.Scope) error {
out.Failed = in.Failed
out.Succeeded = in.Succeeded
out.Desired = in.Desired
out.Skipped = in.Skipped
out.Phase = unversioned.JobPhase(in.Phase)
out.DeleteAfter = (*metav1.Time)(unsafe.Pointer(in.DeleteAfter))
return nil
}
// Convert_v1_ImageJobStatus_To_unversioned_ImageJobStatus is an autogenerated conversion function.
func Convert_v1_ImageJobStatus_To_unversioned_ImageJobStatus(in *ImageJobStatus, out *unversioned.ImageJobStatus, s conversion.Scope) error {
return autoConvert_v1_ImageJobStatus_To_unversioned_ImageJobStatus(in, out, s)
}
func autoConvert_unversioned_ImageJobStatus_To_v1_ImageJobStatus(in *unversioned.ImageJobStatus, out *ImageJobStatus, s conversion.Scope) error {
out.Failed = in.Failed
out.Succeeded = in.Succeeded
out.Desired = in.Desired
out.Skipped = in.Skipped
out.Phase = JobPhase(in.Phase)
out.DeleteAfter = (*metav1.Time)(unsafe.Pointer(in.DeleteAfter))
return nil
}
// Convert_unversioned_ImageJobStatus_To_v1_ImageJobStatus is an autogenerated conversion function.
func Convert_unversioned_ImageJobStatus_To_v1_ImageJobStatus(in *unversioned.ImageJobStatus, out *ImageJobStatus, s conversion.Scope) error {
return autoConvert_unversioned_ImageJobStatus_To_v1_ImageJobStatus(in, out, s)
}
func autoConvert_v1_ImageList_To_unversioned_ImageList(in *ImageList, out *unversioned.ImageList, s conversion.Scope) error {
out.ObjectMeta = in.ObjectMeta
if err := Convert_v1_ImageListSpec_To_unversioned_ImageListSpec(&in.Spec, &out.Spec, s); err != nil {
return err
}
if err := Convert_v1_ImageListStatus_To_unversioned_ImageListStatus(&in.Status, &out.Status, s); err != nil {
return err
}
return nil
}
// Convert_v1_ImageList_To_unversioned_ImageList is an autogenerated conversion function.
func Convert_v1_ImageList_To_unversioned_ImageList(in *ImageList, out *unversioned.ImageList, s conversion.Scope) error {
return autoConvert_v1_ImageList_To_unversioned_ImageList(in, out, s)
}
func autoConvert_unversioned_ImageList_To_v1_ImageList(in *unversioned.ImageList, out *ImageList, s conversion.Scope) error {
out.ObjectMeta = in.ObjectMeta
if err := Convert_unversioned_ImageListSpec_To_v1_ImageListSpec(&in.Spec, &out.Spec, s); err != nil {
return err
}
if err := Convert_unversioned_ImageListStatus_To_v1_ImageListStatus(&in.Status, &out.Status, s); err != nil {
return err
}
return nil
}
// Convert_unversioned_ImageList_To_v1_ImageList is an autogenerated conversion function.
func Convert_unversioned_ImageList_To_v1_ImageList(in *unversioned.ImageList, out *ImageList, s conversion.Scope) error {
return autoConvert_unversioned_ImageList_To_v1_ImageList(in, out, s)
}
func autoConvert_v1_ImageListList_To_unversioned_ImageListList(in *ImageListList, out *unversioned.ImageListList, s conversion.Scope) error {
	out.ListMeta = in.ListMeta
	// conversion-gen emits an unsafe reinterpret-cast of the whole slice here,
	// which assumes the two element types share an identical memory layout;
	// no per-element copy is performed.
	out.Items = *(*[]unversioned.ImageList)(unsafe.Pointer(&in.Items))
	return nil
}

// Convert_v1_ImageListList_To_unversioned_ImageListList is an autogenerated conversion function.
func Convert_v1_ImageListList_To_unversioned_ImageListList(in *ImageListList, out *unversioned.ImageListList, s conversion.Scope) error {
	return autoConvert_v1_ImageListList_To_unversioned_ImageListList(in, out, s)
}

func autoConvert_unversioned_ImageListList_To_v1_ImageListList(in *unversioned.ImageListList, out *ImageListList, s conversion.Scope) error {
	out.ListMeta = in.ListMeta
	// Same zero-copy unsafe cast as the forward direction.
	out.Items = *(*[]ImageList)(unsafe.Pointer(&in.Items))
	return nil
}

// Convert_unversioned_ImageListList_To_v1_ImageListList is an autogenerated conversion function.
func Convert_unversioned_ImageListList_To_v1_ImageListList(in *unversioned.ImageListList, out *ImageListList, s conversion.Scope) error {
	return autoConvert_unversioned_ImageListList_To_v1_ImageListList(in, out, s)
}
func autoConvert_v1_ImageListSpec_To_unversioned_ImageListSpec(in *ImageListSpec, out *unversioned.ImageListSpec, s conversion.Scope) error {
	// Zero-copy reinterpretation of the string slice; the out spec aliases
	// the in spec's backing array rather than copying it.
	out.Images = *(*[]string)(unsafe.Pointer(&in.Images))
	return nil
}

// Convert_v1_ImageListSpec_To_unversioned_ImageListSpec is an autogenerated conversion function.
func Convert_v1_ImageListSpec_To_unversioned_ImageListSpec(in *ImageListSpec, out *unversioned.ImageListSpec, s conversion.Scope) error {
	return autoConvert_v1_ImageListSpec_To_unversioned_ImageListSpec(in, out, s)
}

func autoConvert_unversioned_ImageListSpec_To_v1_ImageListSpec(in *unversioned.ImageListSpec, out *ImageListSpec, s conversion.Scope) error {
	out.Images = *(*[]string)(unsafe.Pointer(&in.Images))
	return nil
}

// Convert_unversioned_ImageListSpec_To_v1_ImageListSpec is an autogenerated conversion function.
func Convert_unversioned_ImageListSpec_To_v1_ImageListSpec(in *unversioned.ImageListSpec, out *ImageListSpec, s conversion.Scope) error {
	return autoConvert_unversioned_ImageListSpec_To_v1_ImageListSpec(in, out, s)
}
func autoConvert_v1_ImageListStatus_To_unversioned_ImageListStatus(in *ImageListStatus, out *unversioned.ImageListStatus, s conversion.Scope) error {
	// The Timestamp pointer is reinterpreted, not deep-copied: out shares the
	// same metav1.Time value as in.
	out.Timestamp = (*metav1.Time)(unsafe.Pointer(in.Timestamp))
	out.Success = in.Success
	out.Failed = in.Failed
	out.Skipped = in.Skipped
	return nil
}

// Convert_v1_ImageListStatus_To_unversioned_ImageListStatus is an autogenerated conversion function.
func Convert_v1_ImageListStatus_To_unversioned_ImageListStatus(in *ImageListStatus, out *unversioned.ImageListStatus, s conversion.Scope) error {
	return autoConvert_v1_ImageListStatus_To_unversioned_ImageListStatus(in, out, s)
}

func autoConvert_unversioned_ImageListStatus_To_v1_ImageListStatus(in *unversioned.ImageListStatus, out *ImageListStatus, s conversion.Scope) error {
	out.Timestamp = (*metav1.Time)(unsafe.Pointer(in.Timestamp))
	out.Success = in.Success
	out.Failed = in.Failed
	out.Skipped = in.Skipped
	return nil
}

// Convert_unversioned_ImageListStatus_To_v1_ImageListStatus is an autogenerated conversion function.
func Convert_unversioned_ImageListStatus_To_v1_ImageListStatus(in *unversioned.ImageListStatus, out *ImageListStatus, s conversion.Scope) error {
	return autoConvert_unversioned_ImageListStatus_To_v1_ImageListStatus(in, out, s)
}
================================================
FILE: api/v1/zz_generated.deepcopy.go
================================================
//go:build !ignore_autogenerated
/*
Copyright 2021.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by controller-gen. DO NOT EDIT.
package v1
import (
"k8s.io/apimachinery/pkg/runtime"
)
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *Image) DeepCopyInto(out *Image) {
	*out = *in
	// The shallow copy above aliases the slice backing arrays, so the Names
	// and Digests slices are replaced with fresh copies below.
	if in.Names != nil {
		in, out := &in.Names, &out.Names
		*out = make([]string, len(*in))
		copy(*out, *in)
	}
	if in.Digests != nil {
		in, out := &in.Digests, &out.Digests
		*out = make([]string, len(*in))
		copy(*out, *in)
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Image.
func (in *Image) DeepCopy() *Image {
	if in == nil {
		return nil
	}
	out := new(Image)
	in.DeepCopyInto(out)
	return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ImageJob) DeepCopyInto(out *ImageJob) {
	*out = *in
	out.TypeMeta = in.TypeMeta
	// ObjectMeta and Status are delegated to their own deep-copy helpers
	// rather than relying on the shallow struct copy above.
	in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
	in.Status.DeepCopyInto(&out.Status)
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImageJob.
func (in *ImageJob) DeepCopy() *ImageJob {
	if in == nil {
		return nil
	}
	out := new(ImageJob)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
// This method lets *ImageJob satisfy the runtime.Object interface.
func (in *ImageJob) DeepCopyObject() runtime.Object {
	if c := in.DeepCopy(); c != nil {
		return c
	}
	return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ImageJobList) DeepCopyInto(out *ImageJobList) {
	*out = *in
	out.TypeMeta = in.TypeMeta
	in.ListMeta.DeepCopyInto(&out.ListMeta)
	// Each list element is deep-copied so the new list shares no memory
	// with the original.
	if in.Items != nil {
		in, out := &in.Items, &out.Items
		*out = make([]ImageJob, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImageJobList.
func (in *ImageJobList) DeepCopy() *ImageJobList {
	if in == nil {
		return nil
	}
	out := new(ImageJobList)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
// This method lets *ImageJobList satisfy the runtime.Object interface.
func (in *ImageJobList) DeepCopyObject() runtime.Object {
	if c := in.DeepCopy(); c != nil {
		return c
	}
	return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ImageJobStatus) DeepCopyInto(out *ImageJobStatus) {
	*out = *in
	// DeleteAfter is a pointer; clone the pointed-to time so out does not
	// alias in's value.
	if in.DeleteAfter != nil {
		in, out := &in.DeleteAfter, &out.DeleteAfter
		*out = (*in).DeepCopy()
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImageJobStatus.
func (in *ImageJobStatus) DeepCopy() *ImageJobStatus {
	if in == nil {
		return nil
	}
	out := new(ImageJobStatus)
	in.DeepCopyInto(out)
	return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ImageList) DeepCopyInto(out *ImageList) {
	*out = *in
	out.TypeMeta = in.TypeMeta
	// Metadata, Spec, and Status are delegated to their own deep-copy helpers.
	in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
	in.Spec.DeepCopyInto(&out.Spec)
	in.Status.DeepCopyInto(&out.Status)
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImageList.
func (in *ImageList) DeepCopy() *ImageList {
	if in == nil {
		return nil
	}
	out := new(ImageList)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
// This method lets *ImageList satisfy the runtime.Object interface.
func (in *ImageList) DeepCopyObject() runtime.Object {
	if c := in.DeepCopy(); c != nil {
		return c
	}
	return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ImageListList) DeepCopyInto(out *ImageListList) {
	*out = *in
	out.TypeMeta = in.TypeMeta
	in.ListMeta.DeepCopyInto(&out.ListMeta)
	// Each list element is deep-copied so the new list shares no memory
	// with the original.
	if in.Items != nil {
		in, out := &in.Items, &out.Items
		*out = make([]ImageList, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImageListList.
func (in *ImageListList) DeepCopy() *ImageListList {
	if in == nil {
		return nil
	}
	out := new(ImageListList)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
// This method lets *ImageListList satisfy the runtime.Object interface.
func (in *ImageListList) DeepCopyObject() runtime.Object {
	if c := in.DeepCopy(); c != nil {
		return c
	}
	return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ImageListSpec) DeepCopyInto(out *ImageListSpec) {
	*out = *in
	// Replace the aliased Images slice with a fresh copy.
	if in.Images != nil {
		in, out := &in.Images, &out.Images
		*out = make([]string, len(*in))
		copy(*out, *in)
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImageListSpec.
func (in *ImageListSpec) DeepCopy() *ImageListSpec {
	if in == nil {
		return nil
	}
	out := new(ImageListSpec)
	in.DeepCopyInto(out)
	return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ImageListStatus) DeepCopyInto(out *ImageListStatus) {
	*out = *in
	// Timestamp is a pointer; clone the pointed-to time so out does not
	// alias in's value.
	if in.Timestamp != nil {
		in, out := &in.Timestamp, &out.Timestamp
		*out = (*in).DeepCopy()
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImageListStatus.
func (in *ImageListStatus) DeepCopy() *ImageListStatus {
	if in == nil {
		return nil
	}
	out := new(ImageListStatus)
	in.DeepCopyInto(out)
	return out
}
================================================
FILE: api/v1alpha1/config/config.go
================================================
package config
import (
"fmt"
"time"
v1alpha1 "github.com/eraser-dev/eraser/api/v1alpha1"
"github.com/eraser-dev/eraser/version"
"k8s.io/apimachinery/pkg/api/resource"
)
// defaultScannerConfig is the YAML configuration passed to the trivy scanner
// container when the user does not supply their own (see Default below, where
// it is wired into the scanner's ContainerConfig.Config field).
var defaultScannerConfig = `
cacheDir: /var/lib/trivy
dbRepo: ghcr.io/aquasecurity/trivy-db
deleteFailedImages: true
deleteEOLImages: true
vulnerabilities:
  ignoreUnfixed: false
  types:
    - os
    - library
securityChecks: # need to be documented; determined by trivy, not us
  - vuln
severities:
  - CRITICAL
  - HIGH
  - MEDIUM
  - LOW
`

// Cleanup-delay presets used by the default ImageJob configuration.
const (
	noDelay = v1alpha1.Duration(0)
	oneDay  = v1alpha1.Duration(time.Hour * 24)
)
// Default returns a fully-populated v1alpha1 EraserConfig containing the
// built-in defaults: containerd runtime, daily scheduling, cleanup delays,
// the exclude-style node filter, and the collector/scanner/eraser component
// images pinned to this build's version. Callers receive a fresh value on
// every invocation and may mutate it freely.
func Default() *v1alpha1.EraserConfig {
	return &v1alpha1.EraserConfig{
		Manager: v1alpha1.ManagerConfig{
			Runtime:      "containerd",
			OTLPEndpoint: "",
			LogLevel:     "info",
			Scheduling: v1alpha1.ScheduleConfig{
				// oneDay is already a v1alpha1.Duration; the previous
				// v1alpha1.Duration(oneDay) wrapper was a redundant
				// identity conversion.
				RepeatInterval:   oneDay,
				BeginImmediately: true,
			},
			Profile: v1alpha1.ProfileConfig{
				Enabled: false,
				Port:    6060,
			},
			ImageJob: v1alpha1.ImageJobConfig{
				SuccessRatio: 1.0,
				Cleanup: v1alpha1.ImageJobCleanupConfig{
					DelayOnSuccess: noDelay,
					DelayOnFailure: oneDay,
				},
			},
			PullSecrets: []string{},
			NodeFilter: v1alpha1.NodeFilterConfig{
				Type: "exclude",
				Selectors: []string{
					"eraser.sh/cleanup.filter",
				},
			},
		},
		Components: v1alpha1.Components{
			Collector: v1alpha1.OptionalContainerConfig{
				Enabled: false,
				ContainerConfig: v1alpha1.ContainerConfig{
					Image: v1alpha1.RepoTag{
						Repo: repo("collector"),
						Tag:  version.BuildVersion,
					},
					Request: v1alpha1.ResourceRequirements{
						Mem: resource.MustParse("25Mi"),
						CPU: resource.MustParse("7m"),
					},
					Limit: v1alpha1.ResourceRequirements{
						Mem: resource.MustParse("500Mi"),
						// Zero Quantity: no CPU limit is applied.
						CPU: resource.Quantity{},
					},
					Config: nil,
				},
			},
			Scanner: v1alpha1.OptionalContainerConfig{
				Enabled: false,
				ContainerConfig: v1alpha1.ContainerConfig{
					Image: v1alpha1.RepoTag{
						Repo: repo("eraser-trivy-scanner"),
						Tag:  version.BuildVersion,
					},
					Request: v1alpha1.ResourceRequirements{
						Mem: resource.MustParse("500Mi"),
						CPU: resource.MustParse("1000m"),
					},
					Limit: v1alpha1.ResourceRequirements{
						Mem: resource.MustParse("2Gi"),
						CPU: resource.MustParse("1500m"),
					},
					// The scanner ships with a default trivy configuration.
					Config: &defaultScannerConfig,
				},
			},
			Eraser: v1alpha1.ContainerConfig{
				Image: v1alpha1.RepoTag{
					Repo: repo("eraser"),
					Tag:  version.BuildVersion,
				},
				Request: v1alpha1.ResourceRequirements{
					Mem: resource.MustParse("25Mi"),
					CPU: resource.MustParse("7m"),
				},
				Limit: v1alpha1.ResourceRequirements{
					Mem: resource.MustParse("30Mi"),
					// Zero Quantity: no CPU limit is applied.
					CPU: resource.Quantity{},
				},
				Config: nil,
			},
		},
	}
}
// repo builds the fully-qualified image repository for basename, prefixing it
// with the build-time default repository when one is configured. With no
// default repository set, the bare basename is returned unchanged.
func repo(basename string) string {
	if prefix := version.DefaultRepo; prefix != "" {
		return prefix + "/" + basename
	}
	return basename
}
================================================
FILE: api/v1alpha1/custom_conversions.go
================================================
package v1alpha1
import (
unversioned "github.com/eraser-dev/eraser/api/unversioned"
conversion "k8s.io/apimachinery/pkg/conversion"
)
// Convert_v1alpha1_ManagerConfig_To_unversioned_ManagerConfig delegates to the
// generated autoConvert implementation. The function must keep this exact name
// because other generated conversion code references it.
//
//nolint:revive
func Convert_v1alpha1_ManagerConfig_To_unversioned_ManagerConfig(in *ManagerConfig, out *unversioned.ManagerConfig, s conversion.Scope) error {
	return autoConvert_v1alpha1_ManagerConfig_To_unversioned_ManagerConfig(in, out, s)
}
// manualConvert_v1alpha1_Runtime_To_unversioned_RuntimeSpec expands the legacy
// v1alpha1 Runtime (a bare runtime name) into the richer unversioned
// RuntimeSpec, copying the Address that ConvertRuntimeToRuntimeSpec derives
// for that runtime name. It returns an error when the runtime name is not
// recognized by ConvertRuntimeToRuntimeSpec.
//
//nolint:revive
func manualConvert_v1alpha1_Runtime_To_unversioned_RuntimeSpec(in *Runtime, out *unversioned.RuntimeSpec, _ conversion.Scope) error {
	// Both Runtime types are string-based, so convert directly; the previous
	// unversioned.Runtime(string(*in)) round-tripped through an intermediate
	// string conversion for no benefit.
	out.Name = unversioned.Runtime(*in)
	rs, err := unversioned.ConvertRuntimeToRuntimeSpec(out.Name)
	if err != nil {
		return err
	}
	out.Address = rs.Address
	return nil
}
// Convert_v1alpha1_Runtime_To_unversioned_RuntimeSpec is the public entry point
// for the manual Runtime -> RuntimeSpec conversion; the name is fixed by the
// conversion-gen contract.
//
//nolint:revive
func Convert_v1alpha1_Runtime_To_unversioned_RuntimeSpec(in *Runtime, out *unversioned.RuntimeSpec, s conversion.Scope) error {
	return manualConvert_v1alpha1_Runtime_To_unversioned_RuntimeSpec(in, out, s)
}

// Convert_unversioned_ManagerConfig_To_v1alpha1_ManagerConfig delegates to the
// generated autoConvert implementation.
//
//nolint:revive
func Convert_unversioned_ManagerConfig_To_v1alpha1_ManagerConfig(in *unversioned.ManagerConfig, out *ManagerConfig, s conversion.Scope) error {
	return autoConvert_unversioned_ManagerConfig_To_v1alpha1_ManagerConfig(in, out, s)
}

// manualConvert_unversioned_RuntimeSpec_To_v1alpha1_Runtime downgrades the
// unversioned RuntimeSpec to the v1alpha1 Runtime. Only the runtime name
// survives the conversion; the Address field is dropped because v1alpha1
// models the runtime as a bare name.
//
//nolint:revive
func manualConvert_unversioned_RuntimeSpec_To_v1alpha1_Runtime(in *unversioned.RuntimeSpec, out *Runtime, _ conversion.Scope) error {
	*out = Runtime(in.Name)
	return nil
}

// Convert_unversioned_RuntimeSpec_To_v1alpha1_Runtime is the public entry point
// for the manual RuntimeSpec -> Runtime conversion.
//
//nolint:revive
func Convert_unversioned_RuntimeSpec_To_v1alpha1_Runtime(in *unversioned.RuntimeSpec, out *Runtime, s conversion.Scope) error {
	return manualConvert_unversioned_RuntimeSpec_To_v1alpha1_Runtime(in, out, s)
}
================================================
FILE: api/v1alpha1/doc.go
================================================
// Package v1alpha1 contains API Schema definitions for the eraser v1alpha1 API version.
// +k8s:conversion-gen=github.com/eraser-dev/eraser/api/unversioned
package v1alpha1
================================================
FILE: api/v1alpha1/eraserconfig_types.go
================================================
/*
Copyright 2021.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1alpha1
import (
"encoding/json"
"fmt"
"time"
"github.com/eraser-dev/eraser/api/unversioned"
corev1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/conversion"
)
type (
Duration time.Duration
Runtime string
)
const (
RuntimeContainerd Runtime = "containerd"
RuntimeDockerShim Runtime = "dockershim"
RuntimeCrio Runtime = "crio"
)
func (td *Duration) UnmarshalJSON(b []byte) error {
var str string
err := json.Unmarshal(b, &str)
if err != nil {
return err
}
pd, err := time.ParseDuration(str)
if err != nil {
return err
}
*td = Duration(pd)
return nil
}
func (td *Duration) MarshalJSON() ([]byte, error) {
return []byte(fmt.Sprintf(`"%s"`, time.Duration(*td).String())), nil
}
func (r *Runtime) UnmarshalJSON(b []byte) error {
var str string
err := json.Unmarshal(b, &str)
if err != nil {
return err
}
switch rt := Runtime(str); rt {
case RuntimeContainerd, RuntimeDockerShim, RuntimeCrio:
*r = rt
default:
return fmt.Errorf("cannot determine runtime type: %s. valid values are containerd, dockershim, or crio", str)
}
return nil
}
// EDIT THIS FILE! THIS IS SCAFFOLDING FOR YOU TO OWN!
// NOTE: json tags are required. Any new fields you add must have json tags for the fields to be serialized.

// OptionalContainerConfig is a ContainerConfig with an Enabled toggle, used for
// components (collector, scanner) that may be switched off entirely.
type OptionalContainerConfig struct {
	Enabled         bool `json:"enabled,omitempty"`
	ContainerConfig `json:",inline"`
}

// ContainerConfig describes how one component container runs: the image to
// pull, resource requests/limits, an optional opaque configuration string,
// and extra volumes.
type ContainerConfig struct {
	Image   RepoTag              `json:"image,omitempty"`
	Request ResourceRequirements `json:"request,omitempty"`
	Limit   ResourceRequirements `json:"limit,omitempty"`
	Config  *string              `json:"config,omitempty"`
	Volumes []corev1.Volume      `json:"volumes,omitempty"`
}

// ManagerConfig holds the controller-manager-level settings.
type ManagerConfig struct {
	Runtime           Runtime          `json:"runtime,omitempty"`
	OTLPEndpoint      string           `json:"otlpEndpoint,omitempty"`
	LogLevel          string           `json:"logLevel,omitempty"`
	Scheduling        ScheduleConfig   `json:"scheduling,omitempty"`
	Profile           ProfileConfig    `json:"profile,omitempty"`
	ImageJob          ImageJobConfig   `json:"imageJob,omitempty"`
	PullSecrets       []string         `json:"pullSecrets,omitempty"`
	NodeFilter        NodeFilterConfig `json:"nodeFilter,omitempty"`
	PriorityClassName string           `json:"priorityClassName,omitempty"`
}

// ScheduleConfig controls when cleanup jobs are scheduled: how often they
// repeat and whether one starts immediately.
type ScheduleConfig struct {
	RepeatInterval   Duration `json:"repeatInterval,omitempty"`
	BeginImmediately bool     `json:"beginImmediately,omitempty"`
}

// ProfileConfig toggles the profiling endpoint and the port it listens on.
type ProfileConfig struct {
	Enabled bool `json:"enabled,omitempty"`
	Port    int  `json:"port,omitempty"`
}

// ImageJobConfig configures image-job success criteria and cleanup timing.
// NOTE(review): SuccessRatio is presumably the fraction of pods that must
// succeed for the job to count as successful — confirm against the ImageJob
// controller.
type ImageJobConfig struct {
	SuccessRatio float64               `json:"successRatio,omitempty"`
	Cleanup      ImageJobCleanupConfig `json:"cleanup,omitempty"`
}

// ImageJobCleanupConfig sets how long finished jobs are kept before deletion,
// with separate delays for successful and failed jobs.
type ImageJobCleanupConfig struct {
	DelayOnSuccess Duration `json:"delayOnSuccess,omitempty"`
	DelayOnFailure Duration `json:"delayOnFailure,omitempty"`
}

// NodeFilterConfig selects which nodes jobs run on via selectors and a filter
// type (e.g. "exclude" in the defaults).
type NodeFilterConfig struct {
	Type      string   `json:"type,omitempty"`
	Selectors []string `json:"selectors,omitempty"`
}

// ResourceRequirements pairs memory and CPU quantities for a request or limit.
type ResourceRequirements struct {
	Mem resource.Quantity `json:"mem,omitempty"`
	CPU resource.Quantity `json:"cpu,omitempty"`
}

// RepoTag identifies a container image by repository and tag.
type RepoTag struct {
	Repo string `json:"repo,omitempty"`
	Tag  string `json:"tag,omitempty"`
}

// Components groups the per-component container configurations. In later API
// versions the Eraser field is renamed to Remover (see the conversion
// functions below).
type Components struct {
	Collector OptionalContainerConfig `json:"collector,omitempty"`
	Scanner   OptionalContainerConfig `json:"scanner,omitempty"`
	Eraser    ContainerConfig         `json:"eraser,omitempty"`
}

//+kubebuilder:object:root=true

// EraserConfig is the Schema for the eraserconfigs API.
type EraserConfig struct {
	metav1.TypeMeta `json:",inline"`
	Manager         ManagerConfig `json:"manager"`
	Components      Components    `json:"components"`
}

func init() {
	SchemeBuilder.Register(&EraserConfig{})
}
// Convert_v1alpha1_Components_To_unversioned_Components converts the v1alpha1
// Components into the unversioned form. It is written by hand because later
// API versions (v1alpha2 onward) renamed .Components.Eraser to
// .Components.Remover, a rename conversion-gen cannot derive automatically —
// it only emits non-working stubs. The function name must not change: other
// generated conversion functions call it by this exact name.
func Convert_v1alpha1_Components_To_unversioned_Components(in *Components, out *unversioned.Components, s conversion.Scope) error { //nolint:revive
	err := Convert_v1alpha1_OptionalContainerConfig_To_unversioned_OptionalContainerConfig(&in.Collector, &out.Collector, s)
	if err == nil {
		err = Convert_v1alpha1_OptionalContainerConfig_To_unversioned_OptionalContainerConfig(&in.Scanner, &out.Scanner, s)
	}
	if err == nil {
		// The renamed field: v1alpha1 Eraser maps to unversioned Remover.
		err = Convert_v1alpha1_ContainerConfig_To_unversioned_ContainerConfig(&in.Eraser, &out.Remover, s)
	}
	return err
}
// Convert_unversioned_Components_To_v1alpha1_Components is the hand-written
// reverse conversion: the unversioned Remover field maps back onto the
// v1alpha1 Eraser field. The name is fixed by the conversion-gen contract.
func Convert_unversioned_Components_To_v1alpha1_Components(in *unversioned.Components, out *Components, s conversion.Scope) error { //nolint:revive
	err := Convert_unversioned_OptionalContainerConfig_To_v1alpha1_OptionalContainerConfig(&in.Collector, &out.Collector, s)
	if err == nil {
		err = Convert_unversioned_OptionalContainerConfig_To_v1alpha1_OptionalContainerConfig(&in.Scanner, &out.Scanner, s)
	}
	if err == nil {
		// The renamed field: unversioned Remover maps to v1alpha1 Eraser.
		err = Convert_unversioned_ContainerConfig_To_v1alpha1_ContainerConfig(&in.Remover, &out.Eraser, s)
	}
	return err
}
================================================
FILE: api/v1alpha1/groupversion_info.go
================================================
/*
Copyright 2021.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Package v1alpha1 contains API Schema definitions for the eraser.sh v1alpha1 API group
// +kubebuilder:object:generate=true
// +groupName=eraser.sh
package v1alpha1
import (
runtime "k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
"sigs.k8s.io/controller-runtime/pkg/scheme"
)
var (
	// GroupVersion is group version used to register these objects.
	GroupVersion = schema.GroupVersion{Group: "eraser.sh", Version: "v1alpha1"}
	// SchemeBuilder is used to add go types to the GroupVersionKind scheme.
	SchemeBuilder = &scheme.Builder{GroupVersion: GroupVersion}
	// localSchemeBuilder is used by the generated conversion code
	// (zz_generated.conversion.go) to register its conversion functions
	// alongside the types above.
	localSchemeBuilder = runtime.NewSchemeBuilder(SchemeBuilder.AddToScheme)
	// AddToScheme adds the types in this group-version to the given scheme.
	AddToScheme = SchemeBuilder.AddToScheme
)
================================================
FILE: api/v1alpha1/imagejob_types.go
================================================
/*
Copyright 2021.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1alpha1
import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
// Image describes a single container image: its identifier plus any names
// (tags) and digests associated with it.
type Image struct {
	// ImageID is the image's unique identifier.
	ImageID string `json:"image_id"`
	// Names lists the image's names (tags), if any.
	Names []string `json:"names,omitempty"`
	// Digests lists the image's digests, if any.
	Digests []string `json:"digests,omitempty"`
}

// EDIT THIS FILE! THIS IS SCAFFOLDING FOR YOU TO OWN!
// NOTE: json tags are required. Any new fields you add must have json tags for the fields to be serialized.

// JobPhase defines the phase of an ImageJob status.
type JobPhase string

const (
	// PhaseRunning indicates the job is still in progress.
	PhaseRunning JobPhase = "Running"
	// PhaseCompleted indicates the job finished successfully.
	PhaseCompleted JobPhase = "Completed"
	// PhaseFailed indicates the job failed.
	PhaseFailed JobPhase = "Failed"
)

// ImageJobStatus defines the observed state of ImageJob.
type ImageJobStatus struct {
	// number of pods that failed
	Failed int `json:"failed"`
	// number of pods that completed successfully
	Succeeded int `json:"succeeded"`
	// desired number of pods
	Desired int `json:"desired"`
	// number of nodes that were skipped e.g. because they are not a linux node
	Skipped int `json:"skipped"`
	// job running, successfully completed, or failed
	Phase JobPhase `json:"phase"`
	// Time to delay deletion until
	DeleteAfter *metav1.Time `json:"deleteAfter,omitempty"`
}

// +kubebuilder:object:root=true
// +kubebuilder:subresource:status
// +kubebuilder:resource:scope="Cluster"
// +kubebuilder:deprecatedversion:warning="v1alpha1 of the eraser API has been deprecated. Please migrate to v1."

// ImageJob is the Schema for the imagejobs API.
type ImageJob struct {
	metav1.TypeMeta   `json:",inline"`
	metav1.ObjectMeta `json:"metadata,omitempty"`
	Status            ImageJobStatus `json:"status,omitempty"`
}

// +kubebuilder:object:root=true

// ImageJobList contains a list of ImageJob.
type ImageJobList struct {
	metav1.TypeMeta `json:",inline"`
	metav1.ListMeta `json:"metadata,omitempty"`
	Items           []ImageJob `json:"items"`
}

func init() {
	SchemeBuilder.Register(&ImageJob{}, &ImageJobList{})
}
================================================
FILE: api/v1alpha1/imagelist_types.go
================================================
/*
Copyright 2021.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1alpha1
import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
// ImageListSpec defines the desired state of ImageList.
type ImageListSpec struct {
	// The list of non-compliant images to delete if non-running.
	Images []string `json:"images"`
}

// ImageListStatus defines the observed state of ImageList.
type ImageListStatus struct {
	// Information when the job was completed.
	Timestamp *metav1.Time `json:"timestamp"`
	// Number of nodes that successfully ran the job
	Success int64 `json:"success"`
	// Number of nodes that failed to run the job
	Failed int64 `json:"failed"`
	// Number of nodes that were skipped due to a skip selector
	Skipped int64 `json:"skipped"`
}

// +kubebuilder:object:root=true
// +kubebuilder:subresource:status
// +kubebuilder:resource:scope="Cluster"
// +kubebuilder:deprecatedversion:warning="v1alpha1 of the eraser API has been deprecated. Please migrate to v1."

// ImageList is the Schema for the imagelists API.
type ImageList struct {
	metav1.TypeMeta   `json:",inline"`
	metav1.ObjectMeta `json:"metadata,omitempty"`
	Spec              ImageListSpec   `json:"spec,omitempty"`
	Status            ImageListStatus `json:"status,omitempty"`
}

// +kubebuilder:object:root=true

// ImageListList contains a list of ImageList.
type ImageListList struct {
	metav1.TypeMeta `json:",inline"`
	metav1.ListMeta `json:"metadata,omitempty"`
	Items           []ImageList `json:"items"`
}

func init() {
	SchemeBuilder.Register(&ImageList{}, &ImageListList{})
}
================================================
FILE: api/v1alpha1/zz_generated.conversion.go
================================================
//go:build !ignore_autogenerated
// +build !ignore_autogenerated
/*
Copyright 2021.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by conversion-gen. DO NOT EDIT.
package v1alpha1
import (
unsafe "unsafe"
unversioned "github.com/eraser-dev/eraser/api/unversioned"
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
conversion "k8s.io/apimachinery/pkg/conversion"
runtime "k8s.io/apimachinery/pkg/runtime"
)
// init wires the generated conversion functions into the package's
// localSchemeBuilder so they are registered whenever AddToScheme runs.
func init() {
	localSchemeBuilder.Register(RegisterConversions)
}
// RegisterConversions adds conversion functions to the given scheme.
// Public to allow building arbitrary schemes.
//
// The AddGeneratedConversionFunc calls install fully generated conversions
// between v1alpha1 and the internal "unversioned" hub types. The
// AddConversionFunc calls at the end install conversions whose bodies are
// written by hand (see custom_conversions.go) because some fields have no
// peer in the other type (e.g. Components.Eraser / Components.Remover).
func RegisterConversions(s *runtime.Scheme) error {
	if err := s.AddGeneratedConversionFunc((*ContainerConfig)(nil), (*unversioned.ContainerConfig)(nil), func(a, b interface{}, scope conversion.Scope) error {
		return Convert_v1alpha1_ContainerConfig_To_unversioned_ContainerConfig(a.(*ContainerConfig), b.(*unversioned.ContainerConfig), scope)
	}); err != nil {
		return err
	}
	if err := s.AddGeneratedConversionFunc((*unversioned.ContainerConfig)(nil), (*ContainerConfig)(nil), func(a, b interface{}, scope conversion.Scope) error {
		return Convert_unversioned_ContainerConfig_To_v1alpha1_ContainerConfig(a.(*unversioned.ContainerConfig), b.(*ContainerConfig), scope)
	}); err != nil {
		return err
	}
	if err := s.AddGeneratedConversionFunc((*EraserConfig)(nil), (*unversioned.EraserConfig)(nil), func(a, b interface{}, scope conversion.Scope) error {
		return Convert_v1alpha1_EraserConfig_To_unversioned_EraserConfig(a.(*EraserConfig), b.(*unversioned.EraserConfig), scope)
	}); err != nil {
		return err
	}
	if err := s.AddGeneratedConversionFunc((*unversioned.EraserConfig)(nil), (*EraserConfig)(nil), func(a, b interface{}, scope conversion.Scope) error {
		return Convert_unversioned_EraserConfig_To_v1alpha1_EraserConfig(a.(*unversioned.EraserConfig), b.(*EraserConfig), scope)
	}); err != nil {
		return err
	}
	if err := s.AddGeneratedConversionFunc((*Image)(nil), (*unversioned.Image)(nil), func(a, b interface{}, scope conversion.Scope) error {
		return Convert_v1alpha1_Image_To_unversioned_Image(a.(*Image), b.(*unversioned.Image), scope)
	}); err != nil {
		return err
	}
	if err := s.AddGeneratedConversionFunc((*unversioned.Image)(nil), (*Image)(nil), func(a, b interface{}, scope conversion.Scope) error {
		return Convert_unversioned_Image_To_v1alpha1_Image(a.(*unversioned.Image), b.(*Image), scope)
	}); err != nil {
		return err
	}
	if err := s.AddGeneratedConversionFunc((*ImageJob)(nil), (*unversioned.ImageJob)(nil), func(a, b interface{}, scope conversion.Scope) error {
		return Convert_v1alpha1_ImageJob_To_unversioned_ImageJob(a.(*ImageJob), b.(*unversioned.ImageJob), scope)
	}); err != nil {
		return err
	}
	if err := s.AddGeneratedConversionFunc((*unversioned.ImageJob)(nil), (*ImageJob)(nil), func(a, b interface{}, scope conversion.Scope) error {
		return Convert_unversioned_ImageJob_To_v1alpha1_ImageJob(a.(*unversioned.ImageJob), b.(*ImageJob), scope)
	}); err != nil {
		return err
	}
	if err := s.AddGeneratedConversionFunc((*ImageJobCleanupConfig)(nil), (*unversioned.ImageJobCleanupConfig)(nil), func(a, b interface{}, scope conversion.Scope) error {
		return Convert_v1alpha1_ImageJobCleanupConfig_To_unversioned_ImageJobCleanupConfig(a.(*ImageJobCleanupConfig), b.(*unversioned.ImageJobCleanupConfig), scope)
	}); err != nil {
		return err
	}
	if err := s.AddGeneratedConversionFunc((*unversioned.ImageJobCleanupConfig)(nil), (*ImageJobCleanupConfig)(nil), func(a, b interface{}, scope conversion.Scope) error {
		return Convert_unversioned_ImageJobCleanupConfig_To_v1alpha1_ImageJobCleanupConfig(a.(*unversioned.ImageJobCleanupConfig), b.(*ImageJobCleanupConfig), scope)
	}); err != nil {
		return err
	}
	if err := s.AddGeneratedConversionFunc((*ImageJobConfig)(nil), (*unversioned.ImageJobConfig)(nil), func(a, b interface{}, scope conversion.Scope) error {
		return Convert_v1alpha1_ImageJobConfig_To_unversioned_ImageJobConfig(a.(*ImageJobConfig), b.(*unversioned.ImageJobConfig), scope)
	}); err != nil {
		return err
	}
	if err := s.AddGeneratedConversionFunc((*unversioned.ImageJobConfig)(nil), (*ImageJobConfig)(nil), func(a, b interface{}, scope conversion.Scope) error {
		return Convert_unversioned_ImageJobConfig_To_v1alpha1_ImageJobConfig(a.(*unversioned.ImageJobConfig), b.(*ImageJobConfig), scope)
	}); err != nil {
		return err
	}
	if err := s.AddGeneratedConversionFunc((*ImageJobList)(nil), (*unversioned.ImageJobList)(nil), func(a, b interface{}, scope conversion.Scope) error {
		return Convert_v1alpha1_ImageJobList_To_unversioned_ImageJobList(a.(*ImageJobList), b.(*unversioned.ImageJobList), scope)
	}); err != nil {
		return err
	}
	if err := s.AddGeneratedConversionFunc((*unversioned.ImageJobList)(nil), (*ImageJobList)(nil), func(a, b interface{}, scope conversion.Scope) error {
		return Convert_unversioned_ImageJobList_To_v1alpha1_ImageJobList(a.(*unversioned.ImageJobList), b.(*ImageJobList), scope)
	}); err != nil {
		return err
	}
	if err := s.AddGeneratedConversionFunc((*ImageJobStatus)(nil), (*unversioned.ImageJobStatus)(nil), func(a, b interface{}, scope conversion.Scope) error {
		return Convert_v1alpha1_ImageJobStatus_To_unversioned_ImageJobStatus(a.(*ImageJobStatus), b.(*unversioned.ImageJobStatus), scope)
	}); err != nil {
		return err
	}
	if err := s.AddGeneratedConversionFunc((*unversioned.ImageJobStatus)(nil), (*ImageJobStatus)(nil), func(a, b interface{}, scope conversion.Scope) error {
		return Convert_unversioned_ImageJobStatus_To_v1alpha1_ImageJobStatus(a.(*unversioned.ImageJobStatus), b.(*ImageJobStatus), scope)
	}); err != nil {
		return err
	}
	if err := s.AddGeneratedConversionFunc((*ImageList)(nil), (*unversioned.ImageList)(nil), func(a, b interface{}, scope conversion.Scope) error {
		return Convert_v1alpha1_ImageList_To_unversioned_ImageList(a.(*ImageList), b.(*unversioned.ImageList), scope)
	}); err != nil {
		return err
	}
	if err := s.AddGeneratedConversionFunc((*unversioned.ImageList)(nil), (*ImageList)(nil), func(a, b interface{}, scope conversion.Scope) error {
		return Convert_unversioned_ImageList_To_v1alpha1_ImageList(a.(*unversioned.ImageList), b.(*ImageList), scope)
	}); err != nil {
		return err
	}
	if err := s.AddGeneratedConversionFunc((*ImageListList)(nil), (*unversioned.ImageListList)(nil), func(a, b interface{}, scope conversion.Scope) error {
		return Convert_v1alpha1_ImageListList_To_unversioned_ImageListList(a.(*ImageListList), b.(*unversioned.ImageListList), scope)
	}); err != nil {
		return err
	}
	if err := s.AddGeneratedConversionFunc((*unversioned.ImageListList)(nil), (*ImageListList)(nil), func(a, b interface{}, scope conversion.Scope) error {
		return Convert_unversioned_ImageListList_To_v1alpha1_ImageListList(a.(*unversioned.ImageListList), b.(*ImageListList), scope)
	}); err != nil {
		return err
	}
	if err := s.AddGeneratedConversionFunc((*ImageListSpec)(nil), (*unversioned.ImageListSpec)(nil), func(a, b interface{}, scope conversion.Scope) error {
		return Convert_v1alpha1_ImageListSpec_To_unversioned_ImageListSpec(a.(*ImageListSpec), b.(*unversioned.ImageListSpec), scope)
	}); err != nil {
		return err
	}
	if err := s.AddGeneratedConversionFunc((*unversioned.ImageListSpec)(nil), (*ImageListSpec)(nil), func(a, b interface{}, scope conversion.Scope) error {
		return Convert_unversioned_ImageListSpec_To_v1alpha1_ImageListSpec(a.(*unversioned.ImageListSpec), b.(*ImageListSpec), scope)
	}); err != nil {
		return err
	}
	if err := s.AddGeneratedConversionFunc((*ImageListStatus)(nil), (*unversioned.ImageListStatus)(nil), func(a, b interface{}, scope conversion.Scope) error {
		return Convert_v1alpha1_ImageListStatus_To_unversioned_ImageListStatus(a.(*ImageListStatus), b.(*unversioned.ImageListStatus), scope)
	}); err != nil {
		return err
	}
	if err := s.AddGeneratedConversionFunc((*unversioned.ImageListStatus)(nil), (*ImageListStatus)(nil), func(a, b interface{}, scope conversion.Scope) error {
		return Convert_unversioned_ImageListStatus_To_v1alpha1_ImageListStatus(a.(*unversioned.ImageListStatus), b.(*ImageListStatus), scope)
	}); err != nil {
		return err
	}
	if err := s.AddGeneratedConversionFunc((*NodeFilterConfig)(nil), (*unversioned.NodeFilterConfig)(nil), func(a, b interface{}, scope conversion.Scope) error {
		return Convert_v1alpha1_NodeFilterConfig_To_unversioned_NodeFilterConfig(a.(*NodeFilterConfig), b.(*unversioned.NodeFilterConfig), scope)
	}); err != nil {
		return err
	}
	if err := s.AddGeneratedConversionFunc((*unversioned.NodeFilterConfig)(nil), (*NodeFilterConfig)(nil), func(a, b interface{}, scope conversion.Scope) error {
		return Convert_unversioned_NodeFilterConfig_To_v1alpha1_NodeFilterConfig(a.(*unversioned.NodeFilterConfig), b.(*NodeFilterConfig), scope)
	}); err != nil {
		return err
	}
	if err := s.AddGeneratedConversionFunc((*OptionalContainerConfig)(nil), (*unversioned.OptionalContainerConfig)(nil), func(a, b interface{}, scope conversion.Scope) error {
		return Convert_v1alpha1_OptionalContainerConfig_To_unversioned_OptionalContainerConfig(a.(*OptionalContainerConfig), b.(*unversioned.OptionalContainerConfig), scope)
	}); err != nil {
		return err
	}
	if err := s.AddGeneratedConversionFunc((*unversioned.OptionalContainerConfig)(nil), (*OptionalContainerConfig)(nil), func(a, b interface{}, scope conversion.Scope) error {
		return Convert_unversioned_OptionalContainerConfig_To_v1alpha1_OptionalContainerConfig(a.(*unversioned.OptionalContainerConfig), b.(*OptionalContainerConfig), scope)
	}); err != nil {
		return err
	}
	if err := s.AddGeneratedConversionFunc((*ProfileConfig)(nil), (*unversioned.ProfileConfig)(nil), func(a, b interface{}, scope conversion.Scope) error {
		return Convert_v1alpha1_ProfileConfig_To_unversioned_ProfileConfig(a.(*ProfileConfig), b.(*unversioned.ProfileConfig), scope)
	}); err != nil {
		return err
	}
	if err := s.AddGeneratedConversionFunc((*unversioned.ProfileConfig)(nil), (*ProfileConfig)(nil), func(a, b interface{}, scope conversion.Scope) error {
		return Convert_unversioned_ProfileConfig_To_v1alpha1_ProfileConfig(a.(*unversioned.ProfileConfig), b.(*ProfileConfig), scope)
	}); err != nil {
		return err
	}
	if err := s.AddGeneratedConversionFunc((*RepoTag)(nil), (*unversioned.RepoTag)(nil), func(a, b interface{}, scope conversion.Scope) error {
		return Convert_v1alpha1_RepoTag_To_unversioned_RepoTag(a.(*RepoTag), b.(*unversioned.RepoTag), scope)
	}); err != nil {
		return err
	}
	if err := s.AddGeneratedConversionFunc((*unversioned.RepoTag)(nil), (*RepoTag)(nil), func(a, b interface{}, scope conversion.Scope) error {
		return Convert_unversioned_RepoTag_To_v1alpha1_RepoTag(a.(*unversioned.RepoTag), b.(*RepoTag), scope)
	}); err != nil {
		return err
	}
	if err := s.AddGeneratedConversionFunc((*ResourceRequirements)(nil), (*unversioned.ResourceRequirements)(nil), func(a, b interface{}, scope conversion.Scope) error {
		return Convert_v1alpha1_ResourceRequirements_To_unversioned_ResourceRequirements(a.(*ResourceRequirements), b.(*unversioned.ResourceRequirements), scope)
	}); err != nil {
		return err
	}
	if err := s.AddGeneratedConversionFunc((*unversioned.ResourceRequirements)(nil), (*ResourceRequirements)(nil), func(a, b interface{}, scope conversion.Scope) error {
		return Convert_unversioned_ResourceRequirements_To_v1alpha1_ResourceRequirements(a.(*unversioned.ResourceRequirements), b.(*ResourceRequirements), scope)
	}); err != nil {
		return err
	}
	if err := s.AddGeneratedConversionFunc((*ScheduleConfig)(nil), (*unversioned.ScheduleConfig)(nil), func(a, b interface{}, scope conversion.Scope) error {
		return Convert_v1alpha1_ScheduleConfig_To_unversioned_ScheduleConfig(a.(*ScheduleConfig), b.(*unversioned.ScheduleConfig), scope)
	}); err != nil {
		return err
	}
	if err := s.AddGeneratedConversionFunc((*unversioned.ScheduleConfig)(nil), (*ScheduleConfig)(nil), func(a, b interface{}, scope conversion.Scope) error {
		return Convert_unversioned_ScheduleConfig_To_v1alpha1_ScheduleConfig(a.(*unversioned.ScheduleConfig), b.(*ScheduleConfig), scope)
	}); err != nil {
		return err
	}
	// The remaining conversions are hand-written (AddConversionFunc rather
	// than AddGeneratedConversionFunc) because these type pairs have fields
	// with no counterpart in the peer type.
	if err := s.AddConversionFunc((*unversioned.Components)(nil), (*Components)(nil), func(a, b interface{}, scope conversion.Scope) error {
		return Convert_unversioned_Components_To_v1alpha1_Components(a.(*unversioned.Components), b.(*Components), scope)
	}); err != nil {
		return err
	}
	if err := s.AddConversionFunc((*unversioned.ManagerConfig)(nil), (*ManagerConfig)(nil), func(a, b interface{}, scope conversion.Scope) error {
		return Convert_unversioned_ManagerConfig_To_v1alpha1_ManagerConfig(a.(*unversioned.ManagerConfig), b.(*ManagerConfig), scope)
	}); err != nil {
		return err
	}
	if err := s.AddConversionFunc((*unversioned.RuntimeSpec)(nil), (*Runtime)(nil), func(a, b interface{}, scope conversion.Scope) error {
		return Convert_unversioned_RuntimeSpec_To_v1alpha1_Runtime(a.(*unversioned.RuntimeSpec), b.(*Runtime), scope)
	}); err != nil {
		return err
	}
	if err := s.AddConversionFunc((*Components)(nil), (*unversioned.Components)(nil), func(a, b interface{}, scope conversion.Scope) error {
		return Convert_v1alpha1_Components_To_unversioned_Components(a.(*Components), b.(*unversioned.Components), scope)
	}); err != nil {
		return err
	}
	if err := s.AddConversionFunc((*ManagerConfig)(nil), (*unversioned.ManagerConfig)(nil), func(a, b interface{}, scope conversion.Scope) error {
		return Convert_v1alpha1_ManagerConfig_To_unversioned_ManagerConfig(a.(*ManagerConfig), b.(*unversioned.ManagerConfig), scope)
	}); err != nil {
		return err
	}
	if err := s.AddConversionFunc((*Runtime)(nil), (*unversioned.RuntimeSpec)(nil), func(a, b interface{}, scope conversion.Scope) error {
		return Convert_v1alpha1_Runtime_To_unversioned_RuntimeSpec(a.(*Runtime), b.(*unversioned.RuntimeSpec), scope)
	}); err != nil {
		return err
	}
	return nil
}
// autoConvert_v1alpha1_Components_To_unversioned_Components converts the
// Collector and Scanner fields; the v1alpha1-only Eraser field must be
// handled by the manual Convert_v1alpha1_Components_To_unversioned_Components
// registered in RegisterConversions.
func autoConvert_v1alpha1_Components_To_unversioned_Components(in *Components, out *unversioned.Components, s conversion.Scope) error {
	if err := Convert_v1alpha1_OptionalContainerConfig_To_unversioned_OptionalContainerConfig(&in.Collector, &out.Collector, s); err != nil {
		return err
	}
	if err := Convert_v1alpha1_OptionalContainerConfig_To_unversioned_OptionalContainerConfig(&in.Scanner, &out.Scanner, s); err != nil {
		return err
	}
	// WARNING: in.Eraser requires manual conversion: does not exist in peer-type
	return nil
}

// autoConvert_unversioned_Components_To_v1alpha1_Components converts the
// Collector and Scanner fields; the unversioned-only Remover field must be
// handled by the manual Convert_unversioned_Components_To_v1alpha1_Components
// registered in RegisterConversions.
func autoConvert_unversioned_Components_To_v1alpha1_Components(in *unversioned.Components, out *Components, s conversion.Scope) error {
	if err := Convert_unversioned_OptionalContainerConfig_To_v1alpha1_OptionalContainerConfig(&in.Collector, &out.Collector, s); err != nil {
		return err
	}
	if err := Convert_unversioned_OptionalContainerConfig_To_v1alpha1_OptionalContainerConfig(&in.Scanner, &out.Scanner, s); err != nil {
		return err
	}
	// WARNING: in.Remover requires manual conversion: does not exist in peer-type
	return nil
}
// autoConvert_v1alpha1_ContainerConfig_To_unversioned_ContainerConfig copies
// a v1alpha1 ContainerConfig into its unversioned peer field by field.
func autoConvert_v1alpha1_ContainerConfig_To_unversioned_ContainerConfig(in *ContainerConfig, out *unversioned.ContainerConfig, s conversion.Scope) error {
	if err := Convert_v1alpha1_RepoTag_To_unversioned_RepoTag(&in.Image, &out.Image, s); err != nil {
		return err
	}
	if err := Convert_v1alpha1_ResourceRequirements_To_unversioned_ResourceRequirements(&in.Request, &out.Request, s); err != nil {
		return err
	}
	if err := Convert_v1alpha1_ResourceRequirements_To_unversioned_ResourceRequirements(&in.Limit, &out.Limit, s); err != nil {
		return err
	}
	// Pointer casts: the generator treats these fields as layout-identical
	// across versions, so the output aliases the input's storage (no copy).
	out.Config = (*string)(unsafe.Pointer(in.Config))
	out.Volumes = *(*[]v1.Volume)(unsafe.Pointer(&in.Volumes))
	return nil
}

// Convert_v1alpha1_ContainerConfig_To_unversioned_ContainerConfig is an autogenerated conversion function.
func Convert_v1alpha1_ContainerConfig_To_unversioned_ContainerConfig(in *ContainerConfig, out *unversioned.ContainerConfig, s conversion.Scope) error {
	return autoConvert_v1alpha1_ContainerConfig_To_unversioned_ContainerConfig(in, out, s)
}

// autoConvert_unversioned_ContainerConfig_To_v1alpha1_ContainerConfig is the
// inverse of the conversion above.
func autoConvert_unversioned_ContainerConfig_To_v1alpha1_ContainerConfig(in *unversioned.ContainerConfig, out *ContainerConfig, s conversion.Scope) error {
	if err := Convert_unversioned_RepoTag_To_v1alpha1_RepoTag(&in.Image, &out.Image, s); err != nil {
		return err
	}
	if err := Convert_unversioned_ResourceRequirements_To_v1alpha1_ResourceRequirements(&in.Request, &out.Request, s); err != nil {
		return err
	}
	if err := Convert_unversioned_ResourceRequirements_To_v1alpha1_ResourceRequirements(&in.Limit, &out.Limit, s); err != nil {
		return err
	}
	// Zero-copy pointer casts; output aliases input storage.
	out.Config = (*string)(unsafe.Pointer(in.Config))
	out.Volumes = *(*[]v1.Volume)(unsafe.Pointer(&in.Volumes))
	return nil
}

// Convert_unversioned_ContainerConfig_To_v1alpha1_ContainerConfig is an autogenerated conversion function.
func Convert_unversioned_ContainerConfig_To_v1alpha1_ContainerConfig(in *unversioned.ContainerConfig, out *ContainerConfig, s conversion.Scope) error {
	return autoConvert_unversioned_ContainerConfig_To_v1alpha1_ContainerConfig(in, out, s)
}
// autoConvert_v1alpha1_EraserConfig_To_unversioned_EraserConfig converts the
// top-level config by delegating to the Manager and Components conversions.
func autoConvert_v1alpha1_EraserConfig_To_unversioned_EraserConfig(in *EraserConfig, out *unversioned.EraserConfig, s conversion.Scope) error {
	if err := Convert_v1alpha1_ManagerConfig_To_unversioned_ManagerConfig(&in.Manager, &out.Manager, s); err != nil {
		return err
	}
	if err := Convert_v1alpha1_Components_To_unversioned_Components(&in.Components, &out.Components, s); err != nil {
		return err
	}
	return nil
}

// Convert_v1alpha1_EraserConfig_To_unversioned_EraserConfig is an autogenerated conversion function.
func Convert_v1alpha1_EraserConfig_To_unversioned_EraserConfig(in *EraserConfig, out *unversioned.EraserConfig, s conversion.Scope) error {
	return autoConvert_v1alpha1_EraserConfig_To_unversioned_EraserConfig(in, out, s)
}

// autoConvert_unversioned_EraserConfig_To_v1alpha1_EraserConfig is the
// inverse of the conversion above.
func autoConvert_unversioned_EraserConfig_To_v1alpha1_EraserConfig(in *unversioned.EraserConfig, out *EraserConfig, s conversion.Scope) error {
	if err := Convert_unversioned_ManagerConfig_To_v1alpha1_ManagerConfig(&in.Manager, &out.Manager, s); err != nil {
		return err
	}
	if err := Convert_unversioned_Components_To_v1alpha1_Components(&in.Components, &out.Components, s); err != nil {
		return err
	}
	return nil
}

// Convert_unversioned_EraserConfig_To_v1alpha1_EraserConfig is an autogenerated conversion function.
func Convert_unversioned_EraserConfig_To_v1alpha1_EraserConfig(in *unversioned.EraserConfig, out *EraserConfig, s conversion.Scope) error {
	return autoConvert_unversioned_EraserConfig_To_v1alpha1_EraserConfig(in, out, s)
}
// autoConvert_v1alpha1_Image_To_unversioned_Image copies an Image record.
// The Names/Digests slice casts are zero-copy: the output slices alias the
// input's backing arrays.
func autoConvert_v1alpha1_Image_To_unversioned_Image(in *Image, out *unversioned.Image, s conversion.Scope) error {
	out.ImageID = in.ImageID
	out.Names = *(*[]string)(unsafe.Pointer(&in.Names))
	out.Digests = *(*[]string)(unsafe.Pointer(&in.Digests))
	return nil
}

// Convert_v1alpha1_Image_To_unversioned_Image is an autogenerated conversion function.
func Convert_v1alpha1_Image_To_unversioned_Image(in *Image, out *unversioned.Image, s conversion.Scope) error {
	return autoConvert_v1alpha1_Image_To_unversioned_Image(in, out, s)
}

// autoConvert_unversioned_Image_To_v1alpha1_Image is the inverse of the
// conversion above; slices alias the input storage.
func autoConvert_unversioned_Image_To_v1alpha1_Image(in *unversioned.Image, out *Image, s conversion.Scope) error {
	out.ImageID = in.ImageID
	out.Names = *(*[]string)(unsafe.Pointer(&in.Names))
	out.Digests = *(*[]string)(unsafe.Pointer(&in.Digests))
	return nil
}

// Convert_unversioned_Image_To_v1alpha1_Image is an autogenerated conversion function.
func Convert_unversioned_Image_To_v1alpha1_Image(in *unversioned.Image, out *Image, s conversion.Scope) error {
	return autoConvert_unversioned_Image_To_v1alpha1_Image(in, out, s)
}
// autoConvert_v1alpha1_ImageJob_To_unversioned_ImageJob copies ObjectMeta and
// converts Status. NOTE(review): TypeMeta is not copied here — conversion-gen
// normally leaves kind/apiVersion to the scheme machinery; confirm if relied on.
func autoConvert_v1alpha1_ImageJob_To_unversioned_ImageJob(in *ImageJob, out *unversioned.ImageJob, s conversion.Scope) error {
	out.ObjectMeta = in.ObjectMeta
	if err := Convert_v1alpha1_ImageJobStatus_To_unversioned_ImageJobStatus(&in.Status, &out.Status, s); err != nil {
		return err
	}
	return nil
}

// Convert_v1alpha1_ImageJob_To_unversioned_ImageJob is an autogenerated conversion function.
func Convert_v1alpha1_ImageJob_To_unversioned_ImageJob(in *ImageJob, out *unversioned.ImageJob, s conversion.Scope) error {
	return autoConvert_v1alpha1_ImageJob_To_unversioned_ImageJob(in, out, s)
}

// autoConvert_unversioned_ImageJob_To_v1alpha1_ImageJob is the inverse of the
// conversion above.
func autoConvert_unversioned_ImageJob_To_v1alpha1_ImageJob(in *unversioned.ImageJob, out *ImageJob, s conversion.Scope) error {
	out.ObjectMeta = in.ObjectMeta
	if err := Convert_unversioned_ImageJobStatus_To_v1alpha1_ImageJobStatus(&in.Status, &out.Status, s); err != nil {
		return err
	}
	return nil
}

// Convert_unversioned_ImageJob_To_v1alpha1_ImageJob is an autogenerated conversion function.
func Convert_unversioned_ImageJob_To_v1alpha1_ImageJob(in *unversioned.ImageJob, out *ImageJob, s conversion.Scope) error {
	return autoConvert_unversioned_ImageJob_To_v1alpha1_ImageJob(in, out, s)
}
// autoConvert_v1alpha1_ImageJobCleanupConfig_To_unversioned_ImageJobCleanupConfig
// converts the cleanup delays via a direct cast between the two versions'
// Duration types.
func autoConvert_v1alpha1_ImageJobCleanupConfig_To_unversioned_ImageJobCleanupConfig(in *ImageJobCleanupConfig, out *unversioned.ImageJobCleanupConfig, s conversion.Scope) error {
	out.DelayOnSuccess = unversioned.Duration(in.DelayOnSuccess)
	out.DelayOnFailure = unversioned.Duration(in.DelayOnFailure)
	return nil
}

// Convert_v1alpha1_ImageJobCleanupConfig_To_unversioned_ImageJobCleanupConfig is an autogenerated conversion function.
func Convert_v1alpha1_ImageJobCleanupConfig_To_unversioned_ImageJobCleanupConfig(in *ImageJobCleanupConfig, out *unversioned.ImageJobCleanupConfig, s conversion.Scope) error {
	return autoConvert_v1alpha1_ImageJobCleanupConfig_To_unversioned_ImageJobCleanupConfig(in, out, s)
}

// autoConvert_unversioned_ImageJobCleanupConfig_To_v1alpha1_ImageJobCleanupConfig
// is the inverse of the conversion above.
func autoConvert_unversioned_ImageJobCleanupConfig_To_v1alpha1_ImageJobCleanupConfig(in *unversioned.ImageJobCleanupConfig, out *ImageJobCleanupConfig, s conversion.Scope) error {
	out.DelayOnSuccess = Duration(in.DelayOnSuccess)
	out.DelayOnFailure = Duration(in.DelayOnFailure)
	return nil
}

// Convert_unversioned_ImageJobCleanupConfig_To_v1alpha1_ImageJobCleanupConfig is an autogenerated conversion function.
func Convert_unversioned_ImageJobCleanupConfig_To_v1alpha1_ImageJobCleanupConfig(in *unversioned.ImageJobCleanupConfig, out *ImageJobCleanupConfig, s conversion.Scope) error {
	return autoConvert_unversioned_ImageJobCleanupConfig_To_v1alpha1_ImageJobCleanupConfig(in, out, s)
}
// autoConvert_v1alpha1_ImageJobConfig_To_unversioned_ImageJobConfig copies the
// success ratio and converts the nested cleanup config.
func autoConvert_v1alpha1_ImageJobConfig_To_unversioned_ImageJobConfig(in *ImageJobConfig, out *unversioned.ImageJobConfig, s conversion.Scope) error {
	out.SuccessRatio = in.SuccessRatio
	if err := Convert_v1alpha1_ImageJobCleanupConfig_To_unversioned_ImageJobCleanupConfig(&in.Cleanup, &out.Cleanup, s); err != nil {
		return err
	}
	return nil
}

// Convert_v1alpha1_ImageJobConfig_To_unversioned_ImageJobConfig is an autogenerated conversion function.
func Convert_v1alpha1_ImageJobConfig_To_unversioned_ImageJobConfig(in *ImageJobConfig, out *unversioned.ImageJobConfig, s conversion.Scope) error {
	return autoConvert_v1alpha1_ImageJobConfig_To_unversioned_ImageJobConfig(in, out, s)
}

// autoConvert_unversioned_ImageJobConfig_To_v1alpha1_ImageJobConfig is the
// inverse of the conversion above.
func autoConvert_unversioned_ImageJobConfig_To_v1alpha1_ImageJobConfig(in *unversioned.ImageJobConfig, out *ImageJobConfig, s conversion.Scope) error {
	out.SuccessRatio = in.SuccessRatio
	if err := Convert_unversioned_ImageJobCleanupConfig_To_v1alpha1_ImageJobCleanupConfig(&in.Cleanup, &out.Cleanup, s); err != nil {
		return err
	}
	return nil
}

// Convert_unversioned_ImageJobConfig_To_v1alpha1_ImageJobConfig is an autogenerated conversion function.
func Convert_unversioned_ImageJobConfig_To_v1alpha1_ImageJobConfig(in *unversioned.ImageJobConfig, out *ImageJobConfig, s conversion.Scope) error {
	return autoConvert_unversioned_ImageJobConfig_To_v1alpha1_ImageJobConfig(in, out, s)
}
// autoConvert_v1alpha1_ImageJobList_To_unversioned_ImageJobList copies the
// list metadata and type-puns the Items slice (zero-copy; output aliases the
// input's backing array, valid because the element layouts are identical).
func autoConvert_v1alpha1_ImageJobList_To_unversioned_ImageJobList(in *ImageJobList, out *unversioned.ImageJobList, s conversion.Scope) error {
	out.ListMeta = in.ListMeta
	out.Items = *(*[]unversioned.ImageJob)(unsafe.Pointer(&in.Items))
	return nil
}

// Convert_v1alpha1_ImageJobList_To_unversioned_ImageJobList is an autogenerated conversion function.
func Convert_v1alpha1_ImageJobList_To_unversioned_ImageJobList(in *ImageJobList, out *unversioned.ImageJobList, s conversion.Scope) error {
	return autoConvert_v1alpha1_ImageJobList_To_unversioned_ImageJobList(in, out, s)
}

// autoConvert_unversioned_ImageJobList_To_v1alpha1_ImageJobList is the inverse
// of the conversion above.
func autoConvert_unversioned_ImageJobList_To_v1alpha1_ImageJobList(in *unversioned.ImageJobList, out *ImageJobList, s conversion.Scope) error {
	out.ListMeta = in.ListMeta
	out.Items = *(*[]ImageJob)(unsafe.Pointer(&in.Items))
	return nil
}

// Convert_unversioned_ImageJobList_To_v1alpha1_ImageJobList is an autogenerated conversion function.
func Convert_unversioned_ImageJobList_To_v1alpha1_ImageJobList(in *unversioned.ImageJobList, out *ImageJobList, s conversion.Scope) error {
	return autoConvert_unversioned_ImageJobList_To_v1alpha1_ImageJobList(in, out, s)
}
// autoConvert_v1alpha1_ImageJobStatus_To_unversioned_ImageJobStatus copies the
// per-job counters, casts the Phase string type, and aliases the DeleteAfter
// timestamp pointer via an unsafe cast.
func autoConvert_v1alpha1_ImageJobStatus_To_unversioned_ImageJobStatus(in *ImageJobStatus, out *unversioned.ImageJobStatus, s conversion.Scope) error {
	out.Failed = in.Failed
	out.Succeeded = in.Succeeded
	out.Desired = in.Desired
	out.Skipped = in.Skipped
	out.Phase = unversioned.JobPhase(in.Phase)
	out.DeleteAfter = (*metav1.Time)(unsafe.Pointer(in.DeleteAfter))
	return nil
}

// Convert_v1alpha1_ImageJobStatus_To_unversioned_ImageJobStatus is an autogenerated conversion function.
func Convert_v1alpha1_ImageJobStatus_To_unversioned_ImageJobStatus(in *ImageJobStatus, out *unversioned.ImageJobStatus, s conversion.Scope) error {
	return autoConvert_v1alpha1_ImageJobStatus_To_unversioned_ImageJobStatus(in, out, s)
}

// autoConvert_unversioned_ImageJobStatus_To_v1alpha1_ImageJobStatus is the
// inverse of the conversion above.
func autoConvert_unversioned_ImageJobStatus_To_v1alpha1_ImageJobStatus(in *unversioned.ImageJobStatus, out *ImageJobStatus, s conversion.Scope) error {
	out.Failed = in.Failed
	out.Succeeded = in.Succeeded
	out.Desired = in.Desired
	out.Skipped = in.Skipped
	out.Phase = JobPhase(in.Phase)
	out.DeleteAfter = (*metav1.Time)(unsafe.Pointer(in.DeleteAfter))
	return nil
}

// Convert_unversioned_ImageJobStatus_To_v1alpha1_ImageJobStatus is an autogenerated conversion function.
func Convert_unversioned_ImageJobStatus_To_v1alpha1_ImageJobStatus(in *unversioned.ImageJobStatus, out *ImageJobStatus, s conversion.Scope) error {
	return autoConvert_unversioned_ImageJobStatus_To_v1alpha1_ImageJobStatus(in, out, s)
}
// autoConvert_v1alpha1_ImageList_To_unversioned_ImageList copies ObjectMeta
// and converts Spec and Status through their dedicated conversion functions.
func autoConvert_v1alpha1_ImageList_To_unversioned_ImageList(in *ImageList, out *unversioned.ImageList, s conversion.Scope) error {
	out.ObjectMeta = in.ObjectMeta
	if err := Convert_v1alpha1_ImageListSpec_To_unversioned_ImageListSpec(&in.Spec, &out.Spec, s); err != nil {
		return err
	}
	if err := Convert_v1alpha1_ImageListStatus_To_unversioned_ImageListStatus(&in.Status, &out.Status, s); err != nil {
		return err
	}
	return nil
}

// Convert_v1alpha1_ImageList_To_unversioned_ImageList is an autogenerated conversion function.
func Convert_v1alpha1_ImageList_To_unversioned_ImageList(in *ImageList, out *unversioned.ImageList, s conversion.Scope) error {
	return autoConvert_v1alpha1_ImageList_To_unversioned_ImageList(in, out, s)
}

// autoConvert_unversioned_ImageList_To_v1alpha1_ImageList is the inverse of
// the conversion above.
func autoConvert_unversioned_ImageList_To_v1alpha1_ImageList(in *unversioned.ImageList, out *ImageList, s conversion.Scope) error {
	out.ObjectMeta = in.ObjectMeta
	if err := Convert_unversioned_ImageListSpec_To_v1alpha1_ImageListSpec(&in.Spec, &out.Spec, s); err != nil {
		return err
	}
	if err := Convert_unversioned_ImageListStatus_To_v1alpha1_ImageListStatus(&in.Status, &out.Status, s); err != nil {
		return err
	}
	return nil
}

// Convert_unversioned_ImageList_To_v1alpha1_ImageList is an autogenerated conversion function.
func Convert_unversioned_ImageList_To_v1alpha1_ImageList(in *unversioned.ImageList, out *ImageList, s conversion.Scope) error {
	return autoConvert_unversioned_ImageList_To_v1alpha1_ImageList(in, out, s)
}
// autoConvert_v1alpha1_ImageListList_To_unversioned_ImageListList copies the
// list metadata and type-puns Items (zero-copy; output aliases the input's
// backing array).
func autoConvert_v1alpha1_ImageListList_To_unversioned_ImageListList(in *ImageListList, out *unversioned.ImageListList, s conversion.Scope) error {
	out.ListMeta = in.ListMeta
	out.Items = *(*[]unversioned.ImageList)(unsafe.Pointer(&in.Items))
	return nil
}

// Convert_v1alpha1_ImageListList_To_unversioned_ImageListList is an autogenerated conversion function.
func Convert_v1alpha1_ImageListList_To_unversioned_ImageListList(in *ImageListList, out *unversioned.ImageListList, s conversion.Scope) error {
	return autoConvert_v1alpha1_ImageListList_To_unversioned_ImageListList(in, out, s)
}

// autoConvert_unversioned_ImageListList_To_v1alpha1_ImageListList is the
// inverse of the conversion above.
func autoConvert_unversioned_ImageListList_To_v1alpha1_ImageListList(in *unversioned.ImageListList, out *ImageListList, s conversion.Scope) error {
	out.ListMeta = in.ListMeta
	out.Items = *(*[]ImageList)(unsafe.Pointer(&in.Items))
	return nil
}

// Convert_unversioned_ImageListList_To_v1alpha1_ImageListList is an autogenerated conversion function.
func Convert_unversioned_ImageListList_To_v1alpha1_ImageListList(in *unversioned.ImageListList, out *ImageListList, s conversion.Scope) error {
	return autoConvert_unversioned_ImageListList_To_v1alpha1_ImageListList(in, out, s)
}
// autoConvert_v1alpha1_ImageListSpec_To_unversioned_ImageListSpec type-puns
// the Images slice (zero-copy; output aliases the input's backing array).
func autoConvert_v1alpha1_ImageListSpec_To_unversioned_ImageListSpec(in *ImageListSpec, out *unversioned.ImageListSpec, s conversion.Scope) error {
	out.Images = *(*[]string)(unsafe.Pointer(&in.Images))
	return nil
}

// Convert_v1alpha1_ImageListSpec_To_unversioned_ImageListSpec is an autogenerated conversion function.
func Convert_v1alpha1_ImageListSpec_To_unversioned_ImageListSpec(in *ImageListSpec, out *unversioned.ImageListSpec, s conversion.Scope) error {
	return autoConvert_v1alpha1_ImageListSpec_To_unversioned_ImageListSpec(in, out, s)
}

// autoConvert_unversioned_ImageListSpec_To_v1alpha1_ImageListSpec is the
// inverse of the conversion above.
func autoConvert_unversioned_ImageListSpec_To_v1alpha1_ImageListSpec(in *unversioned.ImageListSpec, out *ImageListSpec, s conversion.Scope) error {
	out.Images = *(*[]string)(unsafe.Pointer(&in.Images))
	return nil
}

// Convert_unversioned_ImageListSpec_To_v1alpha1_ImageListSpec is an autogenerated conversion function.
func Convert_unversioned_ImageListSpec_To_v1alpha1_ImageListSpec(in *unversioned.ImageListSpec, out *ImageListSpec, s conversion.Scope) error {
	return autoConvert_unversioned_ImageListSpec_To_v1alpha1_ImageListSpec(in, out, s)
}
// autoConvert_v1alpha1_ImageListStatus_To_unversioned_ImageListStatus copies
// the result counters and aliases the Timestamp pointer via an unsafe cast.
func autoConvert_v1alpha1_ImageListStatus_To_unversioned_ImageListStatus(in *ImageListStatus, out *unversioned.ImageListStatus, s conversion.Scope) error {
	out.Timestamp = (*metav1.Time)(unsafe.Pointer(in.Timestamp))
	out.Success = in.Success
	out.Failed = in.Failed
	out.Skipped = in.Skipped
	return nil
}

// Convert_v1alpha1_ImageListStatus_To_unversioned_ImageListStatus is an autogenerated conversion function.
func Convert_v1alpha1_ImageListStatus_To_unversioned_ImageListStatus(in *ImageListStatus, out *unversioned.ImageListStatus, s conversion.Scope) error {
	return autoConvert_v1alpha1_ImageListStatus_To_unversioned_ImageListStatus(in, out, s)
}

// autoConvert_unversioned_ImageListStatus_To_v1alpha1_ImageListStatus is the
// inverse of the conversion above.
func autoConvert_unversioned_ImageListStatus_To_v1alpha1_ImageListStatus(in *unversioned.ImageListStatus, out *ImageListStatus, s conversion.Scope) error {
	out.Timestamp = (*metav1.Time)(unsafe.Pointer(in.Timestamp))
	out.Success = in.Success
	out.Failed = in.Failed
	out.Skipped = in.Skipped
	return nil
}

// Convert_unversioned_ImageListStatus_To_v1alpha1_ImageListStatus is an autogenerated conversion function.
func Convert_unversioned_ImageListStatus_To_v1alpha1_ImageListStatus(in *unversioned.ImageListStatus, out *ImageListStatus, s conversion.Scope) error {
	return autoConvert_unversioned_ImageListStatus_To_v1alpha1_ImageListStatus(in, out, s)
}
// autoConvert_v1alpha1_ManagerConfig_To_unversioned_ManagerConfig converts
// every ManagerConfig field; Runtime crosses a type rename (v1alpha1 Runtime
// vs. unversioned RuntimeSpec), and PullSecrets is a zero-copy slice cast.
// No public wrapper exists: the Convert_... entry point for this pair is a
// manual function registered via AddConversionFunc in RegisterConversions.
func autoConvert_v1alpha1_ManagerConfig_To_unversioned_ManagerConfig(in *ManagerConfig, out *unversioned.ManagerConfig, s conversion.Scope) error {
	if err := Convert_v1alpha1_Runtime_To_unversioned_RuntimeSpec(&in.Runtime, &out.Runtime, s); err != nil {
		return err
	}
	out.OTLPEndpoint = in.OTLPEndpoint
	out.LogLevel = in.LogLevel
	if err := Convert_v1alpha1_ScheduleConfig_To_unversioned_ScheduleConfig(&in.Scheduling, &out.Scheduling, s); err != nil {
		return err
	}
	if err := Convert_v1alpha1_ProfileConfig_To_unversioned_ProfileConfig(&in.Profile, &out.Profile, s); err != nil {
		return err
	}
	if err := Convert_v1alpha1_ImageJobConfig_To_unversioned_ImageJobConfig(&in.ImageJob, &out.ImageJob, s); err != nil {
		return err
	}
	out.PullSecrets = *(*[]string)(unsafe.Pointer(&in.PullSecrets))
	if err := Convert_v1alpha1_NodeFilterConfig_To_unversioned_NodeFilterConfig(&in.NodeFilter, &out.NodeFilter, s); err != nil {
		return err
	}
	out.PriorityClassName = in.PriorityClassName
	return nil
}

// autoConvert_unversioned_ManagerConfig_To_v1alpha1_ManagerConfig is the
// inverse of the conversion above; the unversioned-only AdditionalPodLabels
// field must be handled by the manual wrapper (see the WARNING below).
func autoConvert_unversioned_ManagerConfig_To_v1alpha1_ManagerConfig(in *unversioned.ManagerConfig, out *ManagerConfig, s conversion.Scope) error {
	if err := Convert_unversioned_RuntimeSpec_To_v1alpha1_Runtime(&in.Runtime, &out.Runtime, s); err != nil {
		return err
	}
	out.OTLPEndpoint = in.OTLPEndpoint
	out.LogLevel = in.LogLevel
	if err := Convert_unversioned_ScheduleConfig_To_v1alpha1_ScheduleConfig(&in.Scheduling, &out.Scheduling, s); err != nil {
		return err
	}
	if err := Convert_unversioned_ProfileConfig_To_v1alpha1_ProfileConfig(&in.Profile, &out.Profile, s); err != nil {
		return err
	}
	if err := Convert_unversioned_ImageJobConfig_To_v1alpha1_ImageJobConfig(&in.ImageJob, &out.ImageJob, s); err != nil {
		return err
	}
	out.PullSecrets = *(*[]string)(unsafe.Pointer(&in.PullSecrets))
	if err := Convert_unversioned_NodeFilterConfig_To_v1alpha1_NodeFilterConfig(&in.NodeFilter, &out.NodeFilter, s); err != nil {
		return err
	}
	out.PriorityClassName = in.PriorityClassName
	// WARNING: in.AdditionalPodLabels requires manual conversion: does not exist in peer-type
	return nil
}
// autoConvert_v1alpha1_NodeFilterConfig_To_unversioned_NodeFilterConfig copies
// Type and reinterprets the Selectors slice header without an element copy.
func autoConvert_v1alpha1_NodeFilterConfig_To_unversioned_NodeFilterConfig(in *NodeFilterConfig, out *unversioned.NodeFilterConfig, s conversion.Scope) error {
out.Type = in.Type
out.Selectors = *(*[]string)(unsafe.Pointer(&in.Selectors))
return nil
}
// Convert_v1alpha1_NodeFilterConfig_To_unversioned_NodeFilterConfig is an autogenerated conversion function.
func Convert_v1alpha1_NodeFilterConfig_To_unversioned_NodeFilterConfig(in *NodeFilterConfig, out *unversioned.NodeFilterConfig, s conversion.Scope) error {
return autoConvert_v1alpha1_NodeFilterConfig_To_unversioned_NodeFilterConfig(in, out, s)
}
// autoConvert_unversioned_NodeFilterConfig_To_v1alpha1_NodeFilterConfig is the
// reverse direction; same zero-copy slice treatment.
func autoConvert_unversioned_NodeFilterConfig_To_v1alpha1_NodeFilterConfig(in *unversioned.NodeFilterConfig, out *NodeFilterConfig, s conversion.Scope) error {
out.Type = in.Type
out.Selectors = *(*[]string)(unsafe.Pointer(&in.Selectors))
return nil
}
// Convert_unversioned_NodeFilterConfig_To_v1alpha1_NodeFilterConfig is an autogenerated conversion function.
func Convert_unversioned_NodeFilterConfig_To_v1alpha1_NodeFilterConfig(in *unversioned.NodeFilterConfig, out *NodeFilterConfig, s conversion.Scope) error {
return autoConvert_unversioned_NodeFilterConfig_To_v1alpha1_NodeFilterConfig(in, out, s)
}
// autoConvert_v1alpha1_OptionalContainerConfig_To_unversioned_OptionalContainerConfig
// copies the Enabled flag and delegates the embedded ContainerConfig.
func autoConvert_v1alpha1_OptionalContainerConfig_To_unversioned_OptionalContainerConfig(in *OptionalContainerConfig, out *unversioned.OptionalContainerConfig, s conversion.Scope) error {
out.Enabled = in.Enabled
if err := Convert_v1alpha1_ContainerConfig_To_unversioned_ContainerConfig(&in.ContainerConfig, &out.ContainerConfig, s); err != nil {
return err
}
return nil
}
// Convert_v1alpha1_OptionalContainerConfig_To_unversioned_OptionalContainerConfig is an autogenerated conversion function.
func Convert_v1alpha1_OptionalContainerConfig_To_unversioned_OptionalContainerConfig(in *OptionalContainerConfig, out *unversioned.OptionalContainerConfig, s conversion.Scope) error {
return autoConvert_v1alpha1_OptionalContainerConfig_To_unversioned_OptionalContainerConfig(in, out, s)
}
// autoConvert_unversioned_OptionalContainerConfig_To_v1alpha1_OptionalContainerConfig
// is the reverse direction of the above.
func autoConvert_unversioned_OptionalContainerConfig_To_v1alpha1_OptionalContainerConfig(in *unversioned.OptionalContainerConfig, out *OptionalContainerConfig, s conversion.Scope) error {
out.Enabled = in.Enabled
if err := Convert_unversioned_ContainerConfig_To_v1alpha1_ContainerConfig(&in.ContainerConfig, &out.ContainerConfig, s); err != nil {
return err
}
return nil
}
// Convert_unversioned_OptionalContainerConfig_To_v1alpha1_OptionalContainerConfig is an autogenerated conversion function.
func Convert_unversioned_OptionalContainerConfig_To_v1alpha1_OptionalContainerConfig(in *unversioned.OptionalContainerConfig, out *OptionalContainerConfig, s conversion.Scope) error {
return autoConvert_unversioned_OptionalContainerConfig_To_v1alpha1_OptionalContainerConfig(in, out, s)
}
// autoConvert_v1alpha1_ProfileConfig_To_unversioned_ProfileConfig is a plain
// field-for-field copy; both fields are scalars.
func autoConvert_v1alpha1_ProfileConfig_To_unversioned_ProfileConfig(in *ProfileConfig, out *unversioned.ProfileConfig, s conversion.Scope) error {
out.Enabled = in.Enabled
out.Port = in.Port
return nil
}
// Convert_v1alpha1_ProfileConfig_To_unversioned_ProfileConfig is an autogenerated conversion function.
func Convert_v1alpha1_ProfileConfig_To_unversioned_ProfileConfig(in *ProfileConfig, out *unversioned.ProfileConfig, s conversion.Scope) error {
return autoConvert_v1alpha1_ProfileConfig_To_unversioned_ProfileConfig(in, out, s)
}
// autoConvert_unversioned_ProfileConfig_To_v1alpha1_ProfileConfig is the
// reverse scalar copy.
func autoConvert_unversioned_ProfileConfig_To_v1alpha1_ProfileConfig(in *unversioned.ProfileConfig, out *ProfileConfig, s conversion.Scope) error {
out.Enabled = in.Enabled
out.Port = in.Port
return nil
}
// Convert_unversioned_ProfileConfig_To_v1alpha1_ProfileConfig is an autogenerated conversion function.
func Convert_unversioned_ProfileConfig_To_v1alpha1_ProfileConfig(in *unversioned.ProfileConfig, out *ProfileConfig, s conversion.Scope) error {
return autoConvert_unversioned_ProfileConfig_To_v1alpha1_ProfileConfig(in, out, s)
}
// autoConvert_v1alpha1_RepoTag_To_unversioned_RepoTag copies the image
// repository and tag strings.
func autoConvert_v1alpha1_RepoTag_To_unversioned_RepoTag(in *RepoTag, out *unversioned.RepoTag, s conversion.Scope) error {
out.Repo = in.Repo
out.Tag = in.Tag
return nil
}
// Convert_v1alpha1_RepoTag_To_unversioned_RepoTag is an autogenerated conversion function.
func Convert_v1alpha1_RepoTag_To_unversioned_RepoTag(in *RepoTag, out *unversioned.RepoTag, s conversion.Scope) error {
return autoConvert_v1alpha1_RepoTag_To_unversioned_RepoTag(in, out, s)
}
// autoConvert_unversioned_RepoTag_To_v1alpha1_RepoTag is the reverse copy.
func autoConvert_unversioned_RepoTag_To_v1alpha1_RepoTag(in *unversioned.RepoTag, out *RepoTag, s conversion.Scope) error {
out.Repo = in.Repo
out.Tag = in.Tag
return nil
}
// Convert_unversioned_RepoTag_To_v1alpha1_RepoTag is an autogenerated conversion function.
func Convert_unversioned_RepoTag_To_v1alpha1_RepoTag(in *unversioned.RepoTag, out *RepoTag, s conversion.Scope) error {
return autoConvert_unversioned_RepoTag_To_v1alpha1_RepoTag(in, out, s)
}
// autoConvert_v1alpha1_ResourceRequirements_To_unversioned_ResourceRequirements
// assigns the Quantity values directly (shallow copy of resource.Quantity).
func autoConvert_v1alpha1_ResourceRequirements_To_unversioned_ResourceRequirements(in *ResourceRequirements, out *unversioned.ResourceRequirements, s conversion.Scope) error {
out.Mem = in.Mem
out.CPU = in.CPU
return nil
}
// Convert_v1alpha1_ResourceRequirements_To_unversioned_ResourceRequirements is an autogenerated conversion function.
func Convert_v1alpha1_ResourceRequirements_To_unversioned_ResourceRequirements(in *ResourceRequirements, out *unversioned.ResourceRequirements, s conversion.Scope) error {
return autoConvert_v1alpha1_ResourceRequirements_To_unversioned_ResourceRequirements(in, out, s)
}
// autoConvert_unversioned_ResourceRequirements_To_v1alpha1_ResourceRequirements
// is the reverse direction of the above.
func autoConvert_unversioned_ResourceRequirements_To_v1alpha1_ResourceRequirements(in *unversioned.ResourceRequirements, out *ResourceRequirements, s conversion.Scope) error {
out.Mem = in.Mem
out.CPU = in.CPU
return nil
}
// Convert_unversioned_ResourceRequirements_To_v1alpha1_ResourceRequirements is an autogenerated conversion function.
func Convert_unversioned_ResourceRequirements_To_v1alpha1_ResourceRequirements(in *unversioned.ResourceRequirements, out *ResourceRequirements, s conversion.Scope) error {
return autoConvert_unversioned_ResourceRequirements_To_v1alpha1_ResourceRequirements(in, out, s)
}
// autoConvert_v1alpha1_ScheduleConfig_To_unversioned_ScheduleConfig converts
// the Duration through an explicit named-type cast; both have the same
// underlying type.
func autoConvert_v1alpha1_ScheduleConfig_To_unversioned_ScheduleConfig(in *ScheduleConfig, out *unversioned.ScheduleConfig, s conversion.Scope) error {
out.RepeatInterval = unversioned.Duration(in.RepeatInterval)
out.BeginImmediately = in.BeginImmediately
return nil
}
// Convert_v1alpha1_ScheduleConfig_To_unversioned_ScheduleConfig is an autogenerated conversion function.
func Convert_v1alpha1_ScheduleConfig_To_unversioned_ScheduleConfig(in *ScheduleConfig, out *unversioned.ScheduleConfig, s conversion.Scope) error {
return autoConvert_v1alpha1_ScheduleConfig_To_unversioned_ScheduleConfig(in, out, s)
}
// autoConvert_unversioned_ScheduleConfig_To_v1alpha1_ScheduleConfig is the
// reverse direction of the above.
func autoConvert_unversioned_ScheduleConfig_To_v1alpha1_ScheduleConfig(in *unversioned.ScheduleConfig, out *ScheduleConfig, s conversion.Scope) error {
out.RepeatInterval = Duration(in.RepeatInterval)
out.BeginImmediately = in.BeginImmediately
return nil
}
// Convert_unversioned_ScheduleConfig_To_v1alpha1_ScheduleConfig is an autogenerated conversion function.
func Convert_unversioned_ScheduleConfig_To_v1alpha1_ScheduleConfig(in *unversioned.ScheduleConfig, out *ScheduleConfig, s conversion.Scope) error {
return autoConvert_unversioned_ScheduleConfig_To_v1alpha1_ScheduleConfig(in, out, s)
}
================================================
FILE: api/v1alpha1/zz_generated.deepcopy.go
================================================
//go:build !ignore_autogenerated
/*
Copyright 2021.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by controller-gen. DO NOT EDIT.
package v1alpha1
import (
"k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/runtime"
)
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
// NOTE(review): controller-gen output — regenerate with the project tooling
// rather than hand-editing.
func (in *Components) DeepCopyInto(out *Components) {
*out = *in
in.Collector.DeepCopyInto(&out.Collector)
in.Scanner.DeepCopyInto(&out.Scanner)
in.Eraser.DeepCopyInto(&out.Eraser)
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Components.
func (in *Components) DeepCopy() *Components {
if in == nil {
return nil
}
out := new(Components)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ContainerConfig) DeepCopyInto(out *ContainerConfig) {
*out = *in
out.Image = in.Image
in.Request.DeepCopyInto(&out.Request)
in.Limit.DeepCopyInto(&out.Limit)
// Config is an optional *string: allocate a fresh string so the copy does
// not alias the original.
if in.Config != nil {
in, out := &in.Config, &out.Config
*out = new(string)
**out = **in
}
// Volumes gets a new slice with each element deep-copied.
if in.Volumes != nil {
in, out := &in.Volumes, &out.Volumes
*out = make([]v1.Volume, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ContainerConfig.
func (in *ContainerConfig) DeepCopy() *ContainerConfig {
if in == nil {
return nil
}
out := new(ContainerConfig)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *EraserConfig) DeepCopyInto(out *EraserConfig) {
*out = *in
out.TypeMeta = in.TypeMeta
in.Manager.DeepCopyInto(&out.Manager)
in.Components.DeepCopyInto(&out.Components)
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EraserConfig.
func (in *EraserConfig) DeepCopy() *EraserConfig {
if in == nil {
return nil
}
out := new(EraserConfig)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
// This method is what lets EraserConfig satisfy runtime.Object.
func (in *EraserConfig) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *Image) DeepCopyInto(out *Image) {
*out = *in
// Names and Digests are []string: fresh backing arrays, element copy.
if in.Names != nil {
in, out := &in.Names, &out.Names
*out = make([]string, len(*in))
copy(*out, *in)
}
if in.Digests != nil {
in, out := &in.Digests, &out.Digests
*out = make([]string, len(*in))
copy(*out, *in)
}
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Image.
func (in *Image) DeepCopy() *Image {
if in == nil {
return nil
}
out := new(Image)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ImageJob) DeepCopyInto(out *ImageJob) {
*out = *in
out.TypeMeta = in.TypeMeta
in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
in.Status.DeepCopyInto(&out.Status)
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImageJob.
func (in *ImageJob) DeepCopy() *ImageJob {
if in == nil {
return nil
}
out := new(ImageJob)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *ImageJob) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
// A plain assignment suffices: controller-gen found no reference-typed fields
// in ImageJobCleanupConfig.
func (in *ImageJobCleanupConfig) DeepCopyInto(out *ImageJobCleanupConfig) {
*out = *in
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImageJobCleanupConfig.
func (in *ImageJobCleanupConfig) DeepCopy() *ImageJobCleanupConfig {
if in == nil {
return nil
}
out := new(ImageJobCleanupConfig)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ImageJobConfig) DeepCopyInto(out *ImageJobConfig) {
*out = *in
out.Cleanup = in.Cleanup
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImageJobConfig.
func (in *ImageJobConfig) DeepCopy() *ImageJobConfig {
if in == nil {
return nil
}
out := new(ImageJobConfig)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ImageJobList) DeepCopyInto(out *ImageJobList) {
*out = *in
out.TypeMeta = in.TypeMeta
in.ListMeta.DeepCopyInto(&out.ListMeta)
if in.Items != nil {
in, out := &in.Items, &out.Items
*out = make([]ImageJob, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImageJobList.
func (in *ImageJobList) DeepCopy() *ImageJobList {
if in == nil {
return nil
}
out := new(ImageJobList)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *ImageJobList) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ImageJobStatus) DeepCopyInto(out *ImageJobStatus) {
*out = *in
// DeleteAfter is an optional pointer; its own DeepCopy yields a new value.
if in.DeleteAfter != nil {
in, out := &in.DeleteAfter, &out.DeleteAfter
*out = (*in).DeepCopy()
}
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImageJobStatus.
func (in *ImageJobStatus) DeepCopy() *ImageJobStatus {
if in == nil {
return nil
}
out := new(ImageJobStatus)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ImageList) DeepCopyInto(out *ImageList) {
*out = *in
out.TypeMeta = in.TypeMeta
in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
in.Spec.DeepCopyInto(&out.Spec)
in.Status.DeepCopyInto(&out.Status)
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImageList.
func (in *ImageList) DeepCopy() *ImageList {
if in == nil {
return nil
}
out := new(ImageList)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *ImageList) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ImageListList) DeepCopyInto(out *ImageListList) {
*out = *in
out.TypeMeta = in.TypeMeta
in.ListMeta.DeepCopyInto(&out.ListMeta)
if in.Items != nil {
in, out := &in.Items, &out.Items
*out = make([]ImageList, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImageListList.
func (in *ImageListList) DeepCopy() *ImageListList {
if in == nil {
return nil
}
out := new(ImageListList)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *ImageListList) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ImageListSpec) DeepCopyInto(out *ImageListSpec) {
*out = *in
if in.Images != nil {
in, out := &in.Images, &out.Images
*out = make([]string, len(*in))
copy(*out, *in)
}
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImageListSpec.
func (in *ImageListSpec) DeepCopy() *ImageListSpec {
if in == nil {
return nil
}
out := new(ImageListSpec)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ImageListStatus) DeepCopyInto(out *ImageListStatus) {
*out = *in
// Timestamp is an optional pointer; its own DeepCopy yields a new value.
if in.Timestamp != nil {
in, out := &in.Timestamp, &out.Timestamp
*out = (*in).DeepCopy()
}
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImageListStatus.
func (in *ImageListStatus) DeepCopy() *ImageListStatus {
if in == nil {
return nil
}
out := new(ImageListStatus)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
// Scheduling/Profile/ImageJob are assigned by value: controller-gen emits
// plain assignment for structs it determined contain no reference types.
func (in *ManagerConfig) DeepCopyInto(out *ManagerConfig) {
*out = *in
out.Scheduling = in.Scheduling
out.Profile = in.Profile
out.ImageJob = in.ImageJob
if in.PullSecrets != nil {
in, out := &in.PullSecrets, &out.PullSecrets
*out = make([]string, len(*in))
copy(*out, *in)
}
in.NodeFilter.DeepCopyInto(&out.NodeFilter)
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ManagerConfig.
func (in *ManagerConfig) DeepCopy() *ManagerConfig {
if in == nil {
return nil
}
out := new(ManagerConfig)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *NodeFilterConfig) DeepCopyInto(out *NodeFilterConfig) {
*out = *in
if in.Selectors != nil {
in, out := &in.Selectors, &out.Selectors
*out = make([]string, len(*in))
copy(*out, *in)
}
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodeFilterConfig.
func (in *NodeFilterConfig) DeepCopy() *NodeFilterConfig {
if in == nil {
return nil
}
out := new(NodeFilterConfig)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *OptionalContainerConfig) DeepCopyInto(out *OptionalContainerConfig) {
*out = *in
in.ContainerConfig.DeepCopyInto(&out.ContainerConfig)
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OptionalContainerConfig.
func (in *OptionalContainerConfig) DeepCopy() *OptionalContainerConfig {
if in == nil {
return nil
}
out := new(OptionalContainerConfig)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
// Plain assignment: ProfileConfig has no reference-typed fields.
func (in *ProfileConfig) DeepCopyInto(out *ProfileConfig) {
*out = *in
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProfileConfig.
func (in *ProfileConfig) DeepCopy() *ProfileConfig {
if in == nil {
return nil
}
out := new(ProfileConfig)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *RepoTag) DeepCopyInto(out *RepoTag) {
*out = *in
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RepoTag.
func (in *RepoTag) DeepCopy() *RepoTag {
if in == nil {
return nil
}
out := new(RepoTag)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
// resource.Quantity carries internal state, so its own DeepCopy is used.
func (in *ResourceRequirements) DeepCopyInto(out *ResourceRequirements) {
*out = *in
out.Mem = in.Mem.DeepCopy()
out.CPU = in.CPU.DeepCopy()
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceRequirements.
func (in *ResourceRequirements) DeepCopy() *ResourceRequirements {
if in == nil {
return nil
}
out := new(ResourceRequirements)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ScheduleConfig) DeepCopyInto(out *ScheduleConfig) {
*out = *in
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ScheduleConfig.
func (in *ScheduleConfig) DeepCopy() *ScheduleConfig {
if in == nil {
return nil
}
out := new(ScheduleConfig)
in.DeepCopyInto(out)
return out
}
================================================
FILE: api/v1alpha2/config/config.go
================================================
package config
import (
"fmt"
"time"
"github.com/eraser-dev/eraser/api/v1alpha2"
"github.com/eraser-dev/eraser/version"
"k8s.io/apimachinery/pkg/api/resource"
)
// defaultScannerConfig is the YAML configuration handed to the trivy scanner
// container when the user supplies none. It is kept as a raw string so it can
// be pointed to verbatim from the default EraserConfig below.
var defaultScannerConfig = `
cacheDir: /var/lib/trivy
dbRepo: ghcr.io/aquasecurity/trivy-db
deleteFailedImages: true
deleteEOLImages: true
vulnerabilities:
ignoreUnfixed: false
types:
- os
- library
securityChecks: # need to be documented; determined by trivy, not us
- vuln
severities:
- CRITICAL
- HIGH
- MEDIUM
- LOW
`
// Delay presets used by the default ImageJob cleanup configuration.
const (
noDelay = v1alpha2.Duration(0)
oneDay = v1alpha2.Duration(time.Hour * 24)
)
// Default returns the default v1alpha2 EraserConfig: a daily schedule that
// begins immediately, collector and scanner disabled, and all component
// images pinned to the current build version (optionally prefixed with the
// configured default repository).
func Default() *v1alpha2.EraserConfig {
	return &v1alpha2.EraserConfig{
		Manager: v1alpha2.ManagerConfig{
			Runtime:      "containerd",
			OTLPEndpoint: "",
			LogLevel:     "info",
			Scheduling: v1alpha2.ScheduleConfig{
				// oneDay is already a v1alpha2.Duration; no conversion needed.
				RepeatInterval:   oneDay,
				BeginImmediately: true,
			},
			Profile: v1alpha2.ProfileConfig{
				Enabled: false,
				Port:    6060,
			},
			ImageJob: v1alpha2.ImageJobConfig{
				SuccessRatio: 1.0,
				Cleanup: v1alpha2.ImageJobCleanupConfig{
					// Successful jobs are cleaned up right away; failed jobs
					// are kept for a day to allow inspection.
					DelayOnSuccess: noDelay,
					DelayOnFailure: oneDay,
				},
			},
			PullSecrets: []string{},
			NodeFilter: v1alpha2.NodeFilterConfig{
				// By default, skip nodes carrying the cleanup filter label.
				Type: "exclude",
				Selectors: []string{
					"eraser.sh/cleanup.filter",
				},
			},
		},
		Components: v1alpha2.Components{
			Collector: v1alpha2.OptionalContainerConfig{
				Enabled: false,
				ContainerConfig: v1alpha2.ContainerConfig{
					Image: v1alpha2.RepoTag{
						Repo: repo("collector"),
						Tag:  version.BuildVersion,
					},
					Request: v1alpha2.ResourceRequirements{
						Mem: resource.MustParse("25Mi"),
						CPU: resource.MustParse("7m"),
					},
					Limit: v1alpha2.ResourceRequirements{
						Mem: resource.MustParse("500Mi"),
						// Zero Quantity: no CPU limit is applied.
						CPU: resource.Quantity{},
					},
					Config: nil,
				},
			},
			Scanner: v1alpha2.OptionalContainerConfig{
				Enabled: false,
				ContainerConfig: v1alpha2.ContainerConfig{
					Image: v1alpha2.RepoTag{
						Repo: repo("eraser-trivy-scanner"),
						Tag:  version.BuildVersion,
					},
					Request: v1alpha2.ResourceRequirements{
						Mem: resource.MustParse("500Mi"),
						CPU: resource.MustParse("1000m"),
					},
					Limit: v1alpha2.ResourceRequirements{
						Mem: resource.MustParse("2Gi"),
						CPU: resource.MustParse("1500m"),
					},
					Config: &defaultScannerConfig,
				},
			},
			Remover: v1alpha2.ContainerConfig{
				Image: v1alpha2.RepoTag{
					Repo: repo("remover"),
					Tag:  version.BuildVersion,
				},
				Request: v1alpha2.ResourceRequirements{
					Mem: resource.MustParse("25Mi"),
					CPU: resource.MustParse("7m"),
				},
				Limit: v1alpha2.ResourceRequirements{
					Mem: resource.MustParse("30Mi"),
					// Zero Quantity: no CPU limit is applied.
					CPU: resource.Quantity{},
				},
				Config: nil,
			},
		},
	}
}
// repo builds the fully-qualified image repository for basename. When a
// default repository is configured at build time it is used as a prefix;
// otherwise the bare basename is returned unchanged.
func repo(basename string) string {
	if prefix := version.DefaultRepo; prefix != "" {
		return fmt.Sprintf("%s/%s", prefix, basename)
	}
	return basename
}
================================================
FILE: api/v1alpha2/custom_conversions.go
================================================
package v1alpha2
import (
unversioned "github.com/eraser-dev/eraser/api/unversioned"
conversion "k8s.io/apimachinery/pkg/conversion"
)
// Convert_v1alpha2_ManagerConfig_To_unversioned_ManagerConfig delegates to the
// generated converter; defined here so conversion-gen treats it as the manual
// override entry point.
//nolint:revive
func Convert_v1alpha2_ManagerConfig_To_unversioned_ManagerConfig(in *ManagerConfig, out *unversioned.ManagerConfig, s conversion.Scope) error {
return autoConvert_v1alpha2_ManagerConfig_To_unversioned_ManagerConfig(in, out, s)
}
// manualConvert_v1alpha2_Runtime_To_unversioned_RuntimeSpec maps the legacy
// scalar runtime name onto the internal RuntimeSpec, filling in the default
// socket address for that runtime. Returns an error for unknown runtimes.
//nolint:revive
func manualConvert_v1alpha2_Runtime_To_unversioned_RuntimeSpec(in *Runtime, out *unversioned.RuntimeSpec, _ conversion.Scope) error {
	// Both named types are string-kinded, so a direct conversion suffices
	// (the intermediate string(...) round-trip was redundant).
	out.Name = unversioned.Runtime(*in)
	rs, err := unversioned.ConvertRuntimeToRuntimeSpec(out.Name)
	if err != nil {
		return err
	}
	out.Address = rs.Address
	return nil
}
// Convert_v1alpha2_Runtime_To_unversioned_RuntimeSpec is the public entry
// point conversion-gen wires into the scheme; it defers to the manual helper.
//nolint:revive
func Convert_v1alpha2_Runtime_To_unversioned_RuntimeSpec(in *Runtime, out *unversioned.RuntimeSpec, s conversion.Scope) error {
return manualConvert_v1alpha2_Runtime_To_unversioned_RuntimeSpec(in, out, s)
}
// Convert_unversioned_ManagerConfig_To_v1alpha2_ManagerConfig delegates to the
// generated converter for the internal-to-v1alpha2 direction.
//nolint:revive
func Convert_unversioned_ManagerConfig_To_v1alpha2_ManagerConfig(in *unversioned.ManagerConfig, out *ManagerConfig, s conversion.Scope) error {
return autoConvert_unversioned_ManagerConfig_To_v1alpha2_ManagerConfig(in, out, s)
}
// manualConvert_unversioned_RuntimeSpec_To_v1alpha2_Runtime keeps only the
// runtime name; the Address field has no v1alpha2 peer and is dropped.
//nolint:revive
func manualConvert_unversioned_RuntimeSpec_To_v1alpha2_Runtime(in *unversioned.RuntimeSpec, out *Runtime, _ conversion.Scope) error {
*out = Runtime(in.Name)
return nil
}
// Convert_unversioned_RuntimeSpec_To_v1alpha2_Runtime is the public entry
// point for the reverse direction; it defers to the manual helper.
//nolint:revive
func Convert_unversioned_RuntimeSpec_To_v1alpha2_Runtime(in *unversioned.RuntimeSpec, out *Runtime, s conversion.Scope) error {
return manualConvert_unversioned_RuntimeSpec_To_v1alpha2_Runtime(in, out, s)
}
================================================
FILE: api/v1alpha2/doc.go
================================================
// Package v1alpha2 contains API Schema definitions for the eraser v1alpha2 API version.
// +k8s:conversion-gen=github.com/eraser-dev/eraser/api/unversioned
package v1alpha2
================================================
FILE: api/v1alpha2/eraserconfig_types.go
================================================
/*
Copyright 2021.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1alpha2
import (
"encoding/json"
"fmt"
"time"
corev1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
type (
Duration time.Duration
Runtime string
)
const (
RuntimeContainerd Runtime = "containerd"
RuntimeDockerShim Runtime = "dockershim"
RuntimeCrio Runtime = "crio"
)
func (td *Duration) UnmarshalJSON(b []byte) error {
var str string
err := json.Unmarshal(b, &str)
if err != nil {
return err
}
pd, err := time.ParseDuration(str)
if err != nil {
return err
}
*td = Duration(pd)
return nil
}
func (r *Runtime) UnmarshalJSON(b []byte) error {
var str string
err := json.Unmarshal(b, &str)
if err != nil {
return err
}
switch rt := Runtime(str); rt {
case RuntimeContainerd, RuntimeDockerShim, RuntimeCrio:
*r = rt
default:
return fmt.Errorf("cannot determine runtime type: %s. valid values are containerd, dockershim, or crio", str)
}
return nil
}
func (td *Duration) MarshalJSON() ([]byte, error) {
return []byte(fmt.Sprintf(`"%s"`, time.Duration(*td).String())), nil
}
// EDIT THIS FILE! THIS IS SCAFFOLDING FOR YOU TO OWN!
// NOTE: json tags are required. Any new fields you add must have json tags for the fields to be serialized.
// OptionalContainerConfig is a ContainerConfig gated by an Enabled flag, used
// for components (collector, scanner) that can be turned off entirely.
type OptionalContainerConfig struct {
Enabled bool `json:"enabled,omitempty"`
ContainerConfig `json:",inline"`
}
// ContainerConfig describes how one eraser component container is run: its
// image, resource requests/limits, optional raw config payload, and volumes.
type ContainerConfig struct {
Image RepoTag `json:"image,omitempty"`
Request ResourceRequirements `json:"request,omitempty"`
Limit ResourceRequirements `json:"limit,omitempty"`
Config *string `json:"config,omitempty"`
Volumes []corev1.Volume `json:"volumes,omitempty"`
}
// ManagerConfig holds the controller-manager level settings: target runtime,
// telemetry endpoint, logging, scheduling, job behavior, and node filtering.
type ManagerConfig struct {
Runtime Runtime `json:"runtime,omitempty"`
OTLPEndpoint string `json:"otlpEndpoint,omitempty"`
LogLevel string `json:"logLevel,omitempty"`
Scheduling ScheduleConfig `json:"scheduling,omitempty"`
Profile ProfileConfig `json:"profile,omitempty"`
ImageJob ImageJobConfig `json:"imageJob,omitempty"`
PullSecrets []string `json:"pullSecrets,omitempty"`
NodeFilter NodeFilterConfig `json:"nodeFilter,omitempty"`
PriorityClassName string `json:"priorityClassName,omitempty"`
}
// ScheduleConfig controls when image jobs run.
type ScheduleConfig struct {
RepeatInterval Duration `json:"repeatInterval,omitempty"`
BeginImmediately bool `json:"beginImmediately,omitempty"`
}
// ProfileConfig toggles the pprof profiling endpoint and its port.
type ProfileConfig struct {
Enabled bool `json:"enabled,omitempty"`
Port int `json:"port,omitempty"`
}
// ImageJobConfig tunes image-job success criteria and cleanup delays.
type ImageJobConfig struct {
SuccessRatio float64 `json:"successRatio,omitempty"`
Cleanup ImageJobCleanupConfig `json:"cleanup,omitempty"`
}
// ImageJobCleanupConfig sets how long finished jobs linger before deletion.
type ImageJobCleanupConfig struct {
DelayOnSuccess Duration `json:"delayOnSuccess,omitempty"`
DelayOnFailure Duration `json:"delayOnFailure,omitempty"`
}
// NodeFilterConfig selects which nodes eraser operates on; Type is the filter
// mode and Selectors are label selectors it matches against.
type NodeFilterConfig struct {
Type string `json:"type,omitempty"`
Selectors []string `json:"selectors,omitempty"`
}
// ResourceRequirements pairs memory and CPU quantities for a request or limit.
type ResourceRequirements struct {
Mem resource.Quantity `json:"mem,omitempty"`
CPU resource.Quantity `json:"cpu,omitempty"`
}
// RepoTag names a container image by repository and tag.
type RepoTag struct {
Repo string `json:"repo,omitempty"`
Tag string `json:"tag,omitempty"`
}
// Components groups the per-component container configurations.
type Components struct {
Collector OptionalContainerConfig `json:"collector,omitempty"`
Scanner OptionalContainerConfig `json:"scanner,omitempty"`
Remover ContainerConfig `json:"remover,omitempty"`
}
//+kubebuilder:object:root=true
// EraserConfig is the Schema for the eraserconfigs API.
type EraserConfig struct {
metav1.TypeMeta `json:",inline"`
Manager ManagerConfig `json:"manager"`
Components Components `json:"components"`
}
// init registers EraserConfig with the package scheme builder so it can be
// added to a runtime.Scheme.
func init() {
SchemeBuilder.Register(&EraserConfig{})
}
================================================
FILE: api/v1alpha2/groupversion_info.go
================================================
/*
Copyright 2021.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Package v1alpha2 contains API Schema definitions for the eraser.sh v1alpha2 API group
// +kubebuilder:object:generate=true
// +groupName=eraser.sh
package v1alpha2
import (
runtime "k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
"sigs.k8s.io/controller-runtime/pkg/scheme"
)
var (
// GroupVersion is group version used to register these objects.
GroupVersion = schema.GroupVersion{Group: "eraser.sh", Version: "v1alpha2"}
// SchemeBuilder is used to add go types to the GroupVersionKind scheme.
SchemeBuilder = &scheme.Builder{GroupVersion: GroupVersion}
// localSchemeBuilder lets the generated conversion code register its
// functions alongside the type registrations (see zz_generated.conversion.go).
localSchemeBuilder = runtime.NewSchemeBuilder(SchemeBuilder.AddToScheme)
// AddToScheme adds the types in this group-version to the given scheme.
AddToScheme = SchemeBuilder.AddToScheme
)
================================================
FILE: api/v1alpha2/zz_generated.conversion.go
================================================
//go:build !ignore_autogenerated
// +build !ignore_autogenerated
/*
Copyright 2021.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by conversion-gen. DO NOT EDIT.
package v1alpha2
import (
unsafe "unsafe"
unversioned "github.com/eraser-dev/eraser/api/unversioned"
v1 "k8s.io/api/core/v1"
conversion "k8s.io/apimachinery/pkg/conversion"
runtime "k8s.io/apimachinery/pkg/runtime"
)
// init hooks the generated conversion registrations into the package's
// scheme builder so they are installed whenever AddToScheme runs.
func init() {
localSchemeBuilder.Register(RegisterConversions)
}
// RegisterConversions adds conversion functions to the given scheme.
// Public to allow building arbitrary schemes.
func RegisterConversions(s *runtime.Scheme) error {
if err := s.AddGeneratedConversionFunc((*Components)(nil), (*unversioned.Components)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1alpha2_Components_To_unversioned_Components(a.(*Components), b.(*unversioned.Components), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*unversioned.Components)(nil), (*Components)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_unversioned_Components_To_v1alpha2_Components(a.(*unversioned.Components), b.(*Components), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*ContainerConfig)(nil), (*unversioned.ContainerConfig)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1alpha2_ContainerConfig_To_unversioned_ContainerConfig(a.(*ContainerConfig), b.(*unversioned.ContainerConfig), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*unversioned.ContainerConfig)(nil), (*ContainerConfig)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_unversioned_ContainerConfig_To_v1alpha2_ContainerConfig(a.(*unversioned.ContainerConfig), b.(*ContainerConfig), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*EraserConfig)(nil), (*unversioned.EraserConfig)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1alpha2_EraserConfig_To_unversioned_EraserConfig(a.(*EraserConfig), b.(*unversioned.EraserConfig), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*unversioned.EraserConfig)(nil), (*EraserConfig)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_unversioned_EraserConfig_To_v1alpha2_EraserConfig(a.(*unversioned.EraserConfig), b.(*EraserConfig), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*ImageJobCleanupConfig)(nil), (*unversioned.ImageJobCleanupConfig)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1alpha2_ImageJobCleanupConfig_To_unversioned_ImageJobCleanupConfig(a.(*ImageJobCleanupConfig), b.(*unversioned.ImageJobCleanupConfig), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*unversioned.ImageJobCleanupConfig)(nil), (*ImageJobCleanupConfig)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_unversioned_ImageJobCleanupConfig_To_v1alpha2_ImageJobCleanupConfig(a.(*unversioned.ImageJobCleanupConfig), b.(*ImageJobCleanupConfig), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*ImageJobConfig)(nil), (*unversioned.ImageJobConfig)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1alpha2_ImageJobConfig_To_unversioned_ImageJobConfig(a.(*ImageJobConfig), b.(*unversioned.ImageJobConfig), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*unversioned.ImageJobConfig)(nil), (*ImageJobConfig)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_unversioned_ImageJobConfig_To_v1alpha2_ImageJobConfig(a.(*unversioned.ImageJobConfig), b.(*ImageJobConfig), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*NodeFilterConfig)(nil), (*unversioned.NodeFilterConfig)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1alpha2_NodeFilterConfig_To_unversioned_NodeFilterConfig(a.(*NodeFilterConfig), b.(*unversioned.NodeFilterConfig), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*unversioned.NodeFilterConfig)(nil), (*NodeFilterConfig)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_unversioned_NodeFilterConfig_To_v1alpha2_NodeFilterConfig(a.(*unversioned.NodeFilterConfig), b.(*NodeFilterConfig), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*OptionalContainerConfig)(nil), (*unversioned.OptionalContainerConfig)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1alpha2_OptionalContainerConfig_To_unversioned_OptionalContainerConfig(a.(*OptionalContainerConfig), b.(*unversioned.OptionalContainerConfig), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*unversioned.OptionalContainerConfig)(nil), (*OptionalContainerConfig)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_unversioned_OptionalContainerConfig_To_v1alpha2_OptionalContainerConfig(a.(*unversioned.OptionalContainerConfig), b.(*OptionalContainerConfig), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*ProfileConfig)(nil), (*unversioned.ProfileConfig)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1alpha2_ProfileConfig_To_unversioned_ProfileConfig(a.(*ProfileConfig), b.(*unversioned.ProfileConfig), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*unversioned.ProfileConfig)(nil), (*ProfileConfig)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_unversioned_ProfileConfig_To_v1alpha2_ProfileConfig(a.(*unversioned.ProfileConfig), b.(*ProfileConfig), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*RepoTag)(nil), (*unversioned.RepoTag)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1alpha2_RepoTag_To_unversioned_RepoTag(a.(*RepoTag), b.(*unversioned.RepoTag), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*unversioned.RepoTag)(nil), (*RepoTag)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_unversioned_RepoTag_To_v1alpha2_RepoTag(a.(*unversioned.RepoTag), b.(*RepoTag), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*ResourceRequirements)(nil), (*unversioned.ResourceRequirements)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1alpha2_ResourceRequirements_To_unversioned_ResourceRequirements(a.(*ResourceRequirements), b.(*unversioned.ResourceRequirements), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*unversioned.ResourceRequirements)(nil), (*ResourceRequirements)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_unversioned_ResourceRequirements_To_v1alpha2_ResourceRequirements(a.(*unversioned.ResourceRequirements), b.(*ResourceRequirements), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*ScheduleConfig)(nil), (*unversioned.ScheduleConfig)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1alpha2_ScheduleConfig_To_unversioned_ScheduleConfig(a.(*ScheduleConfig), b.(*unversioned.ScheduleConfig), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*unversioned.ScheduleConfig)(nil), (*ScheduleConfig)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_unversioned_ScheduleConfig_To_v1alpha2_ScheduleConfig(a.(*unversioned.ScheduleConfig), b.(*ScheduleConfig), scope)
}); err != nil {
return err
}
// The remaining conversions are registered via AddConversionFunc (not
// AddGeneratedConversionFunc) because their Convert_* implementations are
// hand-written manual conversions — presumably in custom_conversions.go in
// this package (the types involved do not map field-for-field; see the
// AdditionalPodLabels WARNING in the ManagerConfig autoConvert below).
if err := s.AddConversionFunc((*unversioned.ManagerConfig)(nil), (*ManagerConfig)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_unversioned_ManagerConfig_To_v1alpha2_ManagerConfig(a.(*unversioned.ManagerConfig), b.(*ManagerConfig), scope)
}); err != nil {
return err
}
if err := s.AddConversionFunc((*unversioned.RuntimeSpec)(nil), (*Runtime)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_unversioned_RuntimeSpec_To_v1alpha2_Runtime(a.(*unversioned.RuntimeSpec), b.(*Runtime), scope)
}); err != nil {
return err
}
if err := s.AddConversionFunc((*ManagerConfig)(nil), (*unversioned.ManagerConfig)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1alpha2_ManagerConfig_To_unversioned_ManagerConfig(a.(*ManagerConfig), b.(*unversioned.ManagerConfig), scope)
}); err != nil {
return err
}
if err := s.AddConversionFunc((*Runtime)(nil), (*unversioned.RuntimeSpec)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1alpha2_Runtime_To_unversioned_RuntimeSpec(a.(*Runtime), b.(*unversioned.RuntimeSpec), scope)
}); err != nil {
return err
}
return nil
}
// autoConvert_v1alpha2_Components_To_unversioned_Components converts each
// component (Collector, Scanner, Remover) via its generated per-type converter.
func autoConvert_v1alpha2_Components_To_unversioned_Components(in *Components, out *unversioned.Components, s conversion.Scope) error {
if err := Convert_v1alpha2_OptionalContainerConfig_To_unversioned_OptionalContainerConfig(&in.Collector, &out.Collector, s); err != nil {
return err
}
if err := Convert_v1alpha2_OptionalContainerConfig_To_unversioned_OptionalContainerConfig(&in.Scanner, &out.Scanner, s); err != nil {
return err
}
if err := Convert_v1alpha2_ContainerConfig_To_unversioned_ContainerConfig(&in.Remover, &out.Remover, s); err != nil {
return err
}
return nil
}
// Convert_v1alpha2_Components_To_unversioned_Components is an autogenerated conversion function.
func Convert_v1alpha2_Components_To_unversioned_Components(in *Components, out *unversioned.Components, s conversion.Scope) error {
return autoConvert_v1alpha2_Components_To_unversioned_Components(in, out, s)
}
// autoConvert_unversioned_Components_To_v1alpha2_Components is the reverse
// direction of the Components conversion; field mapping is symmetric.
func autoConvert_unversioned_Components_To_v1alpha2_Components(in *unversioned.Components, out *Components, s conversion.Scope) error {
if err := Convert_unversioned_OptionalContainerConfig_To_v1alpha2_OptionalContainerConfig(&in.Collector, &out.Collector, s); err != nil {
return err
}
if err := Convert_unversioned_OptionalContainerConfig_To_v1alpha2_OptionalContainerConfig(&in.Scanner, &out.Scanner, s); err != nil {
return err
}
if err := Convert_unversioned_ContainerConfig_To_v1alpha2_ContainerConfig(&in.Remover, &out.Remover, s); err != nil {
return err
}
return nil
}
// Convert_unversioned_Components_To_v1alpha2_Components is an autogenerated conversion function.
func Convert_unversioned_Components_To_v1alpha2_Components(in *unversioned.Components, out *Components, s conversion.Scope) error {
return autoConvert_unversioned_Components_To_v1alpha2_Components(in, out, s)
}
// autoConvert_v1alpha2_ContainerConfig_To_unversioned_ContainerConfig converts
// Image/Request/Limit via generated converters; Config and Volumes are
// reinterpreted in place with unsafe.Pointer — conversion-gen emits this when
// it determines the peer field types share an identical memory layout, so no
// element-wise copy is needed.
func autoConvert_v1alpha2_ContainerConfig_To_unversioned_ContainerConfig(in *ContainerConfig, out *unversioned.ContainerConfig, s conversion.Scope) error {
if err := Convert_v1alpha2_RepoTag_To_unversioned_RepoTag(&in.Image, &out.Image, s); err != nil {
return err
}
if err := Convert_v1alpha2_ResourceRequirements_To_unversioned_ResourceRequirements(&in.Request, &out.Request, s); err != nil {
return err
}
if err := Convert_v1alpha2_ResourceRequirements_To_unversioned_ResourceRequirements(&in.Limit, &out.Limit, s); err != nil {
return err
}
out.Config = (*string)(unsafe.Pointer(in.Config))
// NOTE: out.Volumes aliases in.Volumes' backing array (no deep copy).
out.Volumes = *(*[]v1.Volume)(unsafe.Pointer(&in.Volumes))
return nil
}
// Convert_v1alpha2_ContainerConfig_To_unversioned_ContainerConfig is an autogenerated conversion function.
func Convert_v1alpha2_ContainerConfig_To_unversioned_ContainerConfig(in *ContainerConfig, out *unversioned.ContainerConfig, s conversion.Scope) error {
return autoConvert_v1alpha2_ContainerConfig_To_unversioned_ContainerConfig(in, out, s)
}
// autoConvert_unversioned_ContainerConfig_To_v1alpha2_ContainerConfig is the
// reverse direction; same field mapping and the same unsafe aliasing for
// Config and Volumes.
func autoConvert_unversioned_ContainerConfig_To_v1alpha2_ContainerConfig(in *unversioned.ContainerConfig, out *ContainerConfig, s conversion.Scope) error {
if err := Convert_unversioned_RepoTag_To_v1alpha2_RepoTag(&in.Image, &out.Image, s); err != nil {
return err
}
if err := Convert_unversioned_ResourceRequirements_To_v1alpha2_ResourceRequirements(&in.Request, &out.Request, s); err != nil {
return err
}
if err := Convert_unversioned_ResourceRequirements_To_v1alpha2_ResourceRequirements(&in.Limit, &out.Limit, s); err != nil {
return err
}
out.Config = (*string)(unsafe.Pointer(in.Config))
out.Volumes = *(*[]v1.Volume)(unsafe.Pointer(&in.Volumes))
return nil
}
// Convert_unversioned_ContainerConfig_To_v1alpha2_ContainerConfig is an autogenerated conversion function.
func Convert_unversioned_ContainerConfig_To_v1alpha2_ContainerConfig(in *unversioned.ContainerConfig, out *ContainerConfig, s conversion.Scope) error {
return autoConvert_unversioned_ContainerConfig_To_v1alpha2_ContainerConfig(in, out, s)
}
// autoConvert_v1alpha2_EraserConfig_To_unversioned_EraserConfig converts the
// top-level config by delegating to the Manager and Components converters.
func autoConvert_v1alpha2_EraserConfig_To_unversioned_EraserConfig(in *EraserConfig, out *unversioned.EraserConfig, s conversion.Scope) error {
if err := Convert_v1alpha2_ManagerConfig_To_unversioned_ManagerConfig(&in.Manager, &out.Manager, s); err != nil {
return err
}
if err := Convert_v1alpha2_Components_To_unversioned_Components(&in.Components, &out.Components, s); err != nil {
return err
}
return nil
}
// Convert_v1alpha2_EraserConfig_To_unversioned_EraserConfig is an autogenerated conversion function.
func Convert_v1alpha2_EraserConfig_To_unversioned_EraserConfig(in *EraserConfig, out *unversioned.EraserConfig, s conversion.Scope) error {
return autoConvert_v1alpha2_EraserConfig_To_unversioned_EraserConfig(in, out, s)
}
// autoConvert_unversioned_EraserConfig_To_v1alpha2_EraserConfig is the reverse
// direction of the EraserConfig conversion.
func autoConvert_unversioned_EraserConfig_To_v1alpha2_EraserConfig(in *unversioned.EraserConfig, out *EraserConfig, s conversion.Scope) error {
if err := Convert_unversioned_ManagerConfig_To_v1alpha2_ManagerConfig(&in.Manager, &out.Manager, s); err != nil {
return err
}
if err := Convert_unversioned_Components_To_v1alpha2_Components(&in.Components, &out.Components, s); err != nil {
return err
}
return nil
}
// Convert_unversioned_EraserConfig_To_v1alpha2_EraserConfig is an autogenerated conversion function.
func Convert_unversioned_EraserConfig_To_v1alpha2_EraserConfig(in *unversioned.EraserConfig, out *EraserConfig, s conversion.Scope) error {
return autoConvert_unversioned_EraserConfig_To_v1alpha2_EraserConfig(in, out, s)
}
// autoConvert_v1alpha2_ImageJobCleanupConfig_To_unversioned_ImageJobCleanupConfig
// converts the cleanup delays via a direct type conversion between the two
// packages' Duration types.
func autoConvert_v1alpha2_ImageJobCleanupConfig_To_unversioned_ImageJobCleanupConfig(in *ImageJobCleanupConfig, out *unversioned.ImageJobCleanupConfig, s conversion.Scope) error {
out.DelayOnSuccess = unversioned.Duration(in.DelayOnSuccess)
out.DelayOnFailure = unversioned.Duration(in.DelayOnFailure)
return nil
}
// Convert_v1alpha2_ImageJobCleanupConfig_To_unversioned_ImageJobCleanupConfig is an autogenerated conversion function.
func Convert_v1alpha2_ImageJobCleanupConfig_To_unversioned_ImageJobCleanupConfig(in *ImageJobCleanupConfig, out *unversioned.ImageJobCleanupConfig, s conversion.Scope) error {
return autoConvert_v1alpha2_ImageJobCleanupConfig_To_unversioned_ImageJobCleanupConfig(in, out, s)
}
// autoConvert_unversioned_ImageJobCleanupConfig_To_v1alpha2_ImageJobCleanupConfig
// is the reverse direction, casting back to this package's Duration type.
func autoConvert_unversioned_ImageJobCleanupConfig_To_v1alpha2_ImageJobCleanupConfig(in *unversioned.ImageJobCleanupConfig, out *ImageJobCleanupConfig, s conversion.Scope) error {
out.DelayOnSuccess = Duration(in.DelayOnSuccess)
out.DelayOnFailure = Duration(in.DelayOnFailure)
return nil
}
// Convert_unversioned_ImageJobCleanupConfig_To_v1alpha2_ImageJobCleanupConfig is an autogenerated conversion function.
func Convert_unversioned_ImageJobCleanupConfig_To_v1alpha2_ImageJobCleanupConfig(in *unversioned.ImageJobCleanupConfig, out *ImageJobCleanupConfig, s conversion.Scope) error {
return autoConvert_unversioned_ImageJobCleanupConfig_To_v1alpha2_ImageJobCleanupConfig(in, out, s)
}
// autoConvert_v1alpha2_ImageJobConfig_To_unversioned_ImageJobConfig copies
// SuccessRatio directly and delegates Cleanup to its generated converter.
func autoConvert_v1alpha2_ImageJobConfig_To_unversioned_ImageJobConfig(in *ImageJobConfig, out *unversioned.ImageJobConfig, s conversion.Scope) error {
out.SuccessRatio = in.SuccessRatio
if err := Convert_v1alpha2_ImageJobCleanupConfig_To_unversioned_ImageJobCleanupConfig(&in.Cleanup, &out.Cleanup, s); err != nil {
return err
}
return nil
}
// Convert_v1alpha2_ImageJobConfig_To_unversioned_ImageJobConfig is an autogenerated conversion function.
func Convert_v1alpha2_ImageJobConfig_To_unversioned_ImageJobConfig(in *ImageJobConfig, out *unversioned.ImageJobConfig, s conversion.Scope) error {
return autoConvert_v1alpha2_ImageJobConfig_To_unversioned_ImageJobConfig(in, out, s)
}
// autoConvert_unversioned_ImageJobConfig_To_v1alpha2_ImageJobConfig is the
// reverse direction of the ImageJobConfig conversion.
func autoConvert_unversioned_ImageJobConfig_To_v1alpha2_ImageJobConfig(in *unversioned.ImageJobConfig, out *ImageJobConfig, s conversion.Scope) error {
out.SuccessRatio = in.SuccessRatio
if err := Convert_unversioned_ImageJobCleanupConfig_To_v1alpha2_ImageJobCleanupConfig(&in.Cleanup, &out.Cleanup, s); err != nil {
return err
}
return nil
}
// Convert_unversioned_ImageJobConfig_To_v1alpha2_ImageJobConfig is an autogenerated conversion function.
func Convert_unversioned_ImageJobConfig_To_v1alpha2_ImageJobConfig(in *unversioned.ImageJobConfig, out *ImageJobConfig, s conversion.Scope) error {
return autoConvert_unversioned_ImageJobConfig_To_v1alpha2_ImageJobConfig(in, out, s)
}
// autoConvert_v1alpha2_ManagerConfig_To_unversioned_ManagerConfig converts all
// ManagerConfig fields. Unlike most types in this file, conversion-gen does
// NOT emit public Convert_* wrappers for ManagerConfig: the public functions
// are hand-written manual conversions (registered via AddConversionFunc in
// RegisterConversions), which may call these autoConvert bodies and then
// handle the fields the generator cannot map.
func autoConvert_v1alpha2_ManagerConfig_To_unversioned_ManagerConfig(in *ManagerConfig, out *unversioned.ManagerConfig, s conversion.Scope) error {
if err := Convert_v1alpha2_Runtime_To_unversioned_RuntimeSpec(&in.Runtime, &out.Runtime, s); err != nil {
return err
}
out.OTLPEndpoint = in.OTLPEndpoint
out.LogLevel = in.LogLevel
if err := Convert_v1alpha2_ScheduleConfig_To_unversioned_ScheduleConfig(&in.Scheduling, &out.Scheduling, s); err != nil {
return err
}
if err := Convert_v1alpha2_ProfileConfig_To_unversioned_ProfileConfig(&in.Profile, &out.Profile, s); err != nil {
return err
}
if err := Convert_v1alpha2_ImageJobConfig_To_unversioned_ImageJobConfig(&in.ImageJob, &out.ImageJob, s); err != nil {
return err
}
// PullSecrets is aliased (not copied) via unsafe.Pointer — identical layout.
out.PullSecrets = *(*[]string)(unsafe.Pointer(&in.PullSecrets))
if err := Convert_v1alpha2_NodeFilterConfig_To_unversioned_NodeFilterConfig(&in.NodeFilter, &out.NodeFilter, s); err != nil {
return err
}
out.PriorityClassName = in.PriorityClassName
return nil
}
// autoConvert_unversioned_ManagerConfig_To_v1alpha2_ManagerConfig is the
// reverse direction. The unversioned type's AdditionalPodLabels field has no
// peer in v1alpha2 and is dropped here (see generator WARNING below); any
// handling of it must live in the manual conversion.
func autoConvert_unversioned_ManagerConfig_To_v1alpha2_ManagerConfig(in *unversioned.ManagerConfig, out *ManagerConfig, s conversion.Scope) error {
if err := Convert_unversioned_RuntimeSpec_To_v1alpha2_Runtime(&in.Runtime, &out.Runtime, s); err != nil {
return err
}
out.OTLPEndpoint = in.OTLPEndpoint
out.LogLevel = in.LogLevel
if err := Convert_unversioned_ScheduleConfig_To_v1alpha2_ScheduleConfig(&in.Scheduling, &out.Scheduling, s); err != nil {
return err
}
if err := Convert_unversioned_ProfileConfig_To_v1alpha2_ProfileConfig(&in.Profile, &out.Profile, s); err != nil {
return err
}
if err := Convert_unversioned_ImageJobConfig_To_v1alpha2_ImageJobConfig(&in.ImageJob, &out.ImageJob, s); err != nil {
return err
}
out.PullSecrets = *(*[]string)(unsafe.Pointer(&in.PullSecrets))
if err := Convert_unversioned_NodeFilterConfig_To_v1alpha2_NodeFilterConfig(&in.NodeFilter, &out.NodeFilter, s); err != nil {
return err
}
out.PriorityClassName = in.PriorityClassName
// WARNING: in.AdditionalPodLabels requires manual conversion: does not exist in peer-type
return nil
}
// autoConvert_v1alpha2_NodeFilterConfig_To_unversioned_NodeFilterConfig copies
// Type directly; Selectors is aliased via unsafe.Pointer (no deep copy).
func autoConvert_v1alpha2_NodeFilterConfig_To_unversioned_NodeFilterConfig(in *NodeFilterConfig, out *unversioned.NodeFilterConfig, s conversion.Scope) error {
out.Type = in.Type
out.Selectors = *(*[]string)(unsafe.Pointer(&in.Selectors))
return nil
}
// Convert_v1alpha2_NodeFilterConfig_To_unversioned_NodeFilterConfig is an autogenerated conversion function.
func Convert_v1alpha2_NodeFilterConfig_To_unversioned_NodeFilterConfig(in *NodeFilterConfig, out *unversioned.NodeFilterConfig, s conversion.Scope) error {
return autoConvert_v1alpha2_NodeFilterConfig_To_unversioned_NodeFilterConfig(in, out, s)
}
// autoConvert_unversioned_NodeFilterConfig_To_v1alpha2_NodeFilterConfig is the
// reverse direction with the same aliasing behavior.
func autoConvert_unversioned_NodeFilterConfig_To_v1alpha2_NodeFilterConfig(in *unversioned.NodeFilterConfig, out *NodeFilterConfig, s conversion.Scope) error {
out.Type = in.Type
out.Selectors = *(*[]string)(unsafe.Pointer(&in.Selectors))
return nil
}
// Convert_unversioned_NodeFilterConfig_To_v1alpha2_NodeFilterConfig is an autogenerated conversion function.
func Convert_unversioned_NodeFilterConfig_To_v1alpha2_NodeFilterConfig(in *unversioned.NodeFilterConfig, out *NodeFilterConfig, s conversion.Scope) error {
return autoConvert_unversioned_NodeFilterConfig_To_v1alpha2_NodeFilterConfig(in, out, s)
}
// autoConvert_v1alpha2_OptionalContainerConfig_To_unversioned_OptionalContainerConfig
// copies the Enabled flag and converts the embedded ContainerConfig.
func autoConvert_v1alpha2_OptionalContainerConfig_To_unversioned_OptionalContainerConfig(in *OptionalContainerConfig, out *unversioned.OptionalContainerConfig, s conversion.Scope) error {
out.Enabled = in.Enabled
if err := Convert_v1alpha2_ContainerConfig_To_unversioned_ContainerConfig(&in.ContainerConfig, &out.ContainerConfig, s); err != nil {
return err
}
return nil
}
// Convert_v1alpha2_OptionalContainerConfig_To_unversioned_OptionalContainerConfig is an autogenerated conversion function.
func Convert_v1alpha2_OptionalContainerConfig_To_unversioned_OptionalContainerConfig(in *OptionalContainerConfig, out *unversioned.OptionalContainerConfig, s conversion.Scope) error {
return autoConvert_v1alpha2_OptionalContainerConfig_To_unversioned_OptionalContainerConfig(in, out, s)
}
// autoConvert_unversioned_OptionalContainerConfig_To_v1alpha2_OptionalContainerConfig
// is the reverse direction of the OptionalContainerConfig conversion.
func autoConvert_unversioned_OptionalContainerConfig_To_v1alpha2_OptionalContainerConfig(in *unversioned.OptionalContainerConfig, out *OptionalContainerConfig, s conversion.Scope) error {
out.Enabled = in.Enabled
if err := Convert_unversioned_ContainerConfig_To_v1alpha2_ContainerConfig(&in.ContainerConfig, &out.ContainerConfig, s); err != nil {
return err
}
return nil
}
// Convert_unversioned_OptionalContainerConfig_To_v1alpha2_OptionalContainerConfig is an autogenerated conversion function.
func Convert_unversioned_OptionalContainerConfig_To_v1alpha2_OptionalContainerConfig(in *unversioned.OptionalContainerConfig, out *OptionalContainerConfig, s conversion.Scope) error {
return autoConvert_unversioned_OptionalContainerConfig_To_v1alpha2_OptionalContainerConfig(in, out, s)
}
// autoConvert_v1alpha2_ProfileConfig_To_unversioned_ProfileConfig copies the
// two scalar fields one-to-one.
func autoConvert_v1alpha2_ProfileConfig_To_unversioned_ProfileConfig(in *ProfileConfig, out *unversioned.ProfileConfig, s conversion.Scope) error {
out.Enabled = in.Enabled
out.Port = in.Port
return nil
}
// Convert_v1alpha2_ProfileConfig_To_unversioned_ProfileConfig is an autogenerated conversion function.
func Convert_v1alpha2_ProfileConfig_To_unversioned_ProfileConfig(in *ProfileConfig, out *unversioned.ProfileConfig, s conversion.Scope) error {
return autoConvert_v1alpha2_ProfileConfig_To_unversioned_ProfileConfig(in, out, s)
}
// autoConvert_unversioned_ProfileConfig_To_v1alpha2_ProfileConfig is the
// reverse direction of the ProfileConfig conversion.
func autoConvert_unversioned_ProfileConfig_To_v1alpha2_ProfileConfig(in *unversioned.ProfileConfig, out *ProfileConfig, s conversion.Scope) error {
out.Enabled = in.Enabled
out.Port = in.Port
return nil
}
// Convert_unversioned_ProfileConfig_To_v1alpha2_ProfileConfig is an autogenerated conversion function.
func Convert_unversioned_ProfileConfig_To_v1alpha2_ProfileConfig(in *unversioned.ProfileConfig, out *ProfileConfig, s conversion.Scope) error {
return autoConvert_unversioned_ProfileConfig_To_v1alpha2_ProfileConfig(in, out, s)
}
// autoConvert_v1alpha2_RepoTag_To_unversioned_RepoTag copies the Repo and Tag
// strings one-to-one.
func autoConvert_v1alpha2_RepoTag_To_unversioned_RepoTag(in *RepoTag, out *unversioned.RepoTag, s conversion.Scope) error {
out.Repo = in.Repo
out.Tag = in.Tag
return nil
}
// Convert_v1alpha2_RepoTag_To_unversioned_RepoTag is an autogenerated conversion function.
func Convert_v1alpha2_RepoTag_To_unversioned_RepoTag(in *RepoTag, out *unversioned.RepoTag, s conversion.Scope) error {
return autoConvert_v1alpha2_RepoTag_To_unversioned_RepoTag(in, out, s)
}
// autoConvert_unversioned_RepoTag_To_v1alpha2_RepoTag is the reverse direction
// of the RepoTag conversion.
func autoConvert_unversioned_RepoTag_To_v1alpha2_RepoTag(in *unversioned.RepoTag, out *RepoTag, s conversion.Scope) error {
out.Repo = in.Repo
out.Tag = in.Tag
return nil
}
// Convert_unversioned_RepoTag_To_v1alpha2_RepoTag is an autogenerated conversion function.
func Convert_unversioned_RepoTag_To_v1alpha2_RepoTag(in *unversioned.RepoTag, out *RepoTag, s conversion.Scope) error {
return autoConvert_unversioned_RepoTag_To_v1alpha2_RepoTag(in, out, s)
}
// autoConvert_v1alpha2_ResourceRequirements_To_unversioned_ResourceRequirements
// copies Mem and CPU one-to-one. Note this is a shallow assignment of the
// quantity values, not a DeepCopy.
func autoConvert_v1alpha2_ResourceRequirements_To_unversioned_ResourceRequirements(in *ResourceRequirements, out *unversioned.ResourceRequirements, s conversion.Scope) error {
out.Mem = in.Mem
out.CPU = in.CPU
return nil
}
// Convert_v1alpha2_ResourceRequirements_To_unversioned_ResourceRequirements is an autogenerated conversion function.
func Convert_v1alpha2_ResourceRequirements_To_unversioned_ResourceRequirements(in *ResourceRequirements, out *unversioned.ResourceRequirements, s conversion.Scope) error {
return autoConvert_v1alpha2_ResourceRequirements_To_unversioned_ResourceRequirements(in, out, s)
}
// autoConvert_unversioned_ResourceRequirements_To_v1alpha2_ResourceRequirements
// is the reverse direction of the ResourceRequirements conversion.
func autoConvert_unversioned_ResourceRequirements_To_v1alpha2_ResourceRequirements(in *unversioned.ResourceRequirements, out *ResourceRequirements, s conversion.Scope) error {
out.Mem = in.Mem
out.CPU = in.CPU
return nil
}
// Convert_unversioned_ResourceRequirements_To_v1alpha2_ResourceRequirements is an autogenerated conversion function.
func Convert_unversioned_ResourceRequirements_To_v1alpha2_ResourceRequirements(in *unversioned.ResourceRequirements, out *ResourceRequirements, s conversion.Scope) error {
return autoConvert_unversioned_ResourceRequirements_To_v1alpha2_ResourceRequirements(in, out, s)
}
// autoConvert_v1alpha2_ScheduleConfig_To_unversioned_ScheduleConfig converts
// RepeatInterval via a direct Duration type cast and copies BeginImmediately.
func autoConvert_v1alpha2_ScheduleConfig_To_unversioned_ScheduleConfig(in *ScheduleConfig, out *unversioned.ScheduleConfig, s conversion.Scope) error {
out.RepeatInterval = unversioned.Duration(in.RepeatInterval)
out.BeginImmediately = in.BeginImmediately
return nil
}
// Convert_v1alpha2_ScheduleConfig_To_unversioned_ScheduleConfig is an autogenerated conversion function.
func Convert_v1alpha2_ScheduleConfig_To_unversioned_ScheduleConfig(in *ScheduleConfig, out *unversioned.ScheduleConfig, s conversion.Scope) error {
return autoConvert_v1alpha2_ScheduleConfig_To_unversioned_ScheduleConfig(in, out, s)
}
// autoConvert_unversioned_ScheduleConfig_To_v1alpha2_ScheduleConfig is the
// reverse direction, casting back to this package's Duration type.
func autoConvert_unversioned_ScheduleConfig_To_v1alpha2_ScheduleConfig(in *unversioned.ScheduleConfig, out *ScheduleConfig, s conversion.Scope) error {
out.RepeatInterval = Duration(in.RepeatInterval)
out.BeginImmediately = in.BeginImmediately
return nil
}
// Convert_unversioned_ScheduleConfig_To_v1alpha2_ScheduleConfig is an autogenerated conversion function.
func Convert_unversioned_ScheduleConfig_To_v1alpha2_ScheduleConfig(in *unversioned.ScheduleConfig, out *ScheduleConfig, s conversion.Scope) error {
return autoConvert_unversioned_ScheduleConfig_To_v1alpha2_ScheduleConfig(in, out, s)
}
================================================
FILE: api/v1alpha2/zz_generated.deepcopy.go
================================================
//go:build !ignore_autogenerated
/*
Copyright 2021.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by controller-gen. DO NOT EDIT.
package v1alpha2
import (
"k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/runtime"
)
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *Components) DeepCopyInto(out *Components) {
*out = *in
// Each component owns pointer/slice data, so delegate to its DeepCopyInto.
in.Collector.DeepCopyInto(&out.Collector)
in.Scanner.DeepCopyInto(&out.Scanner)
in.Remover.DeepCopyInto(&out.Remover)
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Components.
func (in *Components) DeepCopy() *Components {
if in == nil {
return nil
}
out := new(Components)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ContainerConfig) DeepCopyInto(out *ContainerConfig) {
*out = *in
out.Image = in.Image
in.Request.DeepCopyInto(&out.Request)
in.Limit.DeepCopyInto(&out.Limit)
// Config is an optional *string: allocate a fresh pointer when set.
if in.Config != nil {
in, out := &in.Config, &out.Config
*out = new(string)
**out = **in
}
// Volumes elements are corev1.Volume values that themselves contain
// pointers, so each element is deep-copied individually.
if in.Volumes != nil {
in, out := &in.Volumes, &out.Volumes
*out = make([]v1.Volume, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ContainerConfig.
func (in *ContainerConfig) DeepCopy() *ContainerConfig {
if in == nil {
return nil
}
out := new(ContainerConfig)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *EraserConfig) DeepCopyInto(out *EraserConfig) {
*out = *in
out.TypeMeta = in.TypeMeta
in.Manager.DeepCopyInto(&out.Manager)
in.Components.DeepCopyInto(&out.Components)
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EraserConfig.
func (in *EraserConfig) DeepCopy() *EraserConfig {
if in == nil {
return nil
}
out := new(EraserConfig)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
// It makes EraserConfig satisfy runtime.Object so it can be used with the
// scheme and conversion machinery.
func (in *EraserConfig) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
// A plain value assignment suffices: the struct contains no pointer fields.
func (in *ImageJobCleanupConfig) DeepCopyInto(out *ImageJobCleanupConfig) {
*out = *in
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImageJobCleanupConfig.
func (in *ImageJobCleanupConfig) DeepCopy() *ImageJobCleanupConfig {
if in == nil {
return nil
}
out := new(ImageJobCleanupConfig)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ImageJobConfig) DeepCopyInto(out *ImageJobConfig) {
*out = *in
// Cleanup has no pointer fields, so a value copy is a deep copy.
out.Cleanup = in.Cleanup
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImageJobConfig.
func (in *ImageJobConfig) DeepCopy() *ImageJobConfig {
if in == nil {
return nil
}
out := new(ImageJobConfig)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ManagerConfig) DeepCopyInto(out *ManagerConfig) {
*out = *in
out.Scheduling = in.Scheduling
out.Profile = in.Profile
out.ImageJob = in.ImageJob
// PullSecrets is a []string: allocate and copy so the clone does not
// share the backing array.
if in.PullSecrets != nil {
in, out := &in.PullSecrets, &out.PullSecrets
*out = make([]string, len(*in))
copy(*out, *in)
}
in.NodeFilter.DeepCopyInto(&out.NodeFilter)
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ManagerConfig.
func (in *ManagerConfig) DeepCopy() *ManagerConfig {
if in == nil {
return nil
}
out := new(ManagerConfig)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *NodeFilterConfig) DeepCopyInto(out *NodeFilterConfig) {
*out = *in
// Selectors is copied into a fresh slice to avoid aliasing.
if in.Selectors != nil {
in, out := &in.Selectors, &out.Selectors
*out = make([]string, len(*in))
copy(*out, *in)
}
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodeFilterConfig.
func (in *NodeFilterConfig) DeepCopy() *NodeFilterConfig {
if in == nil {
return nil
}
out := new(NodeFilterConfig)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *OptionalContainerConfig) DeepCopyInto(out *OptionalContainerConfig) {
*out = *in
// The embedded ContainerConfig owns pointer/slice data; deep-copy it.
in.ContainerConfig.DeepCopyInto(&out.ContainerConfig)
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OptionalContainerConfig.
func (in *OptionalContainerConfig) DeepCopy() *OptionalContainerConfig {
if in == nil {
return nil
}
out := new(OptionalContainerConfig)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
// A plain value assignment suffices: the struct contains no pointer fields.
func (in *ProfileConfig) DeepCopyInto(out *ProfileConfig) {
*out = *in
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProfileConfig.
func (in *ProfileConfig) DeepCopy() *ProfileConfig {
if in == nil {
return nil
}
out := new(ProfileConfig)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
// A plain value assignment suffices: the struct contains no pointer fields.
func (in *RepoTag) DeepCopyInto(out *RepoTag) {
*out = *in
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RepoTag.
func (in *RepoTag) DeepCopy() *RepoTag {
if in == nil {
return nil
}
out := new(RepoTag)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ResourceRequirements) DeepCopyInto(out *ResourceRequirements) {
	*out = *in
	// resource.Quantity has internal pointer state; use its own DeepCopy.
	out.Mem = in.Mem.DeepCopy()
	out.CPU = in.CPU.DeepCopy()
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceRequirements.
// A nil receiver yields nil.
func (in *ResourceRequirements) DeepCopy() *ResourceRequirements {
	if in == nil {
		return nil
	}
	out := new(ResourceRequirements)
	in.DeepCopyInto(out)
	return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
// ScheduleConfig holds only value fields, so a shallow struct copy is a deep copy.
func (in *ScheduleConfig) DeepCopyInto(out *ScheduleConfig) {
	*out = *in
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ScheduleConfig.
// A nil receiver yields nil.
func (in *ScheduleConfig) DeepCopy() *ScheduleConfig {
	if in == nil {
		return nil
	}
	out := new(ScheduleConfig)
	in.DeepCopyInto(out)
	return out
}
================================================
FILE: api/v1alpha3/config/config.go
================================================
package config
import (
"fmt"
"time"
"github.com/eraser-dev/eraser/api/v1alpha3"
"github.com/eraser-dev/eraser/version"
"k8s.io/apimachinery/pkg/api/resource"
)
// defaultScannerConfig is the trivy scanner's default configuration,
// passed verbatim as the Scanner component's Config document (see Default).
// NOTE(review): the YAML nesting indentation appears to have been stripped
// by the extraction of this view; verify the literal against the original
// file before editing, as indentation is significant in YAML.
var defaultScannerConfig = `
cacheDir: /var/lib/trivy
dbRepo: ghcr.io/aquasecurity/trivy-db
deleteFailedImages: true
deleteEOLImages: true
vulnerabilities:
ignoreUnfixed: false
types:
- os
- library
securityChecks: # need to be documented; determined by trivy, not us
- vuln
severities:
- CRITICAL
- HIGH
- MEDIUM
- LOW
`
// Cleanup delay presets used when building the default configuration.
const (
	noDelay = v1alpha3.Duration(0)              // remove immediately
	oneDay  = v1alpha3.Duration(time.Hour * 24) // default repeat/cleanup interval
)
// Default returns the baseline eraser configuration: containerd runtime,
// daily scheduling, collector and scanner disabled, and conservative
// resource requests/limits for each component image. Callers overlay
// user-supplied configuration on top of this value.
func Default() *v1alpha3.EraserConfig {
	return &v1alpha3.EraserConfig{
		Manager: v1alpha3.ManagerConfig{
			Runtime: v1alpha3.RuntimeSpec{
				Name:    v1alpha3.RuntimeContainerd,
				Address: "unix:///run/containerd/containerd.sock",
			},
			OTLPEndpoint: "",
			LogLevel:     "info",
			Scheduling: v1alpha3.ScheduleConfig{
				// oneDay already has type v1alpha3.Duration; the previous
				// v1alpha3.Duration(oneDay) conversion was redundant.
				RepeatInterval:   oneDay,
				BeginImmediately: true,
			},
			Profile: v1alpha3.ProfileConfig{
				Enabled: false,
				Port:    6060,
			},
			ImageJob: v1alpha3.ImageJobConfig{
				SuccessRatio: 1.0,
				Cleanup: v1alpha3.ImageJobCleanupConfig{
					DelayOnSuccess: noDelay,
					DelayOnFailure: oneDay,
				},
			},
			PullSecrets: []string{},
			NodeFilter: v1alpha3.NodeFilterConfig{
				Type: "exclude",
				Selectors: []string{
					"eraser.sh/cleanup.filter",
				},
			},
			AdditionalPodLabels: map[string]string{},
		},
		Components: v1alpha3.Components{
			Collector: v1alpha3.OptionalContainerConfig{
				Enabled: false,
				ContainerConfig: v1alpha3.ContainerConfig{
					Image: v1alpha3.RepoTag{
						Repo: repo("collector"),
						Tag:  version.BuildVersion,
					},
					Request: v1alpha3.ResourceRequirements{
						Mem: resource.MustParse("25Mi"),
						CPU: resource.MustParse("7m"),
					},
					Limit: v1alpha3.ResourceRequirements{
						Mem: resource.MustParse("500Mi"),
						// Zero Quantity: no CPU limit for the collector.
						CPU: resource.Quantity{},
					},
					Config: nil,
				},
			},
			Scanner: v1alpha3.OptionalContainerConfig{
				Enabled: false,
				ContainerConfig: v1alpha3.ContainerConfig{
					Image: v1alpha3.RepoTag{
						Repo: repo("eraser-trivy-scanner"),
						Tag:  version.BuildVersion,
					},
					Request: v1alpha3.ResourceRequirements{
						Mem: resource.MustParse("500Mi"),
						CPU: resource.MustParse("1000m"),
					},
					Limit: v1alpha3.ResourceRequirements{
						Mem: resource.MustParse("2Gi"),
						CPU: resource.MustParse("1500m"),
					},
					// Scanner ships with the built-in trivy YAML config.
					Config: &defaultScannerConfig,
				},
			},
			Remover: v1alpha3.ContainerConfig{
				Image: v1alpha3.RepoTag{
					Repo: repo("remover"),
					Tag:  version.BuildVersion,
				},
				Request: v1alpha3.ResourceRequirements{
					Mem: resource.MustParse("25Mi"),
					CPU: resource.MustParse("7m"),
				},
				Limit: v1alpha3.ResourceRequirements{
					Mem: resource.MustParse("30Mi"),
					// Zero Quantity: no CPU limit for the remover.
					CPU: resource.Quantity{},
				},
				Config: nil,
			},
		},
	}
}
// repo builds the fully-qualified repository name for a component image,
// prefixing basename with the build-time default repository when one is set.
func repo(basename string) string {
	base := version.DefaultRepo
	if base != "" {
		return fmt.Sprintf("%s/%s", base, basename)
	}
	return basename
}
================================================
FILE: api/v1alpha3/doc.go
================================================
// +k8s:conversion-gen=github.com/eraser-dev/eraser/api/unversioned
package v1alpha3
================================================
FILE: api/v1alpha3/eraserconfig_types.go
================================================
/*
Copyright 2021.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1alpha3
import (
"encoding/json"
"fmt"
"net/url"
"time"
corev1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
type (
Duration time.Duration
Runtime string
RuntimeSpec struct {
Name Runtime `json:"name"`
Address string `json:"address"`
}
)
const (
RuntimeContainerd Runtime = "containerd"
RuntimeDockerShim Runtime = "dockershim"
RuntimeCrio Runtime = "crio"
RuntimeNotProvided Runtime = ""
ContainerdPath = "/run/containerd/containerd.sock"
DockerPath = "/run/dockershim.sock"
CrioPath = "/run/crio/crio.sock"
)
func ConvertRuntimeToRuntimeSpec(r Runtime) (RuntimeSpec, error) {
var rs RuntimeSpec
switch r {
case RuntimeContainerd:
rs = RuntimeSpec{Name: RuntimeContainerd, Address: fmt.Sprintf("unix://%s", ContainerdPath)}
case RuntimeDockerShim:
rs = RuntimeSpec{Name: RuntimeDockerShim, Address: fmt.Sprintf("unix://%s", DockerPath)}
case RuntimeCrio:
rs = RuntimeSpec{Name: RuntimeCrio, Address: fmt.Sprintf("unix://%s", CrioPath)}
default:
return rs, fmt.Errorf("invalid runtime: valid names are %s, %s, %s", RuntimeContainerd, RuntimeDockerShim, RuntimeCrio)
}
return rs, nil
}
func (td *Duration) UnmarshalJSON(b []byte) error {
var str string
err := json.Unmarshal(b, &str)
if err != nil {
return err
}
pd, err := time.ParseDuration(str)
if err != nil {
return err
}
*td = Duration(pd)
return nil
}
func (td *Duration) MarshalJSON() ([]byte, error) {
return []byte(fmt.Sprintf(`"%s"`, time.Duration(*td).String())), nil
}
// UnmarshalJSON decodes a RuntimeSpec, validating the runtime name and the
// socket-address scheme, and filling in per-runtime defaults when the
// address (or the entire spec) is omitted.
func (r *RuntimeSpec) UnmarshalJSON(b []byte) error {
	// Decode into a shadow type so json.Unmarshal does not recurse back
	// into this method.
	type TempRuntimeSpec struct {
		Name    string `json:"name"`
		Address string `json:"address"`
	}
	var raw TempRuntimeSpec
	if err := json.Unmarshal(b, &raw); err != nil {
		return fmt.Errorf("error unmarshalling into TempRuntimeSpec %v %s", err, string(b))
	}

	name := Runtime(raw.Name)
	switch name {
	case RuntimeContainerd, RuntimeDockerShim, RuntimeCrio:
		if raw.Address == "" {
			// No address supplied: fall back to the runtime's default socket.
			converted, err := ConvertRuntimeToRuntimeSpec(name)
			if err != nil {
				return err
			}
			*r = converted
			return nil
		}
		// Validate the user-supplied socket address.
		u, err := url.Parse(raw.Address)
		if err != nil {
			return err
		}
		if u.Scheme != "tcp" && u.Scheme != "unix" {
			return fmt.Errorf("invalid RuntimeAddress scheme: valid schemes for runtime socket address are `tcp` and `unix`")
		}
		r.Name = name
		r.Address = raw.Address
		return nil
	case RuntimeNotProvided:
		if raw.Address != "" {
			return fmt.Errorf("runtime name must be provided with address")
		}
		// Neither name nor address given: default to containerd.
		r.Name = RuntimeContainerd
		r.Address = fmt.Sprintf("unix://%s", ContainerdPath)
		return nil
	default:
		return fmt.Errorf("invalid runtime: valid names are %s, %s, %s", RuntimeContainerd, RuntimeDockerShim, RuntimeCrio)
	}
}
// EDIT THIS FILE! THIS IS SCAFFOLDING FOR YOU TO OWN!
// NOTE: json tags are required. Any new fields you add must have json tags for the fields to be serialized.

// OptionalContainerConfig is a ContainerConfig for a component that can be
// toggled on or off (used for the collector and scanner components).
type OptionalContainerConfig struct {
	Enabled         bool `json:"enabled,omitempty"`
	ContainerConfig `json:",inline"`
}

// ContainerConfig describes the image, resource requirements, optional raw
// configuration document, and extra volumes for one eraser container.
type ContainerConfig struct {
	Image   RepoTag              `json:"image,omitempty"`
	Request ResourceRequirements `json:"request,omitempty"`
	Limit   ResourceRequirements `json:"limit,omitempty"`
	// Config carries the component's own configuration as a raw document;
	// nil means the component runs with its built-in defaults.
	Config  *string         `json:"config,omitempty"`
	Volumes []corev1.Volume `json:"volumes,omitempty"`
}

// ManagerConfig holds the controller-manager level settings.
type ManagerConfig struct {
	Runtime RuntimeSpec `json:"runtime,omitempty"`
	// OTLPEndpoint is the OpenTelemetry (OTLP) endpoint; empty by default.
	OTLPEndpoint string         `json:"otlpEndpoint,omitempty"`
	LogLevel     string         `json:"logLevel,omitempty"`
	Scheduling   ScheduleConfig `json:"scheduling,omitempty"`
	Profile      ProfileConfig  `json:"profile,omitempty"`
	ImageJob     ImageJobConfig `json:"imageJob,omitempty"`
	// PullSecrets lists image pull secret names for component images.
	PullSecrets         []string          `json:"pullSecrets,omitempty"`
	NodeFilter          NodeFilterConfig  `json:"nodeFilter,omitempty"`
	PriorityClassName   string            `json:"priorityClassName,omitempty"`
	AdditionalPodLabels map[string]string `json:"additionalPodLabels,omitempty"`
}

// ScheduleConfig controls when image jobs run and how often they repeat.
type ScheduleConfig struct {
	RepeatInterval   Duration `json:"repeatInterval,omitempty"`
	BeginImmediately bool     `json:"beginImmediately,omitempty"`
}

// ProfileConfig toggles the profiling server and the port it listens on
// (default 6060 — presumably pprof; confirm against the manager setup).
type ProfileConfig struct {
	Enabled bool `json:"enabled,omitempty"`
	Port    int  `json:"port,omitempty"`
}

// ImageJobConfig configures image-job success criteria and job cleanup.
type ImageJobConfig struct {
	SuccessRatio float64               `json:"successRatio,omitempty"`
	Cleanup      ImageJobCleanupConfig `json:"cleanup,omitempty"`
}

// ImageJobCleanupConfig sets how long finished jobs are retained.
type ImageJobCleanupConfig struct {
	DelayOnSuccess Duration `json:"delayOnSuccess,omitempty"`
	DelayOnFailure Duration `json:"delayOnFailure,omitempty"`
}

// NodeFilterConfig selects which nodes eraser operates on. Type is
// "exclude" in the default config; confirm accepted values with the manager.
type NodeFilterConfig struct {
	Type      string   `json:"type,omitempty"`
	Selectors []string `json:"selectors,omitempty"`
}

// ResourceRequirements is a simplified memory/CPU quantity pair.
type ResourceRequirements struct {
	Mem resource.Quantity `json:"mem,omitempty"`
	CPU resource.Quantity `json:"cpu,omitempty"`
}

// RepoTag identifies a container image by repository and tag.
type RepoTag struct {
	Repo string `json:"repo,omitempty"`
	Tag  string `json:"tag,omitempty"`
}

// Components configures the collector, scanner, and remover containers.
type Components struct {
	Collector OptionalContainerConfig `json:"collector,omitempty"`
	Scanner   OptionalContainerConfig `json:"scanner,omitempty"`
	Remover   ContainerConfig         `json:"remover,omitempty"`
}
//+kubebuilder:object:root=true

// EraserConfig is the Schema for the eraserconfigs API.
// It is the top-level configuration object combining manager settings and
// per-component container configuration.
type EraserConfig struct {
	metav1.TypeMeta `json:",inline"`
	Manager         ManagerConfig `json:"manager"`
	Components      Components    `json:"components"`
}

// init registers EraserConfig with the scheme builder so AddToScheme can
// install it into a runtime.Scheme.
func init() {
	SchemeBuilder.Register(&EraserConfig{})
}
================================================
FILE: api/v1alpha3/groupversion_info.go
================================================
/*
Copyright 2021.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Package v1alpha3 contains API Schema definitions for the eraser.sh v1alpha3 API group
// +kubebuilder:object:generate=true
// +groupName=eraser.sh
package v1alpha3
import (
runtime "k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
"sigs.k8s.io/controller-runtime/pkg/scheme"
)
var (
	// GroupVersion is group version used to register these objects.
	GroupVersion = schema.GroupVersion{Group: "eraser.sh", Version: "v1alpha3"}

	// SchemeBuilder is used to add go types to the GroupVersionKind scheme.
	SchemeBuilder = &scheme.Builder{GroupVersion: GroupVersion}

	// localSchemeBuilder lets generated conversion code in this package
	// (zz_generated.conversion.go) register its functions alongside the
	// type registrations above.
	localSchemeBuilder = runtime.NewSchemeBuilder(SchemeBuilder.AddToScheme)

	// AddToScheme adds the types in this group-version to the given scheme.
	AddToScheme = SchemeBuilder.AddToScheme
)
================================================
FILE: api/v1alpha3/runtime_spec_test.go
================================================
package v1alpha3
import (
"encoding/json"
"fmt"
"testing"
)
// TestConvertRuntimeToRuntimeSpec verifies that each supported runtime name
// maps to its default unix socket spec and that unknown names error.
func TestConvertRuntimeToRuntimeSpec(t *testing.T) {
	cases := map[string]struct {
		in        Runtime
		want      RuntimeSpec
		expectErr bool
	}{
		"Containerd": {
			in:   RuntimeContainerd,
			want: RuntimeSpec{Name: RuntimeContainerd, Address: fmt.Sprintf("unix://%s", ContainerdPath)},
		},
		"DockerShim": {
			in:   RuntimeDockerShim,
			want: RuntimeSpec{Name: RuntimeDockerShim, Address: fmt.Sprintf("unix://%s", DockerPath)},
		},
		"Crio": {
			in:   RuntimeCrio,
			want: RuntimeSpec{Name: RuntimeCrio, Address: fmt.Sprintf("unix://%s", CrioPath)},
		},
		"InvalidRuntime": {
			in:        "invalid",
			want:      RuntimeSpec{},
			expectErr: true,
		},
	}
	for name, tc := range cases {
		tc := tc
		t.Run(name, func(t *testing.T) {
			got, err := ConvertRuntimeToRuntimeSpec(tc.in)
			switch {
			case tc.expectErr && err == nil:
				t.Errorf("Expected an error but got nil")
			case !tc.expectErr && err != nil:
				t.Errorf("Error: %v", err)
			}
			if got != tc.want {
				t.Errorf("Unexpected result. Expected %v, but got %v", tc.want, got)
			}
		})
	}
}
// TestUnmarshalJSON verifies RuntimeSpec JSON decoding: valid name/address
// pairs round-trip, while unknown names and bad address schemes fail and
// leave the target zero-valued.
func TestUnmarshalJSON(t *testing.T) {
	cases := map[string]struct {
		raw       []byte
		want      RuntimeSpec
		expectErr bool
	}{
		"ValidContainerd": {
			raw:  []byte(`{"name": "containerd", "address": "unix:///run/containerd/containerd.sock"}`),
			want: RuntimeSpec{Name: RuntimeContainerd, Address: fmt.Sprintf("unix://%s", ContainerdPath)},
		},
		"ValidDockerShim": {
			raw:  []byte(`{"name": "dockershim", "address": "unix:///run/dockershim.sock"}`),
			want: RuntimeSpec{Name: RuntimeDockerShim, Address: fmt.Sprintf("unix://%s", DockerPath)},
		},
		"ValidCrio": {
			raw:  []byte(`{"name": "crio", "address": "unix:///run/crio/crio.sock"}`),
			want: RuntimeSpec{Name: RuntimeCrio, Address: fmt.Sprintf("unix://%s", CrioPath)},
		},
		"InvalidName": {
			raw:       []byte(`{"name": "invalid", "address": "unix:///invalid"}`),
			want:      RuntimeSpec{},
			expectErr: true,
		},
		"InvalidAddressScheme": {
			raw:       []byte(`{"name": "containerd", "address": "http://invalid"}`),
			want:      RuntimeSpec{},
			expectErr: true,
		},
	}
	for name, tc := range cases {
		tc := tc
		t.Run(name, func(t *testing.T) {
			var got RuntimeSpec
			err := json.Unmarshal(tc.raw, &got)
			switch {
			case tc.expectErr && err == nil:
				t.Error("Expected an error but got nil")
			case !tc.expectErr && err != nil:
				t.Errorf("Error: %v", err)
			}
			if got != tc.want {
				t.Errorf("Unexpected result. Expected %v, but got %v", tc.want, got)
			}
		})
	}
}
================================================
FILE: api/v1alpha3/zz_generated.conversion.go
================================================
//go:build !ignore_autogenerated
// +build !ignore_autogenerated
/*
Copyright 2021.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by conversion-gen. DO NOT EDIT.
package v1alpha3
import (
unsafe "unsafe"
unversioned "github.com/eraser-dev/eraser/api/unversioned"
v1 "k8s.io/api/core/v1"
conversion "k8s.io/apimachinery/pkg/conversion"
runtime "k8s.io/apimachinery/pkg/runtime"
)
// init hooks the generated conversion functions into the local scheme
// builder so they are installed whenever AddToScheme runs.
func init() {
	localSchemeBuilder.Register(RegisterConversions)
}
// RegisterConversions adds conversion functions to the given scheme.
// Public to allow building arbitrary schemes.
// Registrations are symmetric: one v1alpha3->unversioned and one
// unversioned->v1alpha3 function per config type. Generated code — do not
// hand-edit; regenerate with conversion-gen.
func RegisterConversions(s *runtime.Scheme) error {
	if err := s.AddGeneratedConversionFunc((*Components)(nil), (*unversioned.Components)(nil), func(a, b interface{}, scope conversion.Scope) error {
		return Convert_v1alpha3_Components_To_unversioned_Components(a.(*Components), b.(*unversioned.Components), scope)
	}); err != nil {
		return err
	}
	if err := s.AddGeneratedConversionFunc((*unversioned.Components)(nil), (*Components)(nil), func(a, b interface{}, scope conversion.Scope) error {
		return Convert_unversioned_Components_To_v1alpha3_Components(a.(*unversioned.Components), b.(*Components), scope)
	}); err != nil {
		return err
	}
	if err := s.AddGeneratedConversionFunc((*ContainerConfig)(nil), (*unversioned.ContainerConfig)(nil), func(a, b interface{}, scope conversion.Scope) error {
		return Convert_v1alpha3_ContainerConfig_To_unversioned_ContainerConfig(a.(*ContainerConfig), b.(*unversioned.ContainerConfig), scope)
	}); err != nil {
		return err
	}
	if err := s.AddGeneratedConversionFunc((*unversioned.ContainerConfig)(nil), (*ContainerConfig)(nil), func(a, b interface{}, scope conversion.Scope) error {
		return Convert_unversioned_ContainerConfig_To_v1alpha3_ContainerConfig(a.(*unversioned.ContainerConfig), b.(*ContainerConfig), scope)
	}); err != nil {
		return err
	}
	if err := s.AddGeneratedConversionFunc((*EraserConfig)(nil), (*unversioned.EraserConfig)(nil), func(a, b interface{}, scope conversion.Scope) error {
		return Convert_v1alpha3_EraserConfig_To_unversioned_EraserConfig(a.(*EraserConfig), b.(*unversioned.EraserConfig), scope)
	}); err != nil {
		return err
	}
	if err := s.AddGeneratedConversionFunc((*unversioned.EraserConfig)(nil), (*EraserConfig)(nil), func(a, b interface{}, scope conversion.Scope) error {
		return Convert_unversioned_EraserConfig_To_v1alpha3_EraserConfig(a.(*unversioned.EraserConfig), b.(*EraserConfig), scope)
	}); err != nil {
		return err
	}
	if err := s.AddGeneratedConversionFunc((*ImageJobCleanupConfig)(nil), (*unversioned.ImageJobCleanupConfig)(nil), func(a, b interface{}, scope conversion.Scope) error {
		return Convert_v1alpha3_ImageJobCleanupConfig_To_unversioned_ImageJobCleanupConfig(a.(*ImageJobCleanupConfig), b.(*unversioned.ImageJobCleanupConfig), scope)
	}); err != nil {
		return err
	}
	if err := s.AddGeneratedConversionFunc((*unversioned.ImageJobCleanupConfig)(nil), (*ImageJobCleanupConfig)(nil), func(a, b interface{}, scope conversion.Scope) error {
		return Convert_unversioned_ImageJobCleanupConfig_To_v1alpha3_ImageJobCleanupConfig(a.(*unversioned.ImageJobCleanupConfig), b.(*ImageJobCleanupConfig), scope)
	}); err != nil {
		return err
	}
	if err := s.AddGeneratedConversionFunc((*ImageJobConfig)(nil), (*unversioned.ImageJobConfig)(nil), func(a, b interface{}, scope conversion.Scope) error {
		return Convert_v1alpha3_ImageJobConfig_To_unversioned_ImageJobConfig(a.(*ImageJobConfig), b.(*unversioned.ImageJobConfig), scope)
	}); err != nil {
		return err
	}
	if err := s.AddGeneratedConversionFunc((*unversioned.ImageJobConfig)(nil), (*ImageJobConfig)(nil), func(a, b interface{}, scope conversion.Scope) error {
		return Convert_unversioned_ImageJobConfig_To_v1alpha3_ImageJobConfig(a.(*unversioned.ImageJobConfig), b.(*ImageJobConfig), scope)
	}); err != nil {
		return err
	}
	if err := s.AddGeneratedConversionFunc((*ManagerConfig)(nil), (*unversioned.ManagerConfig)(nil), func(a, b interface{}, scope conversion.Scope) error {
		return Convert_v1alpha3_ManagerConfig_To_unversioned_ManagerConfig(a.(*ManagerConfig), b.(*unversioned.ManagerConfig), scope)
	}); err != nil {
		return err
	}
	if err := s.AddGeneratedConversionFunc((*unversioned.ManagerConfig)(nil), (*ManagerConfig)(nil), func(a, b interface{}, scope conversion.Scope) error {
		return Convert_unversioned_ManagerConfig_To_v1alpha3_ManagerConfig(a.(*unversioned.ManagerConfig), b.(*ManagerConfig), scope)
	}); err != nil {
		return err
	}
	if err := s.AddGeneratedConversionFunc((*NodeFilterConfig)(nil), (*unversioned.NodeFilterConfig)(nil), func(a, b interface{}, scope conversion.Scope) error {
		return Convert_v1alpha3_NodeFilterConfig_To_unversioned_NodeFilterConfig(a.(*NodeFilterConfig), b.(*unversioned.NodeFilterConfig), scope)
	}); err != nil {
		return err
	}
	if err := s.AddGeneratedConversionFunc((*unversioned.NodeFilterConfig)(nil), (*NodeFilterConfig)(nil), func(a, b interface{}, scope conversion.Scope) error {
		return Convert_unversioned_NodeFilterConfig_To_v1alpha3_NodeFilterConfig(a.(*unversioned.NodeFilterConfig), b.(*NodeFilterConfig), scope)
	}); err != nil {
		return err
	}
	if err := s.AddGeneratedConversionFunc((*OptionalContainerConfig)(nil), (*unversioned.OptionalContainerConfig)(nil), func(a, b interface{}, scope conversion.Scope) error {
		return Convert_v1alpha3_OptionalContainerConfig_To_unversioned_OptionalContainerConfig(a.(*OptionalContainerConfig), b.(*unversioned.OptionalContainerConfig), scope)
	}); err != nil {
		return err
	}
	if err := s.AddGeneratedConversionFunc((*unversioned.OptionalContainerConfig)(nil), (*OptionalContainerConfig)(nil), func(a, b interface{}, scope conversion.Scope) error {
		return Convert_unversioned_OptionalContainerConfig_To_v1alpha3_OptionalContainerConfig(a.(*unversioned.OptionalContainerConfig), b.(*OptionalContainerConfig), scope)
	}); err != nil {
		return err
	}
	if err := s.AddGeneratedConversionFunc((*ProfileConfig)(nil), (*unversioned.ProfileConfig)(nil), func(a, b interface{}, scope conversion.Scope) error {
		return Convert_v1alpha3_ProfileConfig_To_unversioned_ProfileConfig(a.(*ProfileConfig), b.(*unversioned.ProfileConfig), scope)
	}); err != nil {
		return err
	}
	if err := s.AddGeneratedConversionFunc((*unversioned.ProfileConfig)(nil), (*ProfileConfig)(nil), func(a, b interface{}, scope conversion.Scope) error {
		return Convert_unversioned_ProfileConfig_To_v1alpha3_ProfileConfig(a.(*unversioned.ProfileConfig), b.(*ProfileConfig), scope)
	}); err != nil {
		return err
	}
	if err := s.AddGeneratedConversionFunc((*RepoTag)(nil), (*unversioned.RepoTag)(nil), func(a, b interface{}, scope conversion.Scope) error {
		return Convert_v1alpha3_RepoTag_To_unversioned_RepoTag(a.(*RepoTag), b.(*unversioned.RepoTag), scope)
	}); err != nil {
		return err
	}
	if err := s.AddGeneratedConversionFunc((*unversioned.RepoTag)(nil), (*RepoTag)(nil), func(a, b interface{}, scope conversion.Scope) error {
		return Convert_unversioned_RepoTag_To_v1alpha3_RepoTag(a.(*unversioned.RepoTag), b.(*RepoTag), scope)
	}); err != nil {
		return err
	}
	if err := s.AddGeneratedConversionFunc((*ResourceRequirements)(nil), (*unversioned.ResourceRequirements)(nil), func(a, b interface{}, scope conversion.Scope) error {
		return Convert_v1alpha3_ResourceRequirements_To_unversioned_ResourceRequirements(a.(*ResourceRequirements), b.(*unversioned.ResourceRequirements), scope)
	}); err != nil {
		return err
	}
	if err := s.AddGeneratedConversionFunc((*unversioned.ResourceRequirements)(nil), (*ResourceRequirements)(nil), func(a, b interface{}, scope conversion.Scope) error {
		return Convert_unversioned_ResourceRequirements_To_v1alpha3_ResourceRequirements(a.(*unversioned.ResourceRequirements), b.(*ResourceRequirements), scope)
	}); err != nil {
		return err
	}
	if err := s.AddGeneratedConversionFunc((*RuntimeSpec)(nil), (*unversioned.RuntimeSpec)(nil), func(a, b interface{}, scope conversion.Scope) error {
		return Convert_v1alpha3_RuntimeSpec_To_unversioned_RuntimeSpec(a.(*RuntimeSpec), b.(*unversioned.RuntimeSpec), scope)
	}); err != nil {
		return err
	}
	if err := s.AddGeneratedConversionFunc((*unversioned.RuntimeSpec)(nil), (*RuntimeSpec)(nil), func(a, b interface{}, scope conversion.Scope) error {
		return Convert_unversioned_RuntimeSpec_To_v1alpha3_RuntimeSpec(a.(*unversioned.RuntimeSpec), b.(*RuntimeSpec), scope)
	}); err != nil {
		return err
	}
	if err := s.AddGeneratedConversionFunc((*ScheduleConfig)(nil), (*unversioned.ScheduleConfig)(nil), func(a, b interface{}, scope conversion.Scope) error {
		return Convert_v1alpha3_ScheduleConfig_To_unversioned_ScheduleConfig(a.(*ScheduleConfig), b.(*unversioned.ScheduleConfig), scope)
	}); err != nil {
		return err
	}
	if err := s.AddGeneratedConversionFunc((*unversioned.ScheduleConfig)(nil), (*ScheduleConfig)(nil), func(a, b interface{}, scope conversion.Scope) error {
		return Convert_unversioned_ScheduleConfig_To_v1alpha3_ScheduleConfig(a.(*unversioned.ScheduleConfig), b.(*ScheduleConfig), scope)
	}); err != nil {
		return err
	}
	return nil
}
// autoConvert_v1alpha3_Components_To_unversioned_Components converts each
// component config field-by-field via the generated per-type converters.
func autoConvert_v1alpha3_Components_To_unversioned_Components(in *Components, out *unversioned.Components, s conversion.Scope) error {
	if err := Convert_v1alpha3_OptionalContainerConfig_To_unversioned_OptionalContainerConfig(&in.Collector, &out.Collector, s); err != nil {
		return err
	}
	if err := Convert_v1alpha3_OptionalContainerConfig_To_unversioned_OptionalContainerConfig(&in.Scanner, &out.Scanner, s); err != nil {
		return err
	}
	if err := Convert_v1alpha3_ContainerConfig_To_unversioned_ContainerConfig(&in.Remover, &out.Remover, s); err != nil {
		return err
	}
	return nil
}

// Convert_v1alpha3_Components_To_unversioned_Components is an autogenerated conversion function.
func Convert_v1alpha3_Components_To_unversioned_Components(in *Components, out *unversioned.Components, s conversion.Scope) error {
	return autoConvert_v1alpha3_Components_To_unversioned_Components(in, out, s)
}

// autoConvert_unversioned_Components_To_v1alpha3_Components is the inverse
// of the conversion above, from the internal (unversioned) type.
func autoConvert_unversioned_Components_To_v1alpha3_Components(in *unversioned.Components, out *Components, s conversion.Scope) error {
	if err := Convert_unversioned_OptionalContainerConfig_To_v1alpha3_OptionalContainerConfig(&in.Collector, &out.Collector, s); err != nil {
		return err
	}
	if err := Convert_unversioned_OptionalContainerConfig_To_v1alpha3_OptionalContainerConfig(&in.Scanner, &out.Scanner, s); err != nil {
		return err
	}
	if err := Convert_unversioned_ContainerConfig_To_v1alpha3_ContainerConfig(&in.Remover, &out.Remover, s); err != nil {
		return err
	}
	return nil
}

// Convert_unversioned_Components_To_v1alpha3_Components is an autogenerated conversion function.
func Convert_unversioned_Components_To_v1alpha3_Components(in *unversioned.Components, out *Components, s conversion.Scope) error {
	return autoConvert_unversioned_Components_To_v1alpha3_Components(in, out, s)
}
// autoConvert_v1alpha3_ContainerConfig_To_unversioned_ContainerConfig copies
// image/request/limit via generated converters and aliases the remaining fields.
func autoConvert_v1alpha3_ContainerConfig_To_unversioned_ContainerConfig(in *ContainerConfig, out *unversioned.ContainerConfig, s conversion.Scope) error {
	if err := Convert_v1alpha3_RepoTag_To_unversioned_RepoTag(&in.Image, &out.Image, s); err != nil {
		return err
	}
	if err := Convert_v1alpha3_ResourceRequirements_To_unversioned_ResourceRequirements(&in.Request, &out.Request, s); err != nil {
		return err
	}
	if err := Convert_v1alpha3_ResourceRequirements_To_unversioned_ResourceRequirements(&in.Limit, &out.Limit, s); err != nil {
		return err
	}
	// Config and Volumes are aliased via unsafe.Pointer rather than deep-copied;
	// this assumes both packages' types share an identical memory layout
	// (conversion-gen emits this only when that holds).
	out.Config = (*string)(unsafe.Pointer(in.Config))
	out.Volumes = *(*[]v1.Volume)(unsafe.Pointer(&in.Volumes))
	return nil
}

// Convert_v1alpha3_ContainerConfig_To_unversioned_ContainerConfig is an autogenerated conversion function.
func Convert_v1alpha3_ContainerConfig_To_unversioned_ContainerConfig(in *ContainerConfig, out *unversioned.ContainerConfig, s conversion.Scope) error {
	return autoConvert_v1alpha3_ContainerConfig_To_unversioned_ContainerConfig(in, out, s)
}

// autoConvert_unversioned_ContainerConfig_To_v1alpha3_ContainerConfig is the
// inverse conversion; same aliasing caveats as the forward direction.
func autoConvert_unversioned_ContainerConfig_To_v1alpha3_ContainerConfig(in *unversioned.ContainerConfig, out *ContainerConfig, s conversion.Scope) error {
	if err := Convert_unversioned_RepoTag_To_v1alpha3_RepoTag(&in.Image, &out.Image, s); err != nil {
		return err
	}
	if err := Convert_unversioned_ResourceRequirements_To_v1alpha3_ResourceRequirements(&in.Request, &out.Request, s); err != nil {
		return err
	}
	if err := Convert_unversioned_ResourceRequirements_To_v1alpha3_ResourceRequirements(&in.Limit, &out.Limit, s); err != nil {
		return err
	}
	out.Config = (*string)(unsafe.Pointer(in.Config))
	out.Volumes = *(*[]v1.Volume)(unsafe.Pointer(&in.Volumes))
	return nil
}

// Convert_unversioned_ContainerConfig_To_v1alpha3_ContainerConfig is an autogenerated conversion function.
func Convert_unversioned_ContainerConfig_To_v1alpha3_ContainerConfig(in *unversioned.ContainerConfig, out *ContainerConfig, s conversion.Scope) error {
	return autoConvert_unversioned_ContainerConfig_To_v1alpha3_ContainerConfig(in, out, s)
}
// autoConvert_v1alpha3_EraserConfig_To_unversioned_EraserConfig converts the
// top-level config by delegating to the Manager and Components converters.
func autoConvert_v1alpha3_EraserConfig_To_unversioned_EraserConfig(in *EraserConfig, out *unversioned.EraserConfig, s conversion.Scope) error {
	if err := Convert_v1alpha3_ManagerConfig_To_unversioned_ManagerConfig(&in.Manager, &out.Manager, s); err != nil {
		return err
	}
	if err := Convert_v1alpha3_Components_To_unversioned_Components(&in.Components, &out.Components, s); err != nil {
		return err
	}
	return nil
}

// Convert_v1alpha3_EraserConfig_To_unversioned_EraserConfig is an autogenerated conversion function.
func Convert_v1alpha3_EraserConfig_To_unversioned_EraserConfig(in *EraserConfig, out *unversioned.EraserConfig, s conversion.Scope) error {
	return autoConvert_v1alpha3_EraserConfig_To_unversioned_EraserConfig(in, out, s)
}

// autoConvert_unversioned_EraserConfig_To_v1alpha3_EraserConfig is the inverse
// top-level conversion.
func autoConvert_unversioned_EraserConfig_To_v1alpha3_EraserConfig(in *unversioned.EraserConfig, out *EraserConfig, s conversion.Scope) error {
	if err := Convert_unversioned_ManagerConfig_To_v1alpha3_ManagerConfig(&in.Manager, &out.Manager, s); err != nil {
		return err
	}
	if err := Convert_unversioned_Components_To_v1alpha3_Components(&in.Components, &out.Components, s); err != nil {
		return err
	}
	return nil
}

// Convert_unversioned_EraserConfig_To_v1alpha3_EraserConfig is an autogenerated conversion function.
func Convert_unversioned_EraserConfig_To_v1alpha3_EraserConfig(in *unversioned.EraserConfig, out *EraserConfig, s conversion.Scope) error {
	return autoConvert_unversioned_EraserConfig_To_v1alpha3_EraserConfig(in, out, s)
}
// autoConvert_v1alpha3_ImageJobCleanupConfig_To_unversioned_ImageJobCleanupConfig
// converts the delay fields by a direct Duration type cast.
func autoConvert_v1alpha3_ImageJobCleanupConfig_To_unversioned_ImageJobCleanupConfig(in *ImageJobCleanupConfig, out *unversioned.ImageJobCleanupConfig, s conversion.Scope) error {
	out.DelayOnSuccess = unversioned.Duration(in.DelayOnSuccess)
	out.DelayOnFailure = unversioned.Duration(in.DelayOnFailure)
	return nil
}

// Convert_v1alpha3_ImageJobCleanupConfig_To_unversioned_ImageJobCleanupConfig is an autogenerated conversion function.
func Convert_v1alpha3_ImageJobCleanupConfig_To_unversioned_ImageJobCleanupConfig(in *ImageJobCleanupConfig, out *unversioned.ImageJobCleanupConfig, s conversion.Scope) error {
	return autoConvert_v1alpha3_ImageJobCleanupConfig_To_unversioned_ImageJobCleanupConfig(in, out, s)
}

// autoConvert_unversioned_ImageJobCleanupConfig_To_v1alpha3_ImageJobCleanupConfig
// is the inverse Duration-cast conversion.
func autoConvert_unversioned_ImageJobCleanupConfig_To_v1alpha3_ImageJobCleanupConfig(in *unversioned.ImageJobCleanupConfig, out *ImageJobCleanupConfig, s conversion.Scope) error {
	out.DelayOnSuccess = Duration(in.DelayOnSuccess)
	out.DelayOnFailure = Duration(in.DelayOnFailure)
	return nil
}

// Convert_unversioned_ImageJobCleanupConfig_To_v1alpha3_ImageJobCleanupConfig is an autogenerated conversion function.
func Convert_unversioned_ImageJobCleanupConfig_To_v1alpha3_ImageJobCleanupConfig(in *unversioned.ImageJobCleanupConfig, out *ImageJobCleanupConfig, s conversion.Scope) error {
	return autoConvert_unversioned_ImageJobCleanupConfig_To_v1alpha3_ImageJobCleanupConfig(in, out, s)
}
// autoConvert_v1alpha3_ImageJobConfig_To_unversioned_ImageJobConfig copies the
// success ratio and delegates cleanup conversion to the generated converter.
func autoConvert_v1alpha3_ImageJobConfig_To_unversioned_ImageJobConfig(in *ImageJobConfig, out *unversioned.ImageJobConfig, s conversion.Scope) error {
	out.SuccessRatio = in.SuccessRatio
	if err := Convert_v1alpha3_ImageJobCleanupConfig_To_unversioned_ImageJobCleanupConfig(&in.Cleanup, &out.Cleanup, s); err != nil {
		return err
	}
	return nil
}

// Convert_v1alpha3_ImageJobConfig_To_unversioned_ImageJobConfig is an autogenerated conversion function.
func Convert_v1alpha3_ImageJobConfig_To_unversioned_ImageJobConfig(in *ImageJobConfig, out *unversioned.ImageJobConfig, s conversion.Scope) error {
	return autoConvert_v1alpha3_ImageJobConfig_To_unversioned_ImageJobConfig(in, out, s)
}

// autoConvert_unversioned_ImageJobConfig_To_v1alpha3_ImageJobConfig is the
// inverse conversion.
func autoConvert_unversioned_ImageJobConfig_To_v1alpha3_ImageJobConfig(in *unversioned.ImageJobConfig, out *ImageJobConfig, s conversion.Scope) error {
	out.SuccessRatio = in.SuccessRatio
	if err := Convert_unversioned_ImageJobCleanupConfig_To_v1alpha3_ImageJobCleanupConfig(&in.Cleanup, &out.Cleanup, s); err != nil {
		return err
	}
	return nil
}

// Convert_unversioned_ImageJobConfig_To_v1alpha3_ImageJobConfig is an autogenerated conversion function.
func Convert_unversioned_ImageJobConfig_To_v1alpha3_ImageJobConfig(in *unversioned.ImageJobConfig, out *ImageJobConfig, s conversion.Scope) error {
	return autoConvert_unversioned_ImageJobConfig_To_v1alpha3_ImageJobConfig(in, out, s)
}
// autoConvert_v1alpha3_ManagerConfig_To_unversioned_ManagerConfig converts every field of the
// v1alpha3 ManagerConfig into the unversioned (hub) representation, delegating struct-valued
// fields to their per-type conversion functions.
func autoConvert_v1alpha3_ManagerConfig_To_unversioned_ManagerConfig(in *ManagerConfig, out *unversioned.ManagerConfig, s conversion.Scope) error {
	if err := Convert_v1alpha3_RuntimeSpec_To_unversioned_RuntimeSpec(&in.Runtime, &out.Runtime, s); err != nil {
		return err
	}
	out.OTLPEndpoint = in.OTLPEndpoint
	out.LogLevel = in.LogLevel
	if err := Convert_v1alpha3_ScheduleConfig_To_unversioned_ScheduleConfig(&in.Scheduling, &out.Scheduling, s); err != nil {
		return err
	}
	if err := Convert_v1alpha3_ProfileConfig_To_unversioned_ProfileConfig(&in.Profile, &out.Profile, s); err != nil {
		return err
	}
	if err := Convert_v1alpha3_ImageJobConfig_To_unversioned_ImageJobConfig(&in.ImageJob, &out.ImageJob, s); err != nil {
		return err
	}
	// Zero-copy reinterpretation: both sides are []string, so out shares in's backing array.
	out.PullSecrets = *(*[]string)(unsafe.Pointer(&in.PullSecrets))
	if err := Convert_v1alpha3_NodeFilterConfig_To_unversioned_NodeFilterConfig(&in.NodeFilter, &out.NodeFilter, s); err != nil {
		return err
	}
	out.PriorityClassName = in.PriorityClassName
	// Likewise shares the underlying map rather than copying it.
	out.AdditionalPodLabels = *(*map[string]string)(unsafe.Pointer(&in.AdditionalPodLabels))
	return nil
}

// Convert_v1alpha3_ManagerConfig_To_unversioned_ManagerConfig is an autogenerated conversion function.
func Convert_v1alpha3_ManagerConfig_To_unversioned_ManagerConfig(in *ManagerConfig, out *unversioned.ManagerConfig, s conversion.Scope) error {
	return autoConvert_v1alpha3_ManagerConfig_To_unversioned_ManagerConfig(in, out, s)
}

// autoConvert_unversioned_ManagerConfig_To_v1alpha3_ManagerConfig is the inverse of the
// function above: hub representation back into v1alpha3, field for field.
func autoConvert_unversioned_ManagerConfig_To_v1alpha3_ManagerConfig(in *unversioned.ManagerConfig, out *ManagerConfig, s conversion.Scope) error {
	if err := Convert_unversioned_RuntimeSpec_To_v1alpha3_RuntimeSpec(&in.Runtime, &out.Runtime, s); err != nil {
		return err
	}
	out.OTLPEndpoint = in.OTLPEndpoint
	out.LogLevel = in.LogLevel
	if err := Convert_unversioned_ScheduleConfig_To_v1alpha3_ScheduleConfig(&in.Scheduling, &out.Scheduling, s); err != nil {
		return err
	}
	if err := Convert_unversioned_ProfileConfig_To_v1alpha3_ProfileConfig(&in.Profile, &out.Profile, s); err != nil {
		return err
	}
	if err := Convert_unversioned_ImageJobConfig_To_v1alpha3_ImageJobConfig(&in.ImageJob, &out.ImageJob, s); err != nil {
		return err
	}
	// Zero-copy reinterpretation: both sides are []string, so out shares in's backing array.
	out.PullSecrets = *(*[]string)(unsafe.Pointer(&in.PullSecrets))
	if err := Convert_unversioned_NodeFilterConfig_To_v1alpha3_NodeFilterConfig(&in.NodeFilter, &out.NodeFilter, s); err != nil {
		return err
	}
	out.PriorityClassName = in.PriorityClassName
	// Likewise shares the underlying map rather than copying it.
	out.AdditionalPodLabels = *(*map[string]string)(unsafe.Pointer(&in.AdditionalPodLabels))
	return nil
}

// Convert_unversioned_ManagerConfig_To_v1alpha3_ManagerConfig is an autogenerated conversion function.
func Convert_unversioned_ManagerConfig_To_v1alpha3_ManagerConfig(in *unversioned.ManagerConfig, out *ManagerConfig, s conversion.Scope) error {
	return autoConvert_unversioned_ManagerConfig_To_v1alpha3_ManagerConfig(in, out, s)
}
// autoConvert_v1alpha3_NodeFilterConfig_To_unversioned_NodeFilterConfig converts a
// NodeFilterConfig to the hub representation. The Selectors slice is reinterpreted in place
// (both sides are []string), not copied.
func autoConvert_v1alpha3_NodeFilterConfig_To_unversioned_NodeFilterConfig(in *NodeFilterConfig, out *unversioned.NodeFilterConfig, s conversion.Scope) error {
	out.Type = in.Type
	out.Selectors = *(*[]string)(unsafe.Pointer(&in.Selectors))
	return nil
}

// Convert_v1alpha3_NodeFilterConfig_To_unversioned_NodeFilterConfig is an autogenerated conversion function.
func Convert_v1alpha3_NodeFilterConfig_To_unversioned_NodeFilterConfig(in *NodeFilterConfig, out *unversioned.NodeFilterConfig, s conversion.Scope) error {
	return autoConvert_v1alpha3_NodeFilterConfig_To_unversioned_NodeFilterConfig(in, out, s)
}

// autoConvert_unversioned_NodeFilterConfig_To_v1alpha3_NodeFilterConfig is the inverse of the
// function above: hub representation back into v1alpha3.
func autoConvert_unversioned_NodeFilterConfig_To_v1alpha3_NodeFilterConfig(in *unversioned.NodeFilterConfig, out *NodeFilterConfig, s conversion.Scope) error {
	out.Type = in.Type
	out.Selectors = *(*[]string)(unsafe.Pointer(&in.Selectors))
	return nil
}

// Convert_unversioned_NodeFilterConfig_To_v1alpha3_NodeFilterConfig is an autogenerated conversion function.
func Convert_unversioned_NodeFilterConfig_To_v1alpha3_NodeFilterConfig(in *unversioned.NodeFilterConfig, out *NodeFilterConfig, s conversion.Scope) error {
	return autoConvert_unversioned_NodeFilterConfig_To_v1alpha3_NodeFilterConfig(in, out, s)
}
// autoConvert_v1alpha3_OptionalContainerConfig_To_unversioned_OptionalContainerConfig converts
// an OptionalContainerConfig to the hub representation, delegating the embedded
// ContainerConfig to its own conversion function.
func autoConvert_v1alpha3_OptionalContainerConfig_To_unversioned_OptionalContainerConfig(in *OptionalContainerConfig, out *unversioned.OptionalContainerConfig, s conversion.Scope) error {
	out.Enabled = in.Enabled
	if err := Convert_v1alpha3_ContainerConfig_To_unversioned_ContainerConfig(&in.ContainerConfig, &out.ContainerConfig, s); err != nil {
		return err
	}
	return nil
}

// Convert_v1alpha3_OptionalContainerConfig_To_unversioned_OptionalContainerConfig is an autogenerated conversion function.
func Convert_v1alpha3_OptionalContainerConfig_To_unversioned_OptionalContainerConfig(in *OptionalContainerConfig, out *unversioned.OptionalContainerConfig, s conversion.Scope) error {
	return autoConvert_v1alpha3_OptionalContainerConfig_To_unversioned_OptionalContainerConfig(in, out, s)
}

// autoConvert_unversioned_OptionalContainerConfig_To_v1alpha3_OptionalContainerConfig is the
// inverse of the function above: hub representation back into v1alpha3.
func autoConvert_unversioned_OptionalContainerConfig_To_v1alpha3_OptionalContainerConfig(in *unversioned.OptionalContainerConfig, out *OptionalContainerConfig, s conversion.Scope) error {
	out.Enabled = in.Enabled
	if err := Convert_unversioned_ContainerConfig_To_v1alpha3_ContainerConfig(&in.ContainerConfig, &out.ContainerConfig, s); err != nil {
		return err
	}
	return nil
}

// Convert_unversioned_OptionalContainerConfig_To_v1alpha3_OptionalContainerConfig is an autogenerated conversion function.
func Convert_unversioned_OptionalContainerConfig_To_v1alpha3_OptionalContainerConfig(in *unversioned.OptionalContainerConfig, out *OptionalContainerConfig, s conversion.Scope) error {
	return autoConvert_unversioned_OptionalContainerConfig_To_v1alpha3_OptionalContainerConfig(in, out, s)
}
// autoConvert_v1alpha3_ProfileConfig_To_unversioned_ProfileConfig converts a ProfileConfig to
// the hub representation by direct field assignment.
func autoConvert_v1alpha3_ProfileConfig_To_unversioned_ProfileConfig(in *ProfileConfig, out *unversioned.ProfileConfig, s conversion.Scope) error {
	out.Enabled = in.Enabled
	out.Port = in.Port
	return nil
}

// Convert_v1alpha3_ProfileConfig_To_unversioned_ProfileConfig is an autogenerated conversion function.
func Convert_v1alpha3_ProfileConfig_To_unversioned_ProfileConfig(in *ProfileConfig, out *unversioned.ProfileConfig, s conversion.Scope) error {
	return autoConvert_v1alpha3_ProfileConfig_To_unversioned_ProfileConfig(in, out, s)
}

// autoConvert_unversioned_ProfileConfig_To_v1alpha3_ProfileConfig is the inverse of the
// function above: hub representation back into v1alpha3.
func autoConvert_unversioned_ProfileConfig_To_v1alpha3_ProfileConfig(in *unversioned.ProfileConfig, out *ProfileConfig, s conversion.Scope) error {
	out.Enabled = in.Enabled
	out.Port = in.Port
	return nil
}

// Convert_unversioned_ProfileConfig_To_v1alpha3_ProfileConfig is an autogenerated conversion function.
func Convert_unversioned_ProfileConfig_To_v1alpha3_ProfileConfig(in *unversioned.ProfileConfig, out *ProfileConfig, s conversion.Scope) error {
	return autoConvert_unversioned_ProfileConfig_To_v1alpha3_ProfileConfig(in, out, s)
}
// autoConvert_v1alpha3_RepoTag_To_unversioned_RepoTag converts a RepoTag to the hub
// representation by direct field assignment.
func autoConvert_v1alpha3_RepoTag_To_unversioned_RepoTag(in *RepoTag, out *unversioned.RepoTag, s conversion.Scope) error {
	out.Repo = in.Repo
	out.Tag = in.Tag
	return nil
}

// Convert_v1alpha3_RepoTag_To_unversioned_RepoTag is an autogenerated conversion function.
func Convert_v1alpha3_RepoTag_To_unversioned_RepoTag(in *RepoTag, out *unversioned.RepoTag, s conversion.Scope) error {
	return autoConvert_v1alpha3_RepoTag_To_unversioned_RepoTag(in, out, s)
}

// autoConvert_unversioned_RepoTag_To_v1alpha3_RepoTag is the inverse of the function above:
// hub representation back into v1alpha3.
func autoConvert_unversioned_RepoTag_To_v1alpha3_RepoTag(in *unversioned.RepoTag, out *RepoTag, s conversion.Scope) error {
	out.Repo = in.Repo
	out.Tag = in.Tag
	return nil
}

// Convert_unversioned_RepoTag_To_v1alpha3_RepoTag is an autogenerated conversion function.
func Convert_unversioned_RepoTag_To_v1alpha3_RepoTag(in *unversioned.RepoTag, out *RepoTag, s conversion.Scope) error {
	return autoConvert_unversioned_RepoTag_To_v1alpha3_RepoTag(in, out, s)
}
// autoConvert_v1alpha3_ResourceRequirements_To_unversioned_ResourceRequirements converts a
// ResourceRequirements to the hub representation by direct field assignment.
func autoConvert_v1alpha3_ResourceRequirements_To_unversioned_ResourceRequirements(in *ResourceRequirements, out *unversioned.ResourceRequirements, s conversion.Scope) error {
	out.Mem = in.Mem
	out.CPU = in.CPU
	return nil
}

// Convert_v1alpha3_ResourceRequirements_To_unversioned_ResourceRequirements is an autogenerated conversion function.
func Convert_v1alpha3_ResourceRequirements_To_unversioned_ResourceRequirements(in *ResourceRequirements, out *unversioned.ResourceRequirements, s conversion.Scope) error {
	return autoConvert_v1alpha3_ResourceRequirements_To_unversioned_ResourceRequirements(in, out, s)
}

// autoConvert_unversioned_ResourceRequirements_To_v1alpha3_ResourceRequirements is the inverse
// of the function above: hub representation back into v1alpha3.
func autoConvert_unversioned_ResourceRequirements_To_v1alpha3_ResourceRequirements(in *unversioned.ResourceRequirements, out *ResourceRequirements, s conversion.Scope) error {
	out.Mem = in.Mem
	out.CPU = in.CPU
	return nil
}

// Convert_unversioned_ResourceRequirements_To_v1alpha3_ResourceRequirements is an autogenerated conversion function.
func Convert_unversioned_ResourceRequirements_To_v1alpha3_ResourceRequirements(in *unversioned.ResourceRequirements, out *ResourceRequirements, s conversion.Scope) error {
	return autoConvert_unversioned_ResourceRequirements_To_v1alpha3_ResourceRequirements(in, out, s)
}
// autoConvert_v1alpha3_RuntimeSpec_To_unversioned_RuntimeSpec converts a RuntimeSpec to the hub
// representation, casting Name to the unversioned Runtime type.
func autoConvert_v1alpha3_RuntimeSpec_To_unversioned_RuntimeSpec(in *RuntimeSpec, out *unversioned.RuntimeSpec, s conversion.Scope) error {
	out.Name = unversioned.Runtime(in.Name)
	out.Address = in.Address
	return nil
}

// Convert_v1alpha3_RuntimeSpec_To_unversioned_RuntimeSpec is an autogenerated conversion function.
func Convert_v1alpha3_RuntimeSpec_To_unversioned_RuntimeSpec(in *RuntimeSpec, out *unversioned.RuntimeSpec, s conversion.Scope) error {
	return autoConvert_v1alpha3_RuntimeSpec_To_unversioned_RuntimeSpec(in, out, s)
}

// autoConvert_unversioned_RuntimeSpec_To_v1alpha3_RuntimeSpec is the inverse of the function
// above, casting Name back to this package's Runtime type.
func autoConvert_unversioned_RuntimeSpec_To_v1alpha3_RuntimeSpec(in *unversioned.RuntimeSpec, out *RuntimeSpec, s conversion.Scope) error {
	out.Name = Runtime(in.Name)
	out.Address = in.Address
	return nil
}

// Convert_unversioned_RuntimeSpec_To_v1alpha3_RuntimeSpec is an autogenerated conversion function.
func Convert_unversioned_RuntimeSpec_To_v1alpha3_RuntimeSpec(in *unversioned.RuntimeSpec, out *RuntimeSpec, s conversion.Scope) error {
	return autoConvert_unversioned_RuntimeSpec_To_v1alpha3_RuntimeSpec(in, out, s)
}
// autoConvert_v1alpha3_ScheduleConfig_To_unversioned_ScheduleConfig converts a ScheduleConfig
// to the hub representation, casting RepeatInterval to the unversioned Duration type.
func autoConvert_v1alpha3_ScheduleConfig_To_unversioned_ScheduleConfig(in *ScheduleConfig, out *unversioned.ScheduleConfig, s conversion.Scope) error {
	out.RepeatInterval = unversioned.Duration(in.RepeatInterval)
	out.BeginImmediately = in.BeginImmediately
	return nil
}

// Convert_v1alpha3_ScheduleConfig_To_unversioned_ScheduleConfig is an autogenerated conversion function.
func Convert_v1alpha3_ScheduleConfig_To_unversioned_ScheduleConfig(in *ScheduleConfig, out *unversioned.ScheduleConfig, s conversion.Scope) error {
	return autoConvert_v1alpha3_ScheduleConfig_To_unversioned_ScheduleConfig(in, out, s)
}

// autoConvert_unversioned_ScheduleConfig_To_v1alpha3_ScheduleConfig is the inverse of the
// function above, casting RepeatInterval back to this package's Duration type.
func autoConvert_unversioned_ScheduleConfig_To_v1alpha3_ScheduleConfig(in *unversioned.ScheduleConfig, out *ScheduleConfig, s conversion.Scope) error {
	out.RepeatInterval = Duration(in.RepeatInterval)
	out.BeginImmediately = in.BeginImmediately
	return nil
}

// Convert_unversioned_ScheduleConfig_To_v1alpha3_ScheduleConfig is an autogenerated conversion function.
func Convert_unversioned_ScheduleConfig_To_v1alpha3_ScheduleConfig(in *unversioned.ScheduleConfig, out *ScheduleConfig, s conversion.Scope) error {
	return autoConvert_unversioned_ScheduleConfig_To_v1alpha3_ScheduleConfig(in, out, s)
}
================================================
FILE: api/v1alpha3/zz_generated.deepcopy.go
================================================
//go:build !ignore_autogenerated
/*
Copyright 2021.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by controller-gen. DO NOT EDIT.
package v1alpha3
import (
"k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/runtime"
)
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
// Each component is deep-copied so out shares no heap state with in.
func (in *Components) DeepCopyInto(out *Components) {
	*out = *in
	in.Collector.DeepCopyInto(&out.Collector)
	in.Scanner.DeepCopyInto(&out.Scanner)
	in.Remover.DeepCopyInto(&out.Remover)
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Components.
// Returns nil when the receiver is nil.
func (in *Components) DeepCopy() *Components {
	if in == nil {
		return nil
	}
	out := new(Components)
	in.DeepCopyInto(out)
	return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
// The optional Config pointer and the Volumes slice are reallocated so out shares no mutable
// heap state with in.
func (in *ContainerConfig) DeepCopyInto(out *ContainerConfig) {
	*out = *in
	out.Image = in.Image
	in.Request.DeepCopyInto(&out.Request)
	in.Limit.DeepCopyInto(&out.Limit)
	if in.Config != nil {
		// Allocate a fresh string so out.Config does not alias in.Config.
		in, out := &in.Config, &out.Config
		*out = new(string)
		**out = **in
	}
	if in.Volumes != nil {
		in, out := &in.Volumes, &out.Volumes
		*out = make([]v1.Volume, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ContainerConfig.
// Returns nil when the receiver is nil.
func (in *ContainerConfig) DeepCopy() *ContainerConfig {
	if in == nil {
		return nil
	}
	out := new(ContainerConfig)
	in.DeepCopyInto(out)
	return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *EraserConfig) DeepCopyInto(out *EraserConfig) {
	*out = *in
	out.TypeMeta = in.TypeMeta
	in.Manager.DeepCopyInto(&out.Manager)
	in.Components.DeepCopyInto(&out.Components)
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EraserConfig.
// Returns nil when the receiver is nil.
func (in *EraserConfig) DeepCopy() *EraserConfig {
	if in == nil {
		return nil
	}
	out := new(EraserConfig)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
// This makes EraserConfig satisfy the runtime.Object interface.
func (in *EraserConfig) DeepCopyObject() runtime.Object {
	if c := in.DeepCopy(); c != nil {
		return c
	}
	return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
// A plain struct assignment suffices here: the type holds no pointers, slices, or maps.
func (in *ImageJobCleanupConfig) DeepCopyInto(out *ImageJobCleanupConfig) {
	*out = *in
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImageJobCleanupConfig.
// Returns nil when the receiver is nil.
func (in *ImageJobCleanupConfig) DeepCopy() *ImageJobCleanupConfig {
	if in == nil {
		return nil
	}
	out := new(ImageJobCleanupConfig)
	in.DeepCopyInto(out)
	return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
// Cleanup is copied by value assignment.
func (in *ImageJobConfig) DeepCopyInto(out *ImageJobConfig) {
	*out = *in
	out.Cleanup = in.Cleanup
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImageJobConfig.
// Returns nil when the receiver is nil.
func (in *ImageJobConfig) DeepCopy() *ImageJobConfig {
	if in == nil {
		return nil
	}
	out := new(ImageJobConfig)
	in.DeepCopyInto(out)
	return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
// Struct-valued fields are copied by assignment; the PullSecrets slice, NodeFilter, and the
// AdditionalPodLabels map are reallocated so out shares no mutable heap state with in.
func (in *ManagerConfig) DeepCopyInto(out *ManagerConfig) {
	*out = *in
	out.Runtime = in.Runtime
	out.Scheduling = in.Scheduling
	out.Profile = in.Profile
	out.ImageJob = in.ImageJob
	if in.PullSecrets != nil {
		in, out := &in.PullSecrets, &out.PullSecrets
		*out = make([]string, len(*in))
		copy(*out, *in)
	}
	in.NodeFilter.DeepCopyInto(&out.NodeFilter)
	if in.AdditionalPodLabels != nil {
		in, out := &in.AdditionalPodLabels, &out.AdditionalPodLabels
		*out = make(map[string]string, len(*in))
		for key, val := range *in {
			(*out)[key] = val
		}
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ManagerConfig.
// Returns nil when the receiver is nil.
func (in *ManagerConfig) DeepCopy() *ManagerConfig {
	if in == nil {
		return nil
	}
	out := new(ManagerConfig)
	in.DeepCopyInto(out)
	return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
// The Selectors slice is reallocated so out does not alias in's backing array.
func (in *NodeFilterConfig) DeepCopyInto(out *NodeFilterConfig) {
	*out = *in
	if in.Selectors != nil {
		in, out := &in.Selectors, &out.Selectors
		*out = make([]string, len(*in))
		copy(*out, *in)
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodeFilterConfig.
// Returns nil when the receiver is nil.
func (in *NodeFilterConfig) DeepCopy() *NodeFilterConfig {
	if in == nil {
		return nil
	}
	out := new(NodeFilterConfig)
	in.DeepCopyInto(out)
	return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
// The embedded ContainerConfig is deep-copied via its own DeepCopyInto.
func (in *OptionalContainerConfig) DeepCopyInto(out *OptionalContainerConfig) {
	*out = *in
	in.ContainerConfig.DeepCopyInto(&out.ContainerConfig)
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OptionalContainerConfig.
// Returns nil when the receiver is nil.
func (in *OptionalContainerConfig) DeepCopy() *OptionalContainerConfig {
	if in == nil {
		return nil
	}
	out := new(OptionalContainerConfig)
	in.DeepCopyInto(out)
	return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
// A plain struct assignment suffices here: the type holds no pointers, slices, or maps.
func (in *ProfileConfig) DeepCopyInto(out *ProfileConfig) {
	*out = *in
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProfileConfig.
// Returns nil when the receiver is nil.
func (in *ProfileConfig) DeepCopy() *ProfileConfig {
	if in == nil {
		return nil
	}
	out := new(ProfileConfig)
	in.DeepCopyInto(out)
	return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
// A plain struct assignment suffices here: the type holds no pointers, slices, or maps.
func (in *RepoTag) DeepCopyInto(out *RepoTag) {
	*out = *in
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RepoTag.
// Returns nil when the receiver is nil.
func (in *RepoTag) DeepCopy() *RepoTag {
	if in == nil {
		return nil
	}
	out := new(RepoTag)
	in.DeepCopyInto(out)
	return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
// Mem and CPU are copied via their own DeepCopy methods.
func (in *ResourceRequirements) DeepCopyInto(out *ResourceRequirements) {
	*out = *in
	out.Mem = in.Mem.DeepCopy()
	out.CPU = in.CPU.DeepCopy()
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceRequirements.
// Returns nil when the receiver is nil.
func (in *ResourceRequirements) DeepCopy() *ResourceRequirements {
	if in == nil {
		return nil
	}
	out := new(ResourceRequirements)
	in.DeepCopyInto(out)
	return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
// A plain struct assignment suffices here: the type holds no pointers, slices, or maps.
func (in *RuntimeSpec) DeepCopyInto(out *RuntimeSpec) {
	*out = *in
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RuntimeSpec.
// Returns nil when the receiver is nil.
func (in *RuntimeSpec) DeepCopy() *RuntimeSpec {
	if in == nil {
		return nil
	}
	out := new(RuntimeSpec)
	in.DeepCopyInto(out)
	return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
// A plain struct assignment suffices here: the type holds no pointers, slices, or maps.
func (in *ScheduleConfig) DeepCopyInto(out *ScheduleConfig) {
	*out = *in
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ScheduleConfig.
// Returns nil when the receiver is nil.
func (in *ScheduleConfig) DeepCopy() *ScheduleConfig {
	if in == nil {
		return nil
	}
	out := new(ScheduleConfig)
	in.DeepCopyInto(out)
	return out
}
================================================
FILE: build/version.sh
================================================
#!/bin/bash
# borrowed from sigs.k8s.io/cluster-api-provider-azure/hack/version.sh and modified
set -o errexit
set -o pipefail
# version::get_version_vars populates GIT_COMMIT, GIT_TREE_STATE, GIT_VERSION,
# GIT_MAJOR/GIT_MINOR, and SOURCE_DATE_EPOCH from the state of the git checkout.
# GIT_VERSION (when a tag is reachable) is massaged into a semver-compatible
# string and validated; the script exits non-zero if validation fails.
version::get_version_vars() {
  # shellcheck disable=SC1083
  GIT_COMMIT="$(git rev-parse HEAD^{commit})"

  # Tree is "clean" only when `git status --porcelain` prints nothing.
  if git_status=$(git status --porcelain 2>/dev/null) && [[ -z ${git_status} ]]; then
    GIT_TREE_STATE="clean"
  else
    GIT_TREE_STATE="dirty"
  fi

  # borrowed from k8s.io/hack/lib/version.sh
  # Use git describe to find the version based on tags.
  if GIT_VERSION=$(git describe --tags --abbrev=14 2>/dev/null); then
    # This translates the "git describe" to an actual semver.org
    # compatible semantic version that looks something like this:
    #   v1.1.0-alpha.0.6+84c76d1142ea4d
    # shellcheck disable=SC2001
    DASHES_IN_VERSION=$(echo "${GIT_VERSION}" | sed "s/[^-]//g")
    if [[ "${DASHES_IN_VERSION}" == "---" ]] ; then
      # We have distance to subversion (v1.1.0-subversion-1-gCommitHash)
      # shellcheck disable=SC2001
      GIT_VERSION=$(echo "${GIT_VERSION}" | sed "s/-\([0-9]\{1,\}\)-g\([0-9a-f]\{14\}\)$/.\1\-\2/")
    elif [[ "${DASHES_IN_VERSION}" == "--" ]] ; then
      # We have distance to base tag (v1.1.0-1-gCommitHash)
      # shellcheck disable=SC2001
      GIT_VERSION=$(echo "${GIT_VERSION}" | sed "s/-g\([0-9a-f]\{14\}\)$/-\1/")
      # TODO: What should the output of this command look like?
      # For example, v1.1.0-32-gfeb4736460af8f maps to v1.1.0-32-f, do we want the trailing "-f" or not?
    fi
    if [[ "${GIT_TREE_STATE}" == "dirty" ]]; then
      # git describe --dirty only considers changes to existing files, but
      # that is problematic since new untracked .go files affect the build,
      # so use our idea of "dirty" from git status instead.
      GIT_VERSION+="-dirty"
    fi

    # Try to match the "git describe" output to a regex to try to extract
    # the "major" and "minor" versions and whether this is the exact tagged
    # version or whether the tree is between two tagged versions.
    if [[ "${GIT_VERSION}" =~ ^v([0-9]+)\.([0-9]+)(\.[0-9]+)?([-].*)?([+].*)?$ ]]; then
      GIT_MAJOR=${BASH_REMATCH[1]}
      GIT_MINOR=${BASH_REMATCH[2]}
    fi

    # If GIT_VERSION is not a valid Semantic Version, then exit with error
    if ! [[ "${GIT_VERSION}" =~ ^v([0-9]+)\.([0-9]+)(\.[0-9]+)?(-[0-9A-Za-z.-]+)?(\+[0-9A-Za-z.-]+)?$ ]]; then
      echo "GIT_VERSION should be a valid Semantic Version. Current value: ${GIT_VERSION}"
      echo "Please see more details here: https://semver.org"
      exit 1
    fi
  fi

  # Honor an externally supplied SOURCE_DATE_EPOCH (reproducible builds);
  # otherwise derive it from the committer date of HEAD.
  if [[ -z ${SOURCE_DATE_EPOCH} ]]; then
    SOURCE_DATE="$(git show -s --format=%cI HEAD)"
    SOURCE_DATE_EPOCH="$(date -u --date "${SOURCE_DATE}" +%s)"
  fi
}
# Prints the value that needs to be passed to the -ldflags parameter of go build
# version::ldflags prints the value to pass to go build's -ldflags parameter,
# embedding the version variables gathered by version::get_version_vars.
# An optional first argument supplies the BuildVersion when git describe
# produced no GIT_VERSION (e.g. a checkout without reachable tags).
version::ldflags() {
  version::get_version_vars

  local -a ldflags
  function add_ldflag() {
    local key=${1}
    local val=${2}
    ldflags+=(
      "-X 'github.com/eraser-dev/eraser/version.${key}=${val}'"
    )
  }

  add_ldflag "buildTime" "${SOURCE_DATE_EPOCH}"
  add_ldflag "vcsCommit" "${GIT_COMMIT}"
  add_ldflag "vcsState" "${GIT_TREE_STATE}"
  if [[ -n ${GIT_VERSION} ]]; then
    add_ldflag "BuildVersion" "${GIT_VERSION}"
    add_ldflag "vcsMajor" "${GIT_MAJOR}"
    add_ldflag "vcsMinor" "${GIT_MINOR}"
  elif [[ -n $1 ]]; then
    # No tag-derived version available; fall back to the caller-supplied one.
    add_ldflag "BuildVersion" "$1"
  fi

  # The -ldflags parameter takes a single string, so join the output.
  echo "${ldflags[*]-}"
}

# Quote "$1" so a caller-supplied version containing whitespace is passed as a
# single argument instead of being word-split (shellcheck SC2086).
version::ldflags "$1"
================================================
FILE: config/crd/bases/_.yaml
================================================
---
apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
annotations:
controller-gen.kubebuilder.io/version: v0.9.0
creationTimestamp: null
spec:
group: ""
names:
kind: ""
plural: ""
scope: ""
versions: null
================================================
FILE: config/crd/bases/eraser.sh_imagejobs.yaml
================================================
---
apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
annotations:
controller-gen.kubebuilder.io/version: v0.14.0
name: imagejobs.eraser.sh
spec:
group: eraser.sh
names:
kind: ImageJob
listKind: ImageJobList
plural: imagejobs
singular: imagejob
scope: Cluster
versions:
- name: v1
schema:
openAPIV3Schema:
description: ImageJob is the Schema for the imagejobs API.
properties:
apiVersion:
description: |-
APIVersion defines the versioned schema of this representation of an object.
Servers should convert recognized schemas to the latest internal value, and
may reject unrecognized values.
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources
type: string
kind:
description: |-
Kind is a string value representing the REST resource this object represents.
Servers may infer this from the endpoint the client submits requests to.
Cannot be updated.
In CamelCase.
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
type: string
metadata:
type: object
status:
description: ImageJobStatus defines the observed state of ImageJob.
properties:
deleteAfter:
description: Time to delay deletion until
format: date-time
type: string
desired:
description: desired number of pods
type: integer
failed:
description: number of pods that failed
type: integer
phase:
description: job running, successfully completed, or failed
type: string
skipped:
description: number of nodes that were skipped e.g. because they are
not a linux node
type: integer
succeeded:
description: number of pods that completed successfully
type: integer
required:
- desired
- failed
- phase
- skipped
- succeeded
type: object
type: object
served: true
storage: true
subresources:
status: {}
- deprecated: true
deprecationWarning: v1alpha1 of the eraser API has been deprecated. Please migrate
to v1.
name: v1alpha1
schema:
openAPIV3Schema:
description: ImageJob is the Schema for the imagejobs API.
properties:
apiVersion:
description: |-
APIVersion defines the versioned schema of this representation of an object.
Servers should convert recognized schemas to the latest internal value, and
may reject unrecognized values.
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources
type: string
kind:
description: |-
Kind is a string value representing the REST resource this object represents.
Servers may infer this from the endpoint the client submits requests to.
Cannot be updated.
In CamelCase.
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
type: string
metadata:
type: object
status:
description: ImageJobStatus defines the observed state of ImageJob.
properties:
deleteAfter:
description: Time to delay deletion until
format: date-time
type: string
desired:
description: desired number of pods
type: integer
failed:
description: number of pods that failed
type: integer
phase:
description: job running, successfully completed, or failed
type: string
skipped:
description: number of nodes that were skipped e.g. because they are
not a linux node
type: integer
succeeded:
description: number of pods that completed successfully
type: integer
required:
- desired
- failed
- phase
- skipped
- succeeded
type: object
type: object
served: true
storage: false
subresources:
status: {}
================================================
FILE: config/crd/bases/eraser.sh_imagelists.yaml
================================================
---
apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
annotations:
controller-gen.kubebuilder.io/version: v0.14.0
name: imagelists.eraser.sh
spec:
group: eraser.sh
names:
kind: ImageList
listKind: ImageListList
plural: imagelists
singular: imagelist
scope: Cluster
versions:
- name: v1
schema:
openAPIV3Schema:
description: ImageList is the Schema for the imagelists API.
properties:
apiVersion:
description: |-
APIVersion defines the versioned schema of this representation of an object.
Servers should convert recognized schemas to the latest internal value, and
may reject unrecognized values.
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources
type: string
kind:
description: |-
Kind is a string value representing the REST resource this object represents.
Servers may infer this from the endpoint the client submits requests to.
Cannot be updated.
In CamelCase.
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
type: string
metadata:
type: object
spec:
description: ImageListSpec defines the desired state of ImageList.
properties:
images:
description: The list of non-compliant images to delete if non-running.
items:
type: string
type: array
required:
- images
type: object
status:
description: ImageListStatus defines the observed state of ImageList.
properties:
failed:
description: Number of nodes that failed to run the job
format: int64
type: integer
skipped:
description: Number of nodes that were skipped due to a skip selector
format: int64
type: integer
success:
description: Number of nodes that successfully ran the job
format: int64
type: integer
timestamp:
description: Information when the job was completed.
format: date-time
type: string
required:
- failed
- skipped
- success
- timestamp
type: object
type: object
served: true
storage: true
subresources:
status: {}
- deprecated: true
deprecationWarning: v1alpha1 of the eraser API has been deprecated. Please migrate
to v1.
name: v1alpha1
schema:
openAPIV3Schema:
description: ImageList is the Schema for the imagelists API.
properties:
apiVersion:
description: |-
APIVersion defines the versioned schema of this representation of an object.
Servers should convert recognized schemas to the latest internal value, and
may reject unrecognized values.
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources
type: string
kind:
description: |-
Kind is a string value representing the REST resource this object represents.
Servers may infer this from the endpoint the client submits requests to.
Cannot be updated.
In CamelCase.
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
type: string
metadata:
type: object
spec:
description: ImageListSpec defines the desired state of ImageList.
properties:
images:
description: The list of non-compliant images to delete if non-running.
items:
type: string
type: array
required:
- images
type: object
status:
description: ImageListStatus defines the observed state of ImageList.
properties:
failed:
description: Number of nodes that failed to run the job
format: int64
type: integer
skipped:
description: Number of nodes that were skipped due to a skip selector
format: int64
type: integer
success:
description: Number of nodes that successfully ran the job
format: int64
type: integer
timestamp:
description: Information when the job was completed.
format: date-time
type: string
required:
- failed
- skipped
- success
- timestamp
type: object
type: object
served: true
storage: false
subresources:
status: {}
================================================
FILE: config/crd/kustomization.yaml
================================================
# This kustomization.yaml is not intended to be run by itself,
# since it depends on service name and namespace that are out of this kustomize package.
# It should be run by config/default
resources:
- bases/eraser.sh_imagelists.yaml
- bases/eraser.sh_imagejobs.yaml
#+kubebuilder:scaffold:crdkustomizeresource
patchesStrategicMerge:
# [WEBHOOK] To enable webhook, uncomment all the sections with [WEBHOOK] prefix.
# patches here are for enabling the conversion webhook for each CRD
#- patches/webhook_in_imagelists.yaml
#- patches/webhook_in_eraserconfigs.yaml
#+kubebuilder:scaffold:crdkustomizewebhookpatch
# [CERTMANAGER] To enable webhook, uncomment all the sections with [CERTMANAGER] prefix.
# patches here are for enabling the CA injection for each CRD
#- patches/cainjection_in_imagelists.yaml
#- patches/cainjection_in_eraserconfigs.yaml
#+kubebuilder:scaffold:crdkustomizecainjectionpatch
# the following config is for teaching kustomize how to do kustomization for CRDs.
configurations:
- kustomizeconfig.yaml
================================================
FILE: config/crd/kustomizeconfig.yaml
================================================
# This file is for teaching kustomize how to substitute name and namespace reference in CRD
nameReference:
- kind: Service
version: v1
fieldSpecs:
- kind: CustomResourceDefinition
version: v1
group: apiextensions.k8s.io
path: spec/conversion/webhook/clientConfig/service/name
namespace:
- kind: CustomResourceDefinition
version: v1
group: apiextensions.k8s.io
path: spec/conversion/webhook/clientConfig/service/namespace
create: false
varReference:
- path: metadata/annotations
================================================
FILE: config/crd/patches/cainjection_in_eraserconfigs.yaml
================================================
# The following patch adds a directive for certmanager to inject CA into the CRD
apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
annotations:
cert-manager.io/inject-ca-from: $(CERTIFICATE_NAMESPACE)/$(CERTIFICATE_NAME)
name: eraserconfigs.eraser.sh
================================================
FILE: config/crd/patches/cainjection_in_imagelists.yaml
================================================
# The following patch adds a directive for certmanager to inject CA into the CRD
apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
annotations:
cert-manager.io/inject-ca-from: $(CERTIFICATE_NAMESPACE)/$(CERTIFICATE_NAME)
name: imagelists.eraser.sh
================================================
FILE: config/crd/patches/webhook_in_eraserconfigs.yaml
================================================
# The following patch enables a conversion webhook for the CRD
apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
name: eraserconfigs.eraser.sh
spec:
conversion:
strategy: Webhook
webhook:
clientConfig:
service:
namespace: system
name: webhook-service
path: /convert
conversionReviewVersions:
- v1
================================================
FILE: config/crd/patches/webhook_in_imagelists.yaml
================================================
# The following patch enables a conversion webhook for the CRD
apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
name: imagelists.eraser.sh
spec:
conversion:
strategy: Webhook
webhook:
clientConfig:
service:
namespace: system
name: webhook-service
path: /convert
conversionReviewVersions:
- v1
================================================
FILE: config/default/kustomization.yaml
================================================
# Adds namespace to all resources.
namespace: eraser-system
# Value of this field is prepended to the
# names of all resources, e.g. a deployment named
# "wordpress" becomes "alices-wordpress".
# Note that it should also match with the prefix (text before '-') of the namespace
# field above.
namePrefix: eraser-
# Labels to add to all resources and selectors.
#commonLabels:
# someName: someValue
bases:
- ../crd
- ../rbac
- ../manager
# [WEBHOOK] To enable webhook, uncomment all the sections with [WEBHOOK] prefix including the one in
# crd/kustomization.yaml
#- ../webhook
# [CERTMANAGER] To enable cert-manager, uncomment all sections with 'CERTMANAGER'. 'WEBHOOK' components are required.
#- ../certmanager
# [PROMETHEUS] To enable prometheus monitor, uncomment all sections with 'PROMETHEUS'.
#- ../prometheus
patchesStrategicMerge:
# Protect the /metrics endpoint by putting it behind auth.
# If you want your controller-manager to expose the /metrics
# endpoint w/o any authn/z, please comment the following line.
# - manager_auth_proxy_patch.yaml
# Mount the controller config file for loading manager configurations
# through a ComponentConfig type
# - manager_config_patch.yaml
# [WEBHOOK] To enable webhook, uncomment all the sections with [WEBHOOK] prefix including the one in
# crd/kustomization.yaml
#- manager_webhook_patch.yaml
# [CERTMANAGER] To enable cert-manager, uncomment all sections with 'CERTMANAGER'.
# Uncomment 'CERTMANAGER' sections in crd/kustomization.yaml to enable the CA injection in the admission webhooks.
# 'CERTMANAGER' needs to be enabled to use ca injection
#- webhookcainjection_patch.yaml
# the following config is for teaching kustomize how to do var substitution
vars:
# [CERTMANAGER] To enable cert-manager, uncomment all sections with 'CERTMANAGER' prefix.
#- name: CERTIFICATE_NAMESPACE # namespace of the certificate CR
# objref:
# kind: Certificate
# group: cert-manager.io
# version: v1
# name: serving-cert # this name should match the one in certificate.yaml
# fieldref:
# fieldpath: metadata.namespace
#- name: CERTIFICATE_NAME
# objref:
# kind: Certificate
# group: cert-manager.io
# version: v1
# name: serving-cert # this name should match the one in certificate.yaml
#- name: SERVICE_NAMESPACE # namespace of the service
# objref:
# kind: Service
# version: v1
# name: webhook-service
# fieldref:
# fieldpath: metadata.namespace
#- name: SERVICE_NAME
# objref:
# kind: Service
# version: v1
# name: webhook-service
================================================
FILE: config/default/manager_auth_proxy_patch.yaml
================================================
# This patch inject a sidecar container which is a HTTP proxy for the
# controller manager, it performs RBAC authorization against the Kubernetes API using SubjectAccessReviews.
apiVersion: apps/v1
kind: Deployment
metadata:
name: controller-manager
namespace: system
spec:
template:
spec:
containers:
- name: kube-rbac-proxy
image: gcr.io/kubebuilder/kube-rbac-proxy:v0.8.0
args:
- "--secure-listen-address=0.0.0.0:8443"
- "--upstream=http://127.0.0.1:8080/"
- "--logtostderr=true"
- "--v=10"
ports:
- containerPort: 8443
name: https
- name: manager
args:
- "--health-probe-bind-address=:8081"
- "--metrics-bind-address=127.0.0.1:8080"
- "--leader-elect"
================================================
FILE: config/manager/controller_manager_config.yaml
================================================
apiVersion: eraser.sh/v1alpha3
kind: EraserConfig
manager:
runtime:
name: containerd
address: unix:///run/containerd/containerd.sock
otlpEndpoint: ""
logLevel: info
scheduling:
repeatInterval: 24h
beginImmediately: true
profile:
enabled: false
port: 6060
imageJob:
successRatio: 1.0
cleanup:
delayOnSuccess: 0s
delayOnFailure: 24h
pullSecrets: [] # image pull secrets for collector/scanner/eraser
priorityClassName: "" # priority class name for collector/scanner/eraser
additionalPodLabels: {}
nodeFilter:
type: exclude # must be either exclude|include
selectors:
- eraser.sh/cleanup.filter
- kubernetes.io/os=windows
components:
collector:
enabled: true
image:
repo: COLLECTOR_REPO
tag: COLLECTOR_TAG
request:
mem: 25Mi
cpu: 7m
limit:
mem: 500Mi
# https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/#how-pods-with-resource-limits-are-run
cpu: 0
scanner:
enabled: true
image:
repo: SCANNER_REPO # supply custom image for custom scanner
tag: SCANNER_TAG
request:
mem: 500Mi
cpu: 1000m
limit:
mem: 2Gi
# https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/#how-pods-with-resource-limits-are-run
cpu: 0
# The config needs to be passed through to the scanner as yaml, as a
# single string. Because we allow custom scanner images, the scanner is
# responsible for defining a schema, parsing, and validating.
config: |
# this is the schema for the provided 'trivy-scanner'. custom scanners
# will define their own configuration.
cacheDir: /var/lib/trivy
dbRepo: ghcr.io/aquasecurity/trivy-db
deleteFailedImages: true
deleteEOLImages: true
vulnerabilities:
ignoreUnfixed: false
types:
- os
- library
securityChecks:
- vuln
severities:
- CRITICAL
- HIGH
- MEDIUM
- LOW
ignoredStatuses:
timeout:
total: 23h
perImage: 1h
volumes: []
remover:
image:
repo: REMOVER_REPO
tag: REMOVER_TAG
request:
mem: 25Mi
# https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/#how-pods-with-resource-limits-are-run
cpu: 0
limit:
mem: 30Mi
cpu: 0
================================================
FILE: config/manager/kustomization.yaml
================================================
resources:
- manager.yaml
generatorOptions:
disableNameSuffixHash: true
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
images:
- name: controller
newName: ghcr.io/eraser-dev/eraser-manager
newTag: v1.0.0-beta.3
# DO NOT CHANGE FORMATTING:
# This must be deleted for helm chart generation, so it should all be on one line.
configMapGenerator: [ { "files": ["controller_manager_config.yaml"], "name": "manager-config" } ]
# DO NOT CHANGE FORMATTING:
# This must be deleted for helm chart generation, so it should all be on one line.
patches: [{"path":"patch.yaml","target":{"kind":"Deployment"}}]
================================================
FILE: config/manager/manager.yaml
================================================
apiVersion: v1
kind: Namespace
metadata:
labels:
control-plane: controller-manager
name: system
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: controller-manager
namespace: system
labels:
control-plane: controller-manager
spec:
selector:
matchLabels:
control-plane: controller-manager
replicas: 1
template:
metadata:
labels:
control-plane: controller-manager
spec:
nodeSelector:
kubernetes.io/os: linux
containers:
- command:
- /manager
args: []
image: controller:latest
name: manager
env:
- name: POD_NAMESPACE
valueFrom:
fieldRef:
apiVersion: v1
fieldPath: metadata.namespace
- name: OTEL_SERVICE_NAME
value: eraser-manager
securityContext:
allowPrivilegeEscalation: false
readOnlyRootFilesystem: true
runAsUser: 65532
runAsGroup: 65532
runAsNonRoot: true
seccompProfile:
type: RuntimeDefault
capabilities:
drop:
- ALL
livenessProbe:
httpGet:
path: /healthz
port: 8081
initialDelaySeconds: 15
periodSeconds: 20
readinessProbe:
httpGet:
path: /readyz
port: 8081
initialDelaySeconds: 5
periodSeconds: 10
resources:
limits:
memory: 30Mi
requests:
cpu: 100m
memory: 20Mi
serviceAccountName: controller-manager
terminationGracePeriodSeconds: 10
================================================
FILE: config/manager/patch.yaml
================================================
- op: add
path: /spec/template/spec/containers/0/volumeMounts
value: []
- op: add
path: /spec/template/spec/containers/0/volumeMounts/-
value: { "name": "manager-config", "mountPath": "/config" }
- op: add
path: /spec/template/spec/volumes
value: []
- op: add
path: /spec/template/spec/volumes/-
value: { "name": "manager-config", "configMap": { "name": "manager-config" } }
- op: add
path: /spec/template/spec/containers/0/args/-
value: "--config=/config/controller_manager_config.yaml"
================================================
FILE: config/prometheus/kustomization.yaml
================================================
resources:
- monitor.yaml
================================================
FILE: config/prometheus/monitor.yaml
================================================
# Prometheus Monitor Service (Metrics)
apiVersion: monitoring.coreos.com/v1
kind: ServiceMonitor
metadata:
labels:
control-plane: controller-manager
name: controller-manager-metrics-monitor
namespace: system
spec:
endpoints:
- path: /metrics
port: https
scheme: https
bearerTokenFile: /var/run/secrets/kubernetes.io/serviceaccount/token
tlsConfig:
insecureSkipVerify: true
selector:
matchLabels:
control-plane: controller-manager
================================================
FILE: config/rbac/auth_proxy_client_clusterrole.yaml
================================================
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: metrics-reader
rules:
- nonResourceURLs:
- "/metrics"
verbs:
- get
================================================
FILE: config/rbac/auth_proxy_role.yaml
================================================
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: proxy-role
rules:
- apiGroups:
- authentication.k8s.io
resources:
- tokenreviews
verbs:
- create
- apiGroups:
- authorization.k8s.io
resources:
- subjectaccessreviews
verbs:
- create
================================================
FILE: config/rbac/auth_proxy_role_binding.yaml
================================================
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: proxy-rolebinding
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: proxy-role
subjects:
- kind: ServiceAccount
name: controller-manager
namespace: system
================================================
FILE: config/rbac/auth_proxy_service.yaml
================================================
apiVersion: v1
kind: Service
metadata:
labels:
control-plane: controller-manager
name: controller-manager-metrics-service
namespace: system
spec:
ports:
- name: https
port: 8443
targetPort: https
selector:
control-plane: controller-manager
================================================
FILE: config/rbac/cluster_role_binding.yaml
================================================
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: manager-rolebinding
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: manager-role
subjects:
- kind: ServiceAccount
name: controller-manager
namespace: system
================================================
FILE: config/rbac/imagejob_pods_service.yaml
================================================
apiVersion: v1
kind: ServiceAccount
metadata:
name: imagejob-pods
namespace: system
================================================
FILE: config/rbac/imagelist_editor_role.yaml
================================================
# permissions for end users to edit imagelists.
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: imagelist-editor-role
rules:
- apiGroups:
- eraser.sh
resources:
- imagelists
verbs:
- create
- delete
- get
- list
- patch
- update
- watch
- apiGroups:
- eraser.sh
resources:
- imagelists/status
verbs:
- get
================================================
FILE: config/rbac/imagelist_viewer_role.yaml
================================================
# permissions for end users to view imagelists.
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: imagelist-viewer-role
rules:
- apiGroups:
- eraser.sh
resources:
- imagelists
verbs:
- get
- list
- watch
- apiGroups:
- eraser.sh
resources:
- imagelists/status
verbs:
- get
================================================
FILE: config/rbac/kustomization.yaml
================================================
resources:
# All RBAC will be applied under this service account in
# the deployment namespace. You may comment out this resource
# if your manager will use a service account that exists at
# runtime. Be sure to update RoleBinding and ClusterRoleBinding
# subjects if changing service account names.
- service_account.yaml
- role.yaml
- role_binding.yaml
- imagejob_pods_service.yaml
- cluster_role_binding.yaml
# Comment the following 4 lines if you want to disable
# the auth proxy (https://github.com/brancz/kube-rbac-proxy)
# which protects your /metrics endpoint.
# - auth_proxy_service.yaml
# - auth_proxy_role.yaml
# - auth_proxy_role_binding.yaml
# - auth_proxy_client_clusterrole.yaml
# uncomment the following if you want to enable leader election
# - leader_election_role.yaml
# - leader_election_role_binding.yaml
================================================
FILE: config/rbac/leader_election_role.yaml
================================================
# permissions to do leader election.
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
name: leader-election-role
rules:
- apiGroups:
- ""
resources:
- configmaps
verbs:
- get
- list
- watch
- create
- update
- patch
- delete
- apiGroups:
- coordination.k8s.io
resources:
- leases
verbs:
- get
- list
- watch
- create
- update
- patch
- delete
- apiGroups:
- ""
resources:
- events
verbs:
- create
- patch
================================================
FILE: config/rbac/leader_election_role_binding.yaml
================================================
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
name: leader-election-rolebinding
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
name: leader-election-role
subjects:
- kind: ServiceAccount
name: controller-manager
namespace: system
================================================
FILE: config/rbac/role.yaml
================================================
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: manager-role
rules:
- apiGroups:
- ""
resources:
- nodes
verbs:
- get
- list
- watch
- apiGroups:
- eraser.sh
resources:
- imagejobs
verbs:
- create
- delete
- get
- list
- watch
- apiGroups:
- eraser.sh
resources:
- imagejobs/status
verbs:
- get
- patch
- update
- apiGroups:
- eraser.sh
resources:
- imagelists
verbs:
- get
- list
- watch
- apiGroups:
- eraser.sh
resources:
- imagelists/status
verbs:
- get
- patch
- update
---
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
name: manager-role
namespace: system
rules:
- apiGroups:
- ""
resources:
- configmaps
verbs:
- create
- delete
- get
- list
- patch
- update
- watch
- apiGroups:
- ""
resources:
- pods
verbs:
- create
- delete
- get
- list
- update
- watch
- apiGroups:
- ""
resources:
- podtemplates
verbs:
- create
- delete
- get
- list
- patch
- update
- watch
================================================
FILE: config/rbac/role_binding.yaml
================================================
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
name: manager-rolebinding
namespace: system
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
name: manager-role
subjects:
- kind: ServiceAccount
name: controller-manager
namespace: system
================================================
FILE: config/rbac/service_account.yaml
================================================
apiVersion: v1
kind: ServiceAccount
metadata:
name: controller-manager
namespace: system
================================================
FILE: controllers/configmap/configmap.go
================================================
package configmap
import (
"context"
"fmt"
"math/rand"
"os"
"os/signal"
"syscall"
corev1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/types"
"sigs.k8s.io/controller-runtime/pkg/controller"
"sigs.k8s.io/controller-runtime/pkg/event"
"sigs.k8s.io/controller-runtime/pkg/handler"
"sigs.k8s.io/controller-runtime/pkg/predicate"
"sigs.k8s.io/controller-runtime/pkg/source"
"go.opentelemetry.io/otel"
"k8s.io/apimachinery/pkg/runtime"
"sigs.k8s.io/controller-runtime/pkg/client"
"github.com/eraser-dev/eraser/api/unversioned/config"
eraserv1 "github.com/eraser-dev/eraser/api/v1"
controllerUtils "github.com/eraser-dev/eraser/controllers/util"
"github.com/eraser-dev/eraser/pkg/metrics"
eraserUtils "github.com/eraser-dev/eraser/pkg/utils"
sdkmetric "go.opentelemetry.io/otel/sdk/metric"
ctrl "sigs.k8s.io/controller-runtime"
logf "sigs.k8s.io/controller-runtime/pkg/log"
"sigs.k8s.io/controller-runtime/pkg/manager"
"sigs.k8s.io/controller-runtime/pkg/reconcile"
)
var (
	// log is the package-scoped logger, tagged with this controller's name.
	log = logf.Log.WithName("controller").WithValues("process", "configmap-controller")

	// provider holds the OpenTelemetry meter provider; it is set in
	// newReconciler only when an OTLP endpoint is configured.
	provider *sdkmetric.MeterProvider

	// configmap identifies the eraser configmap this controller watches,
	// resolved once at startup from the pod's namespace.
	configmap = types.NamespacedName{
		Namespace: eraserUtils.GetNamespace(),
		Name:      controllerUtils.EraserConfigmapName,
	}
)
// Reconciler reacts to updates of the eraser configmap by nudging the
// manager pod so that the mounted configuration is refreshed promptly.
// (The original comment referred to ImageList; this type lives in the
// configmap controller package.)
type Reconciler struct {
	client.Client
	// scheme is the runtime scheme shared with the manager.
	scheme *runtime.Scheme
	// eraserConfig gives access to the parsed eraser configuration.
	eraserConfig *config.Manager
}
// Add registers the configmap controller with the manager. The controller
// watches the eraser configmap and enqueues a reconcile whenever it changes,
// so configuration updates propagate to the manager pod.
func Add(mgr manager.Manager, cfg *config.Manager) error {
	r, err := newReconciler(mgr, cfg)
	if err != nil {
		return err
	}

	// Renamed from "imagelist-controller" (copy-paste from the imagelist
	// package): the name is used for logging/metrics and should be unique.
	c, err := controller.New("configmap-controller", mgr, controller.Options{
		Reconciler: r,
	})
	if err != nil {
		return err
	}

	err = c.Watch(
		source.Kind(mgr.GetCache(), &corev1.ConfigMap{}),
		&handler.EnqueueRequestForObject{},
		predicate.ResourceVersionChangedPredicate{},
		predicate.Funcs{
			UpdateFunc: func(e event.UpdateEvent) bool {
				// Check the type assertion before touching cfg: the
				// original dereferenced cfg first, which would panic on a
				// nil pointer if the object were not a ConfigMap.
				cfg, ok := e.ObjectNew.(*corev1.ConfigMap)
				if !ok {
					return false
				}

				n := types.NamespacedName{Namespace: cfg.GetNamespace(), Name: cfg.GetName()}
				if n != configmap {
					return false
				}

				log.Info("configmap was updated, reloading")
				return true
			},
			DeleteFunc:  controllerUtils.NeverOnDelete,
			GenericFunc: controllerUtils.NeverOnGeneric,
			CreateFunc:  controllerUtils.NeverOnCreate,
		},
	)
	if err != nil {
		return err
	}

	return nil
}
// newReconciler constructs the reconcile.Reconciler for the configmap
// controller. When an OTLP endpoint is configured, it also installs the
// OpenTelemetry meter provider used for metrics export.
func newReconciler(mgr manager.Manager, cfg *config.Manager) (reconcile.Reconciler, error) {
	eraserCfg, err := cfg.Read()
	if err != nil {
		return nil, err
	}

	if endpoint := eraserCfg.Manager.OTLPEndpoint; endpoint != "" {
		// NOTE(review): cancel fires as soon as this constructor returns,
		// which also cancels the context passed to ConfigureMetrics —
		// confirm metrics setup only needs the context during init.
		ctx, cancel := signal.NotifyContext(context.Background(), os.Interrupt, syscall.SIGTERM)
		defer cancel()

		_, _, provider = metrics.ConfigureMetrics(ctx, log, endpoint)
		otel.SetMeterProvider(provider)
	}

	return &Reconciler{
		Client:       mgr.GetClient(),
		scheme:       mgr.GetScheme(),
		eraserConfig: cfg,
	}, nil
}
// +kubebuilder:rbac:groups="",resources=configmaps,verbs=get;list;watch
// +kubebuilder:rbac:groups="",resources=pods,verbs=get;list;watch;update

// Reconcile is triggered when the eraser configmap changes. If no ImageJob is
// running, it bumps an annotation on a manager pod so the kubelet refreshes
// the mounted configmap almost immediately (instead of on its ~60-90s sync).
//
// Note: the pods RBAC marker previously read "get;list;watch,delete" — a
// malformed comma, and "delete" did not match the code, which updates the
// pod. It now grants exactly what Reconcile needs.
func (r *Reconciler) Reconcile(ctx context.Context, _ ctrl.Request) (ctrl.Result, error) {
	// Refuse to apply a configmap change while an ImageJob is in flight;
	// returning an error causes the request to be retried later.
	jobList := eraserv1.ImageJobList{}
	if err := r.List(ctx, &jobList); err != nil {
		return ctrl.Result{}, err
	}

	for i := range jobList.Items {
		if jobList.Items[i].Status.Phase == eraserv1.PhaseRunning {
			return ctrl.Result{}, fmt.Errorf("job is currently running, deferring configmap update")
		}
	}

	podList := corev1.PodList{}
	if err := r.List(ctx, &podList, client.MatchingLabels{
		"control-plane": "controller-manager",
	}); err != nil {
		return ctrl.Result{}, err
	}

	pods := podList.Items
	if len(pods) == 0 {
		return ctrl.Result{}, nil
	}

	// Prefer a Running manager pod, falling back to the first pod listed.
	// The original loop ranged over pods[1:] but indexed pods[i], so it
	// re-checked pods[0] and never examined the final element.
	pod := pods[0]
	for i := 1; i < len(pods); i++ {
		if pods[i].Status.Phase == corev1.PodRunning {
			pod = pods[i]
			break
		}
	}

	// the configmap is mounted to the filesystem, but the normal
	// reconciliation loop will not update it on the node's filesystem until
	// about 60-90 seconds later. updating the annotations will trigger an
	// almost immediate update, which is monitored by an inotify watch set up in
	// the main() function.
	//
	// the annotation only needs to be different from the previous value, so we
	// don't need cryptographically sound random numbers here. the following
	// comment disables the linter which prefers random numbers from the
	// crypto/rand library.
	//nolint:all
	newVersion := fmt.Sprintf("%d", rand.Int63())
	if pod.Annotations == nil {
		pod.Annotations = make(map[string]string)
	}
	pod.Annotations["eraser.sh/configVersion"] = newVersion

	if err := r.Update(ctx, &pod); err != nil {
		return ctrl.Result{}, err
	}

	return ctrl.Result{}, nil
}
================================================
FILE: controllers/controller.go
================================================
// Package controllers implements Kubernetes controllers for eraser resources.
package controllers
import (
"errors"
"github.com/eraser-dev/eraser/api/unversioned/config"
"github.com/eraser-dev/eraser/controllers/configmap"
"github.com/eraser-dev/eraser/controllers/imagecollector"
"github.com/eraser-dev/eraser/controllers/imagejob"
"github.com/eraser-dev/eraser/controllers/imagelist"
"k8s.io/apimachinery/pkg/api/meta"
ctrl "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/manager"
)
// controllerSetupFunc registers a single controller with the manager using
// the shared eraser configuration.
type controllerSetupFunc func(manager.Manager, *config.Manager) error

var (
	controllerLog = ctrl.Log.WithName("controllerRuntimeLogger")

	// controllerAddFuncs lists every controller that SetupWithManager
	// registers, in registration order.
	controllerAddFuncs = []controllerSetupFunc{
		imagelist.Add,
		imagejob.Add,
		imagecollector.Add,
		configmap.Add,
	}
)
// SetupWithManager registers every eraser controller with the given manager.
// A controller whose CRD is not installed is skipped with an informational
// log rather than treated as a fatal error.
func SetupWithManager(m manager.Manager, cfg *config.Manager) error {
	controllerLog.Info("set up with manager")
	for _, f := range controllerAddFuncs {
		if err := f(m, cfg); err != nil {
			var kindMatchErr *meta.NoKindMatchError
			if errors.As(err, &kindMatchErr) {
				// logr does not do printf-style formatting; the original
				// passed "%v" plus a dangling argument. Report the missing
				// CRD as a structured key/value pair instead.
				controllerLog.Info("CRD is not installed", "groupKind", kindMatchErr.GroupKind)
				continue
			}
			return err
		}
	}
	return nil
}
================================================
FILE: controllers/imagecollector/imagecollector_controller.go
================================================
/*
Copyright 2021.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package imagecollector
import (
"context"
"errors"
"fmt"
"os"
"os/signal"
"path/filepath"
"strconv"
"syscall"
"time"
"go.opentelemetry.io/otel"
"k8s.io/apimachinery/pkg/runtime"
ctrl "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/event"
"sigs.k8s.io/controller-runtime/pkg/handler"
"sigs.k8s.io/controller-runtime/pkg/manager"
"sigs.k8s.io/controller-runtime/pkg/reconcile"
"github.com/eraser-dev/eraser/api/unversioned/config"
eraserv1 "github.com/eraser-dev/eraser/api/v1"
eraserv1alpha1 "github.com/eraser-dev/eraser/api/v1alpha1"
"github.com/eraser-dev/eraser/controllers/util"
"sigs.k8s.io/controller-runtime/pkg/controller"
logf "sigs.k8s.io/controller-runtime/pkg/log"
"sigs.k8s.io/controller-runtime/pkg/predicate"
"sigs.k8s.io/controller-runtime/pkg/source"
"github.com/eraser-dev/eraser/pkg/logger"
"github.com/eraser-dev/eraser/pkg/metrics"
eraserUtils "github.com/eraser-dev/eraser/pkg/utils"
sdkmetric "go.opentelemetry.io/otel/sdk/metric"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/types"
)
const (
	// ownerLabelValue marks ImageJobs as owned by the imagecollector
	// controller (paired with util.ImageJobOwnerLabelKey).
	ownerLabelValue = "imagecollector"
	// configVolumeName names the volume that mounts the eraser configmap
	// into collector/scanner pods.
	configVolumeName = "eraser-config"
)

var (
	log = logf.Log.WithName("controller").WithValues("process", "imagecollector-controller")

	// startTime records when the current collector ImageJob was created.
	startTime time.Time

	// ownerLabel selects ImageJobs owned by this controller; built in init().
	ownerLabel labels.Selector

	// OpenTelemetry metrics plumbing, populated by newReconciler only when
	// an OTLP endpoint is configured.
	exporter sdkmetric.Exporter
	reader   sdkmetric.Reader
	provider *sdkmetric.MeterProvider
)
// init builds the label selector used to recognize ImageJobs owned by the
// imagecollector controller. A malformed selector is a programmer error, so
// a parse failure panics at startup.
func init() {
	selector := fmt.Sprintf("%s=%s", util.ImageJobOwnerLabelKey, ownerLabelValue)

	parsed, err := labels.Parse(selector)
	if err != nil {
		panic(err)
	}

	ownerLabel = parsed
}
// Reconciler drives the periodic collector ImageJob lifecycle: creating
// jobs on a schedule and handling their completion. (The scaffolded comment
// referred to an ImageCollector object; no such CRD is reconciled here.)
type Reconciler struct {
	client.Client
	// Scheme is the runtime scheme shared with the manager.
	Scheme *runtime.Scheme
	// eraserConfig gives access to the parsed eraser configuration.
	eraserConfig *config.Manager
}
// Add wires the imagecollector controller into the manager. Registration is
// skipped entirely — without error — when the collector component is
// disabled in the eraser configuration.
func Add(mgr manager.Manager, cfg *config.Manager) error {
	eraserCfg, err := cfg.Read()
	if err != nil {
		return err
	}

	// don't add controller, but don't throw an error either
	if !eraserCfg.Components.Collector.Enabled {
		return nil
	}

	rec, err := newReconciler(mgr, cfg)
	if err != nil {
		return err
	}

	return add(mgr, rec)
}
// newReconciler builds the Reconciler for the imagecollector controller and,
// when an OTLP endpoint is configured, initializes the OpenTelemetry metrics
// pipeline (exporter, reader, provider package globals).
func newReconciler(mgr manager.Manager, cfg *config.Manager) (*Reconciler, error) {
	eraserCfg, err := cfg.Read()
	if err != nil {
		return nil, err
	}

	if endpoint := eraserCfg.Manager.OTLPEndpoint; endpoint != "" {
		// NOTE(review): cancel runs when this constructor returns, which
		// also cancels the context handed to ConfigureMetrics — confirm
		// metrics setup only needs the context during initialization.
		ctx, cancel := signal.NotifyContext(context.Background(), os.Interrupt, syscall.SIGTERM)
		defer cancel()

		exporter, reader, provider = metrics.ConfigureMetrics(ctx, log, endpoint)
		otel.SetMeterProvider(provider)
	}

	return &Reconciler{
		Client:       mgr.GetClient(),
		Scheme:       mgr.GetScheme(),
		eraserConfig: cfg,
	}, nil
}
// add registers the imagecollector controller and its event sources with the
// manager: a watch on ImageJob updates (filtered to owned jobs reaching a
// terminal phase) and a channel source used to queue the very first
// reconcile after the configured scheduling delay.
func add(mgr manager.Manager, r *Reconciler) error {
	log.Info("add collector controller")
	// Create a new controller
	c, err := controller.New("imagecollector-controller", mgr, controller.Options{
		Reconciler: r,
	})
	if err != nil {
		return err
	}
	err = c.Watch(
		source.Kind(mgr.GetCache(), &eraserv1.ImageJob{}),
		&handler.EnqueueRequestForObject{}, predicate.Funcs{
			// Do nothing on Create, Delete, or Generic events
			CreateFunc:  util.NeverOnCreate,
			DeleteFunc:  util.NeverOnDelete,
			GenericFunc: util.NeverOnGeneric,
			UpdateFunc: func(e event.UpdateEvent) bool {
				// Only react when an ImageJob owned by this controller
				// transitions to a completed or failed phase.
				if job, ok := e.ObjectNew.(*eraserv1.ImageJob); ok && util.IsCompletedOrFailed(job.Status.Phase) {
					return ownerLabel.Matches(labels.Set(job.Labels))
				}
				return false
			},
		},
	)
	if err != nil {
		return err
	}
	// Unbuffered channel feeding the synthetic "first-reconcile" event
	// below; the send blocks until the controller is started and consuming.
	ch := make(chan event.GenericEvent)
	err = c.Watch(&source.Channel{
		Source: ch,
	}, &handler.EnqueueRequestForObject{})
	if err != nil {
		return err
	}
	eraserConfig, err := r.eraserConfig.Read()
	if err != nil {
		return err
	}
	scheduleCfg := eraserConfig.Manager.Scheduling
	// Default to one full repeat interval; start immediately if configured.
	delay := time.Duration(scheduleCfg.RepeatInterval)
	if scheduleCfg.BeginImmediately {
		delay = 0 * time.Second
	}
	log.V(1).Info("delay", "delay", delay)
	// runs the provided function after the specified delay
	_ = time.AfterFunc(delay, func() {
		log.Info("Queueing first ImageCollector reconcile...")
		// The reserved name "first-reconcile" tells Reconcile to clean up
		// leftover jobs before starting a fresh one.
		ch <- event.GenericEvent{
			Object: &eraserv1.ImageJob{
				ObjectMeta: metav1.ObjectMeta{
					Name: "first-reconcile",
				},
			},
		}
	})
	return nil
}
//+kubebuilder:rbac:groups=eraser.sh,resources=imagelists,verbs=get;list;watch
//+kubebuilder:rbac:groups="",namespace="system",resources=podtemplates,verbs=get;list;watch;create;update;patch;delete
//+kubebuilder:rbac:groups=eraser.sh,resources=imagelists/status,verbs=get;update;patch
//+kubebuilder:rbac:groups="",resources=nodes,verbs=get;list;watch
//+kubebuilder:rbac:groups="",namespace="system",resources=pods,verbs=get;list;watch;update;create;delete

// Reconcile drives the collector job lifecycle. It is triggered either by
// the synthetic "first-reconcile" event (queued on a timer in add), or by an
// owned ImageJob reaching a terminal phase.
//
// For more details, check Reconcile and its Result here:
// - https://pkg.go.dev/sigs.k8s.io/controller-runtime@v0.11.2/pkg/reconcile
func (r *Reconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) {
	log.Info("ImageCollector Reconcile")
	defer log.Info("done reconcile")

	jobList := &eraserv1.ImageJobList{}
	err := r.List(ctx, jobList)
	if err != nil {
		log.Info("could not list imagejobs")
		return ctrl.Result{}, err
	}

	// First reconcile: remove any ImageJobs left over from a previous run,
	// then begin a fresh collector job.
	if req.Name == "first-reconcile" {
		for i := range jobList.Items {
			if delErr := r.Delete(ctx, &jobList.Items[i]); delErr != nil {
				log.Info("error cleaning up previous imagejobs")
				return ctrl.Result{}, delErr
			}
		}

		return r.createImageJob(ctx)
	}

	jobCount := len(jobList.Items)
	if jobCount == 0 {
		// Timer-driven reconcile with nothing in flight: start a new
		// collector ImageJob.
		return r.createImageJob(ctx)
	}
	if jobCount == 1 {
		// A job just finished; proceed to imagelist creation.
		return r.handleCompletedImageJob(ctx, &jobList.Items[0])
	}

	return ctrl.Result{}, fmt.Errorf("more than one collector ImageJobs are scheduled")
}
// handleJobDeletion removes a finished ImageJob and its same-named pod
// template once the job's DeleteAfter deadline has passed. If the deadline
// is still in the future, the request is requeued to fire at that moment.
func (r *Reconciler) handleJobDeletion(ctx context.Context, job *eraserv1.ImageJob) (ctrl.Result, error) {
	until := time.Until(job.Status.DeleteAfter.Time)
	if until > 0 {
		// fix: structured log key was misspelled "deleteAter".
		log.Info("Delaying imagejob delete", "job", job.Name, "deleteAfter", job.Status.DeleteAfter)
		return ctrl.Result{RequeueAfter: until}, nil
	}

	log.Info("Deleting imagejob", "job", job.Name)
	if err := r.Delete(ctx, job); err != nil {
		return ctrl.Result{}, err
	}

	// The pod template shares the job's name and lives in the eraser
	// namespace; delete it along with the job.
	template := corev1.PodTemplate{}
	if err := r.Get(ctx,
		types.NamespacedName{
			Namespace: eraserUtils.GetNamespace(),
			Name:      job.GetName(),
		},
		&template,
	); err != nil {
		return ctrl.Result{}, err
	}

	log.Info("Deleting pod template", "template", template.Name)
	if err := r.Delete(ctx, &template); err != nil {
		return ctrl.Result{}, err
	}

	log.Info("end job deletion")
	return ctrl.Result{}, nil
}
func (r *Reconciler) createImageJob(ctx context.Context) (ctrl.Result, error) {
eraserConfig, err := r.eraserConfig.Read()
if err != nil {
return ctrl.Result{}, err
}
mgrCfg := eraserConfig.Manager
compCfg := eraserConfig.Components
scanCfg := compCfg.Scanner
collectorCfg := compCfg.Collector
eraserCfg := compCfg.Remover
scanDisabled := !scanCfg.Enabled
startTime = time.Now()
removerImg := *util.RemoverImage
if removerImg == "" {
iCfg := eraserCfg.Image
removerImg = fmt.Sprintf("%s:%s", iCfg.Repo, iCfg.Tag)
}
log.V(1).Info("removerImg", "removerImg", removerImg)
iCfg := collectorCfg.Image
collectorImg := fmt.Sprintf("%s:%s", iCfg.Repo, iCfg.Tag)
profileConfig := eraserConfig.Manager.Profile
profileArgs := []string{
"--enable-pprof=" + strconv.FormatBool(profileConfig.Enabled),
fmt.Sprintf("--pprof-port=%d", profileConfig.Port),
}
collArgs := []string{"--scan-disabled=" + strconv.FormatBool(scanDisabled)}
collArgs = append(collArgs, profileArgs...)
removerArgs := []string{"--log-level=" + logger.GetLevel()}
removerArgs = append(removerArgs, profileArgs...)
pullSecrets := []corev1.LocalObjectReference{}
for _, secret := range eraserConfig.Manager.PullSecrets {
pullSecrets = append(pullSecrets, corev1.LocalObjectReference{Name: secret})
}
jobTemplate := corev1.PodTemplateSpec{
Spec: corev1.PodSpec{
Volumes: []corev1.Volume{
{
// EmptyDir default
Name: "shared-data",
},
{
Name: configVolumeName,
VolumeSource: corev1.VolumeSource{
ConfigMap: &corev1.ConfigMapVolumeSource{
LocalObjectReference: corev1.LocalObjectReference{
Name: util.EraserConfigmapName,
},
},
},
},
},
ImagePullSecrets: pullSecrets,
RestartPolicy: corev1.RestartPolicyNever,
PriorityClassName: eraserConfig.Manager.PriorityClassName,
Containers: []corev1.Container{
{
Name: "collector",
Image: collectorImg,
ImagePullPolicy: corev1.PullIfNotPresent,
Args: collArgs,
VolumeMounts: []corev1.VolumeMount{
{MountPath: "/run/eraser.sh/shared-data", Name: "shared-data"},
},
Resources: corev1.ResourceRequirements{
Requests: corev1.ResourceList{
"cpu": collectorCfg.Request.CPU,
"memory": collectorCfg.Request.Mem,
},
Limits: corev1.ResourceList{
"memory": collectorCfg.Limit.Mem,
},
},
},
{
Name: "remover",
Image: removerImg,
ImagePullPolicy: corev1.PullIfNotPresent,
Args: removerArgs,
VolumeMounts: []corev1.VolumeMount{
{MountPath: "/run/eraser.sh/shared-data", Name: "shared-data"},
},
Resources: corev1.ResourceRequirements{
Requests: corev1.ResourceList{
"cpu": eraserCfg.Request.CPU,
"memory": eraserCfg.Request.Mem,
},
Limits: corev1.ResourceList{
"memory": eraserCfg.Limit.Mem,
},
},
SecurityContext: eraserUtils.SharedSecurityContext,
Env: []corev1.EnvVar{
{
Name: "OTEL_EXPORTER_OTLP_ENDPOINT",
Value: mgrCfg.OTLPEndpoint,
},
{
Name: "OTEL_SERVICE_NAME",
Value: "remover",
},
},
},
},
ServiceAccountName: "eraser-imagejob-pods",
},
}
job := &eraserv1alpha1.ImageJob{
ObjectMeta: metav1.ObjectMeta{
GenerateName: "imagejob-",
Labels: map[string]string{
util.ImageJobOwnerLabelKey: ownerLabelValue,
},
},
}
if !scanDisabled {
iCfg := scanCfg.Image
scannerImg := fmt.Sprintf("%s:%s", iCfg.Repo, iCfg.Tag)
cfgDirname := "/config"
cfgFilename := filepath.Join(cfgDirname, "controller_manager_config.yaml")
scannerArgs := []string{fmt.Sprintf("--config=%s", cfgFilename)}
scannerArgs = append(scannerArgs, profileArgs...)
scannerContainer := corev1.Container{
Name: "trivy-scanner",
Image: scannerImg,
Args: scannerArgs,
VolumeMounts: []corev1.VolumeMount{
{MountPath: "/run/eraser.sh/shared-data", Name: "shared-data"},
{MountPath: cfgDirname, Name: configVolumeName},
},
Resources: corev1.ResourceRequirements{
Requests: corev1.ResourceList{
"memory": scanCfg.Request.Mem,
"cpu": scanCfg.Request.CPU,
},
Limits: corev1.ResourceList{
"memory": scanCfg.Limit.Mem,
},
},
// env vars for exporting metrics
Env: []corev1.EnvVar{
{
Name: "OTEL_EXPORTER_OTLP_ENDPOINT",
Value: mgrCfg.OTLPEndpoint,
},
{
Name: "OTEL_SERVICE_NAME",
Value: "trivy-scanner",
},
{
Name: "ERASER_RUNTIME_NAME",
Value: string(mgrCfg.Runtime.Name),
},
},
}
log.Info("extra mount for scanner starts")
scannerVolumes := compCfg.Scanner.Volumes
if len(scannerVolumes) != 0 {
jobTemplate.Spec.Volumes = append(jobTemplate.Spec.Volumes, scannerVolumes...)
scannerVolumeMounts := []corev1.VolumeMount{}
for idx := range scannerVolumes {
volume := scannerVolumes[idx]
if volume.HostPath == nil {
log.Error(fmt.Errorf("volume hostPath is nil"), "invalid volume", "volumeName", volume.Name)
continue
}
scannerVolumeMounts = append(scannerVolumeMounts, corev1.VolumeMount{
Name: volume.Name,
MountPath: volume.HostPath.Path,
ReadOnly: true,
})
}
scannerContainer.VolumeMounts = append(scannerContainer.VolumeMounts, scannerVolumeMounts...)
}
jobTemplate.Spec.Containers = append(jobTemplate.Spec.Containers, scannerContainer)
}
configmapList := &corev1.ConfigMapList{}
if err := r.List(ctx, configmapList, client.InNamespace(eraserUtils.GetNamespace())); err != nil {
log.Info("Could not get list of configmaps")
return reconcile.Result{}, err
}
exclusionMount, exclusionVolume, err := util.GetExclusionVolume(configmapList)
if err != nil {
log.Info("Could not get exclusion mounts and volumes")
return reconcile.Result{}, err
}
for i := range jobTemplate.Spec.Containers {
jobTemplate.Spec.Containers[i].VolumeMounts = append(jobTemplate.Spec.Containers[i].VolumeMounts, exclusionMount...)
}
jobTemplate.Spec.Volumes = append(jobTemplate.Spec.Volumes, exclusionVolume...)
err = r.Create(ctx, job)
if err != nil {
log.Info("Could not create collector ImageJob")
return reconcile.Result{}, err
}
// get manager pod with label control-plane=controller-manager
podList := corev1.PodList{}
if err := r.List(ctx, &podList, client.InNamespace(eraserUtils.GetNamespace()), client.MatchingLabels{"control-plane": "controller-manager"}); err != nil {
log.Info("Unable to list controller-manager pod")
}
if len(podList.Items) != 1 {
log.Info("Incorrect number of controller-manager pods", "number of pods", len(podList.Items))
}
managerPod := &podList.Items[0]
namespace := eraserUtils.GetNamespace()
template := corev1.PodTemplate{
ObjectMeta: metav1.ObjectMeta{
Name: job.GetName(),
Namespace: namespace,
OwnerReferences: []metav1.OwnerReference{
*metav1.NewControllerRef(managerPod, managerPod.GroupVersionKind()),
},
},
Template: jobTemplate,
}
err = r.Create(ctx, &template)
if err != nil {
log.Error(err, "Could not create collector PodTemplate")
return reconcile.Result{}, err
}
log.Info("Successfully created collector ImageJob", "job", job.Name)
return reconcile.Result{}, nil
}
// handleCompletedImageJob processes an ImageJob that has reached a terminal
// phase (Completed or Failed). On the first pass it stamps DeleteAfter on the
// job status and returns; on later passes it records and exports metrics
// (when an OTLP endpoint is configured) and delegates cleanup to
// handleJobDeletion. Finally it requeues so the job repeats after the
// configured RepeatInterval, minus the cleanup delay already spent waiting.
func (r *Reconciler) handleCompletedImageJob(ctx context.Context, childJob *eraserv1.ImageJob) (ctrl.Result, error) {
	var err error
	var timeRemaining time.Duration

	eraserConfig, err := r.eraserConfig.Read()
	if err != nil {
		return ctrl.Result{}, err
	}

	otlpEndpoint := eraserConfig.Manager.OTLPEndpoint
	repeatInterval := time.Duration(eraserConfig.Manager.Scheduling.RepeatInterval)
	cleanupCfg := eraserConfig.Manager.ImageJob.Cleanup
	successDelay := time.Duration(cleanupCfg.DelayOnSuccess)
	errDelay := time.Duration(cleanupCfg.DelayOnFailure)

	switch phase := childJob.Status.Phase; phase {
	case eraserv1.PhaseCompleted:
		log.Info("completed phase")
		// First pass after completion: set the deletion deadline and stop.
		if childJob.Status.DeleteAfter == nil {
			childJob.Status.DeleteAfter = util.After(time.Now(), int64(successDelay.Seconds()))
			if err := r.Status().Update(ctx, childJob); err != nil {
				log.Info("Could not update Delete After for job " + childJob.Name)
			}
			return ctrl.Result{}, nil
		}
		if otlpEndpoint != "" {
			// record metrics (job duration in seconds)
			if err := metrics.RecordMetricsController(ctx, otel.GetMeterProvider(), time.Since(startTime).Seconds(), int64(childJob.Status.Succeeded), int64(childJob.Status.Failed)); err != nil {
				log.Error(err, "error recording metrics")
			}
			metrics.ExportMetrics(log, exporter, reader)
		}
		timeRemaining = repeatInterval - successDelay
		if res, err := r.handleJobDeletion(ctx, childJob); err != nil || res.RequeueAfter > 0 {
			return res, err
		}
	case eraserv1.PhaseFailed:
		log.Info("failed phase")
		// First pass after failure: set the deletion deadline and stop.
		if childJob.Status.DeleteAfter == nil {
			childJob.Status.DeleteAfter = util.After(time.Now(), int64(errDelay.Seconds()))
			if err := r.Status().Update(ctx, childJob); err != nil {
				log.Info("Could not update Delete After for job " + childJob.Name)
			}
			return ctrl.Result{}, nil
		}
		if otlpEndpoint != "" {
			// Record the job duration in seconds, matching the Completed branch.
			// (Previously this branch passed Milliseconds(), producing metrics
			// in inconsistent units between the success and failure paths.)
			if err := metrics.RecordMetricsController(ctx, otel.GetMeterProvider(), time.Since(startTime).Seconds(), int64(childJob.Status.Succeeded), int64(childJob.Status.Failed)); err != nil {
				log.Error(err, "error recording metrics")
			}
			metrics.ExportMetrics(log, exporter, reader)
		}
		timeRemaining = repeatInterval - errDelay
		if res, err := r.handleJobDeletion(ctx, childJob); err != nil || res.RequeueAfter > 0 {
			return res, err
		}
	default:
		err = errors.New("should not reach this point for imagejob")
		log.Error(err, "imagejob not in completed or failed phase", "imagejob", childJob)
	}

	// If the cleanup delay already consumed the repeat interval, run again now;
	// otherwise wait out the remainder.
	if timeRemaining <= 0 {
		return ctrl.Result{Requeue: true}, err
	}
	return ctrl.Result{RequeueAfter: timeRemaining}, err
}
================================================
FILE: controllers/imagejob/imagejob_controller.go
================================================
/*
Copyright 2021.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package imagejob
import (
"context"
"fmt"
"net/url"
"os"
"strings"
"time"
"golang.org/x/exp/slices"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/wait"
ctrl "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/controller"
"sigs.k8s.io/controller-runtime/pkg/event"
"sigs.k8s.io/controller-runtime/pkg/handler"
logf "sigs.k8s.io/controller-runtime/pkg/log"
"sigs.k8s.io/controller-runtime/pkg/manager"
"sigs.k8s.io/controller-runtime/pkg/predicate"
"sigs.k8s.io/controller-runtime/pkg/reconcile"
"sigs.k8s.io/controller-runtime/pkg/source"
"sigs.k8s.io/kind/pkg/errors"
"github.com/eraser-dev/eraser/api/unversioned"
"github.com/eraser-dev/eraser/api/unversioned/config"
eraserv1 "github.com/eraser-dev/eraser/api/v1"
controllerUtils "github.com/eraser-dev/eraser/controllers/util"
eraserUtils "github.com/eraser-dev/eraser/pkg/utils"
)
const (
	// defaultFilterLabel is always appended to the configured node-filter
	// selectors (see handleNewJob).
	defaultFilterLabel = "eraser.sh/cleanup.filter"
	// windowsFilterLabel selects Windows nodes. NOTE(review): not referenced
	// in the visible code of this file — confirm its use elsewhere before
	// removing.
	windowsFilterLabel = "kubernetes.io/os=windows"
	// imageJobTypeLabelKey labels eraser pods with one of the job types below;
	// podListOptions uses it to find the pods belonging to a job.
	imageJobTypeLabelKey = "eraser.sh/type"
	collectorJobType     = "collector"
	manualJobType        = "manual"
	// removerContainer is the first-container name that identifies a manual job.
	removerContainer = "remover"
	// managerLabelKey/managerLabelValue identify the controller-manager pod.
	managerLabelValue = "controller-manager"
	managerLabelKey   = "control-plane"
)
// log is the package-level logger for the imagejob controller.
var log = logf.Log.WithName("controller").WithValues("process", "imagejob-controller")

// defaultTolerations tolerates every taint so eraser pods can be scheduled on
// any node.
var defaultTolerations = []corev1.Toleration{
	{
		Operator: corev1.TolerationOpExists,
	},
}
// Add wires a new imagejob Reconciler into mgr.
func Add(mgr manager.Manager, cfg *config.Manager) error {
	r := newReconciler(mgr, cfg)
	return add(mgr, r)
}
// newReconciler returns a new reconcile.Reconciler backed by mgr's client and
// scheme and the given eraser configuration.
func newReconciler(mgr manager.Manager, cfg *config.Manager) reconcile.Reconciler {
	return &Reconciler{
		Client:       mgr.GetClient(),
		scheme:       mgr.GetScheme(),
		eraserConfig: cfg,
	}
}
// Reconciler reconciles ImageJob objects.
type Reconciler struct {
	client.Client
	// scheme is the manager's runtime scheme, captured at construction time.
	scheme *runtime.Scheme
	// eraserConfig provides read access to the live eraser configuration.
	eraserConfig *config.Manager
}
// add adds a new Controller to mgr with r as the reconcile.Reconciler.
// It registers three watches: ImageJob objects themselves, the eraser pods
// they spawn, and the imagejob PodTemplates.
func add(mgr manager.Manager, r reconcile.Reconciler) error {
	// Create a new controller
	c, err := controller.New("imagejob-controller", mgr, controller.Options{
		Reconciler: r,
	})
	if err != nil {
		return err
	}

	// Watch for changes to ImageJob
	err = c.Watch(source.Kind(mgr.GetCache(), &eraserv1.ImageJob{}), &handler.EnqueueRequestForObject{}, predicate.Funcs{
		UpdateFunc: func(e event.UpdateEvent) bool {
			// Terminal jobs are reconciled by their owning controller, not here.
			if job, ok := e.ObjectNew.(*eraserv1.ImageJob); ok && controllerUtils.IsCompletedOrFailed(job.Status.Phase) {
				return false // handled by Owning controller
			}
			return true
		},
		CreateFunc:  controllerUtils.AlwaysOnCreate,
		GenericFunc: controllerUtils.NeverOnGeneric,
		DeleteFunc:  controllerUtils.NeverOnDelete,
	})
	if err != nil {
		return err
	}

	// Watch for changes to pods created by ImageJob (eraser pods).
	// Only events in the eraser namespace pass the predicate.
	err = c.Watch(
		source.Kind(mgr.GetCache(), &corev1.Pod{}),
		handler.EnqueueRequestForOwner(mgr.GetScheme(), mgr.GetRESTMapper(), &corev1.PodTemplate{}),
		predicate.Funcs{
			CreateFunc: func(e event.CreateEvent) bool {
				return e.Object.GetNamespace() == eraserUtils.GetNamespace()
			},
			UpdateFunc: func(e event.UpdateEvent) bool {
				return e.ObjectNew.GetNamespace() == eraserUtils.GetNamespace()
			},
			DeleteFunc: func(e event.DeleteEvent) bool {
				return e.Object.GetNamespace() == eraserUtils.GetNamespace()
			},
		},
	)
	if err != nil {
		return err
	}

	// watch for changes to imagejob podTemplate (owned by controller manager pod).
	// The predicates admit only objects carrying the controller-manager label.
	err = c.Watch(
		source.Kind(mgr.GetCache(), &corev1.PodTemplate{}),
		handler.EnqueueRequestForOwner(mgr.GetScheme(), mgr.GetRESTMapper(), &corev1.Pod{}),
		predicate.Funcs{
			CreateFunc: func(e event.CreateEvent) bool {
				ownerLabels, ok := e.Object.GetLabels()[managerLabelKey]
				return ok && ownerLabels == managerLabelValue
			},
			UpdateFunc: func(e event.UpdateEvent) bool {
				ownerLabels, ok := e.ObjectNew.GetLabels()[managerLabelKey]
				return ok && ownerLabels == managerLabelValue
			},
			DeleteFunc: func(e event.DeleteEvent) bool {
				ownerLabels, ok := e.Object.GetLabels()[managerLabelKey]
				return ok && ownerLabels == managerLabelValue
			},
		},
	)
	if err != nil {
		return err
	}

	return nil
}
//+kubebuilder:rbac:groups=eraser.sh,resources=imagejobs,verbs=get;list;watch;create;delete
//+kubebuilder:rbac:groups="",namespace="system",resources=podtemplates,verbs=get;list;watch;create;update;patch;delete
//+kubebuilder:rbac:groups=eraser.sh,resources=imagejobs/status,verbs=get;update;patch
//+kubebuilder:rbac:groups="",namespace="system",resources=configmaps,verbs=get;list;watch;create;update;patch;delete
// Reconcile moves the cluster toward the desired state for the requested
// ImageJob: an empty phase means the job is new and gets initialized, a
// Running job has its pods inspected, and terminal phases are left to the
// owning controller.
//
// For more details, check Reconcile and its Result here:
// - https://pkg.go.dev/sigs.k8s.io/controller-runtime@v0.8.3/pkg/reconcile
func (r *Reconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) {
	job := &eraserv1.ImageJob{}
	if err := r.Get(ctx, req.NamespacedName, job); err != nil {
		// Mark the (possibly empty) job failed; updateJobStatus is a no-op
		// for a job with no name.
		job.Status.Phase = eraserv1.PhaseFailed
		if updateErr := r.updateJobStatus(ctx, job); updateErr != nil {
			return ctrl.Result{}, updateErr
		}
		return ctrl.Result{}, client.IgnoreNotFound(err)
	}

	switch phase := job.Status.Phase; phase {
	case "":
		if err := r.handleNewJob(ctx, job); err != nil {
			return ctrl.Result{}, fmt.Errorf("reconcile new: %w", err)
		}
	case eraserv1.PhaseRunning:
		if err := r.handleRunningJob(ctx, job); err != nil {
			return ctrl.Result{}, fmt.Errorf("reconcile running: %w", err)
		}
	case eraserv1.PhaseCompleted, eraserv1.PhaseFailed:
		// terminal phases are handled by the Owning controller
	default:
		return ctrl.Result{}, fmt.Errorf("reconcile: unexpected imagejob phase: %s", phase)
	}

	return ctrl.Result{}, nil
}
// podListOptions builds the ListOptions used to find the eraser worker pods
// belonging to the given job template: templates whose first container is the
// remover are manual jobs, everything else is a collector job.
func podListOptions(jobTemplate *corev1.PodTemplate) client.ListOptions {
	jobType := collectorJobType
	if jobTemplate.Template.Spec.Containers[0].Name == removerContainer {
		jobType = manualJobType
	}

	return client.ListOptions{
		Namespace:     eraserUtils.GetNamespace(),
		LabelSelector: labels.SelectorFromSet(map[string]string{imageJobTypeLabelKey: jobType}),
	}
}
// handleRunningJob checks on the pods spawned for a running ImageJob. If the
// job's pod template has vanished the job is marked failed and scheduled for
// near-immediate deletion. Once every pod has finished, pod phases are
// tallied into the job status and the job is marked Completed — or Failed
// when the (success+skipped)/desired ratio falls below the configured
// success ratio.
func (r *Reconciler) handleRunningJob(ctx context.Context, imageJob *eraserv1.ImageJob) error {
	// get eraser pods
	podList := &corev1.PodList{}
	template := corev1.PodTemplate{}
	namespace := eraserUtils.GetNamespace()
	err := r.Get(ctx, types.NamespacedName{
		Name:      imageJob.GetName(),
		Namespace: namespace,
	}, &template)
	if err != nil {
		// Without the template we cannot identify the job's pods; fail the
		// job and let cleanup delete it almost immediately.
		imageJob.Status = eraserv1.ImageJobStatus{
			Phase:       eraserv1.PhaseFailed,
			DeleteAfter: controllerUtils.After(time.Now(), 1),
		}
		return r.updateJobStatus(ctx, imageJob)
	}

	listOpts := podListOptions(&template)
	err = r.List(ctx, podList, &listOpts)
	if err != nil {
		return err
	}

	failed := 0
	success := 0
	skipped := imageJob.Status.Skipped

	if !podsComplete(podList.Items) {
		return nil
	}

	// if all pods are complete, job is complete
	// get status of pods
	for i := range podList.Items {
		if podList.Items[i].Status.Phase == corev1.PodSucceeded {
			success++
		} else {
			failed++
		}
	}

	imageJob.Status = eraserv1.ImageJobStatus{
		Desired:   imageJob.Status.Desired,
		Succeeded: success,
		Skipped:   skipped,
		Failed:    failed,
		Phase:     eraserv1.PhaseCompleted,
	}

	eraserConfig, err := r.eraserConfig.Read()
	if err != nil {
		return err
	}
	managerConfig := eraserConfig.Manager
	successRatio := managerConfig.ImageJob.SuccessRatio

	// BUG FIX: the ratio must be computed in floating point. The previous
	// code evaluated float64(successAndSkipped/desired) — an integer division
	// that truncated every partial success ratio to 0 and panicked with a
	// division by zero when desired == 0.
	successAndSkipped := success + skipped
	if desired := imageJob.Status.Desired; desired > 0 {
		actualRatio := float64(successAndSkipped) / float64(desired)
		if actualRatio < successRatio {
			log.Info(
				"Marking job as failed",
				"success ratio", successRatio,
				"actual ratio", actualRatio,
			)
			imageJob.Status.Phase = eraserv1.PhaseFailed
		}
	}

	return r.updateJobStatus(ctx, imageJob)
}
// handleNewJob transitions a freshly created ImageJob into the Running phase:
// it fetches the job's pod template, applies the configured node filter,
// spawns one eraser pod per eligible node, and then waits (up to 5 minutes
// per pod) for each pod to leave the Pending phase.
func (r *Reconciler) handleNewJob(ctx context.Context, imageJob *eraserv1.ImageJob) error {
	nodes := &corev1.NodeList{}
	err := r.List(ctx, nodes)
	if err != nil {
		return err
	}

	// The pod template shares the job's name and lives in the eraser namespace.
	template := corev1.PodTemplate{}
	err = r.Get(ctx,
		types.NamespacedName{
			Namespace: eraserUtils.GetNamespace(),
			Name:      imageJob.GetName(),
		},
		&template,
	)
	if err != nil {
		return err
	}

	imageJob.Status = eraserv1.ImageJobStatus{
		Desired:   len(nodes.Items),
		Succeeded: 0,
		Skipped:   0, // placeholder, updated below
		Failed:    0,
		Phase:     eraserv1.PhaseRunning,
	}

	skipped := 0
	var nodeList []corev1.Node
	log := log.WithValues("job", imageJob.Name)

	// NODE_NAME is injected into every eraser container via the downward API.
	env := []corev1.EnvVar{
		{Name: "NODE_NAME", ValueFrom: &corev1.EnvVarSource{FieldRef: &corev1.ObjectFieldSelector{FieldPath: "spec.nodeName"}}},
	}

	eraserConfig, err := r.eraserConfig.Read()
	if err != nil {
		return err
	}
	log.V(1).Info("configuration used", "manager", eraserConfig.Manager, "components", eraserConfig.Components)

	// The default cleanup-filter label is always part of the selector set.
	filterOpts := eraserConfig.Manager.NodeFilter
	if !slices.Contains(filterOpts.Selectors, defaultFilterLabel) {
		filterOpts.Selectors = append(filterOpts.Selectors, defaultFilterLabel)
	}

	// "exclude" skips matching nodes; "include" runs only on matching nodes.
	switch filterOpts.Type {
	case "exclude":
		nodeList, skipped, err = filterOutSkippedNodes(nodes, filterOpts.Selectors)
		if err != nil {
			return err
		}
	case "include":
		nodeList, skipped, err = selectIncludedNodes(nodes, filterOpts.Selectors)
		if err != nil {
			return err
		}
	default:
		return errors.Errorf("invalid node filter option")
	}

	imageJob.Status.Skipped = skipped
	if err := r.updateJobStatus(ctx, imageJob); err != nil {
		return err
	}

	var namespacedNames []types.NamespacedName

	podSpecTemplate := template.Template.Spec
	for i := range nodeList {
		log := log.WithValues("node", nodeList[i].Name)
		// Each pod gets its own copy of the template spec, pinned to its node.
		podSpec, err := copyAndFillTemplateSpec(&podSpecTemplate, env, &nodeList[i], &eraserConfig.Manager.Runtime)
		if err != nil {
			return err
		}

		containerName := podSpec.Containers[0].Name
		nodeName := nodeList[i].Name

		pod := &corev1.Pod{
			TypeMeta: metav1.TypeMeta{},
			Spec:     *podSpec,
			ObjectMeta: metav1.ObjectMeta{
				Namespace:    eraserUtils.GetNamespace(),
				GenerateName: "eraser-" + nodeName + "-",
				OwnerReferences: []metav1.OwnerReference{
					*metav1.NewControllerRef(&template, template.GroupVersionKind()),
				},
			},
		}

		pod.Labels = map[string]string{}
		for k, v := range eraserConfig.Manager.AdditionalPodLabels {
			pod.Labels[k] = v
		}

		// Tag the pod so podListOptions can later find it by job type.
		if containerName == removerContainer {
			pod.Labels[imageJobTypeLabelKey] = manualJobType
		} else {
			pod.Labels[imageJobTypeLabelKey] = collectorJobType
		}

		err = r.Create(ctx, pod)
		if err != nil {
			return err
		}
		log.Info("Started "+containerName+" pod on node", "nodeName", nodeName)

		namespacedNames = append(namespacedNames, types.NamespacedName{Name: pod.Name, Namespace: pod.Namespace})
	}

	// Block until every spawned pod has left Pending (or the 5-minute poll
	// times out; a timeout is logged but not treated as an error).
	for _, namespacedName := range namespacedNames {
		//nolint:staticcheck // SA1019: TODO: Replace with PollUntilContextTimeout in future refactor
		if err := wait.PollImmediate(time.Nanosecond, time.Minute*5, r.isPodReady(ctx, namespacedName)); err != nil {
			log.Error(err, "timed out waiting for pod to leave pending state", "pod NamespacedName", namespacedName)
		}
	}

	return nil
}
// isPodReady returns a wait.ConditionFunc that reports true once the named
// pod has left the Pending phase. A missing pod is not an error: the
// condition simply stays false so polling continues until timeout.
func (r *Reconciler) isPodReady(ctx context.Context, namespacedName types.NamespacedName) wait.ConditionFunc {
	return func() (bool, error) {
		currentPod := &corev1.Pod{}
		if err := r.Get(ctx, namespacedName, currentPod); err != nil {
			return false, client.IgnoreNotFound(err)
		}
		// corev1.PodPending is already a PodPhase constant; the previous
		// corev1.PodPhase(...) conversion was redundant.
		return currentPod.Status.Phase != corev1.PodPending, nil
	}
}
// SetupWithManager registers this reconciler with the Manager, watching
// ImageJob resources.
func (r *Reconciler) SetupWithManager(mgr ctrl.Manager) error {
	log.Info("imagejob set up with manager")
	builder := ctrl.NewControllerManagedBy(mgr).For(&eraserv1.ImageJob{})
	return builder.Complete(r)
}
// podsComplete reports whether the eraser worker pods are done. When a pod is
// still Running or Pending, the result is whatever containersFailed reports
// for that pod (a container that already exited non-zero counts as done);
// with no in-flight pods the answer is true.
func podsComplete(podList []corev1.Pod) bool {
	for i := range podList {
		phase := podList[i].Status.Phase
		if phase != corev1.PodRunning && phase != corev1.PodPending {
			continue
		}
		return containersFailed(&podList[i])
	}
	return true
}
// containersFailed reports whether any container in the pod has terminated
// with a non-zero exit code.
func containersFailed(pod *corev1.Pod) bool {
	for i := range pod.Status.ContainerStatuses {
		term := pod.Status.ContainerStatuses[i].State.Terminated
		if term != nil && term.ExitCode != 0 {
			return true
		}
	}
	return false
}
// updateJobStatus persists imageJob's status subresource. A job without a
// name (e.g. one that was never fetched successfully) is skipped silently.
func (r *Reconciler) updateJobStatus(ctx context.Context, imageJob *eraserv1.ImageJob) error {
	if imageJob.Name == "" {
		return nil
	}
	return r.Status().Update(ctx, imageJob)
}
// selectIncludedNodes returns the nodes whose labels match at least one of
// the include selectors, along with a count of the nodes that matched none.
// A selector that fails to parse aborts with (nil, -1, err).
func selectIncludedNodes(nodes *corev1.NodeList, includeNodesSelectors []string) ([]corev1.Node, int, error) {
	skipped := 0
	included := make([]corev1.Node, 0, len(nodes.Items))

nextNode:
	for i := range nodes.Items {
		node := &nodes.Items[i]
		log := log.WithValues("node", node.Name)
		skipped++
		for _, selector := range includeNodesSelectors {
			includedLabels, err := labels.Parse(selector)
			if err != nil {
				return nil, -1, err
			}
			log.V(1).Info("includedLabels", "includedLabels", includedLabels)
			log.V(1).Info("nodeLabels", "nodeLabels", node.Labels)
			if includedLabels.Matches(labels.Set(node.Labels)) {
				// Note: the original logged the single matching selector here
				// (its inner loop variable shadowed the slice) — preserved.
				log.Info("node is included because it matched the specified labels",
					"nodeName", node.Name,
					"labels", node.Labels,
					"specifiedSelectors", selector,
				)
				included = append(included, *node)
				skipped--
				continue nextNode
			}
		}
	}
	return included, skipped, nil
}
// filterOutSkippedNodes returns the nodes matching none of the skip
// selectors, along with a count of those filtered out. A selector that fails
// to parse aborts with (nil, -1, err).
func filterOutSkippedNodes(nodes *corev1.NodeList, skipNodesSelectors []string) ([]corev1.Node, int, error) {
	skipped := 0
	kept := make([]corev1.Node, 0, len(nodes.Items))

nextNode:
	for i := range nodes.Items {
		node := &nodes.Items[i]
		log := log.WithValues("node", node.Name)
		for _, selector := range skipNodesSelectors {
			skipLabels, err := labels.Parse(selector)
			if err != nil {
				return nil, -1, err
			}
			log.V(1).Info("skipLabels", "skipLabels", skipLabels)
			log.V(1).Info("nodeLabels", "nodeLabels", node.Labels)
			if skipLabels.Matches(labels.Set(node.Labels)) {
				log.Info("node will be skipped because it matched the specified labels",
					"nodeName", node.Name,
					"labels", node.Labels,
					"specifiedSelectors", skipNodesSelectors,
				)
				skipped++
				continue nextNode
			}
		}
		kept = append(kept, *node)
	}
	return kept, skipped, nil
}
// copyAndFillTemplateSpec deep-copies the pod template spec and fills in the
// per-node details: the container-runtime socket volume and mount, the given
// env vars, the default tolerations, pull secrets from the
// ERASER_PULL_SECRET_NAMES environment variable, and the target node name.
// Container ordering is positional: [0] remover, [1] collector (optional),
// [2] scanner (optional); the scanner additionally receives the containerd
// namespace env var.
func copyAndFillTemplateSpec(templateSpecTemplate *corev1.PodSpec, env []corev1.EnvVar, node *corev1.Node, runtimeSpec *unversioned.RuntimeSpec) (*corev1.PodSpec, error) {
	nodeName := node.Name

	// The runtime address is a URL; its path component is the host socket
	// to mount into the pod.
	u, err := url.Parse(runtimeSpec.Address)
	if err != nil {
		return nil, err
	}

	volumes := []corev1.Volume{
		{Name: "runtime-sock-volume", VolumeSource: corev1.VolumeSource{HostPath: &corev1.HostPathVolumeSource{Path: u.Path}}},
	}
	volumeMounts := []corev1.VolumeMount{
		{MountPath: controllerUtils.CRIPath, Name: "runtime-sock-volume"},
	}

	templateSpec := templateSpecTemplate.DeepCopy()
	// Replaces (not appends to) any tolerations from the template.
	templateSpec.Tolerations = defaultTolerations

	eraserImg := &templateSpec.Containers[0]
	eraserImg.VolumeMounts = append(eraserImg.VolumeMounts, volumeMounts...)
	eraserImg.Env = append(eraserImg.Env, env...)

	if len(templateSpec.Containers) > 1 {
		collectorImg := &templateSpec.Containers[1]
		collectorImg.VolumeMounts = append(collectorImg.VolumeMounts, volumeMounts...)
		collectorImg.Env = append(collectorImg.Env, env...)
	}

	if len(templateSpec.Containers) > 2 {
		scannerImg := &templateSpec.Containers[2]
		scannerImg.VolumeMounts = append(scannerImg.VolumeMounts, volumeMounts...)
		// The scanner also needs the containerd namespace setting.
		scannerImg.Env = append(scannerImg.Env,
			corev1.EnvVar{
				Name:  controllerUtils.EnvVarContainerdNamespaceKey,
				Value: controllerUtils.EnvVarContainerdNamespaceValue,
			},
		)
		scannerImg.Env = append(scannerImg.Env, env...)
	}

	// Comma-separated pull-secret names supplied via the environment.
	secrets := os.Getenv("ERASER_PULL_SECRET_NAMES")
	if secrets != "" {
		for _, secret := range strings.Split(secrets, ",") {
			templateSpec.ImagePullSecrets = append(templateSpec.ImagePullSecrets, corev1.LocalObjectReference{Name: secret})
		}
	}

	// The socket volume is prepended ahead of any template-defined volumes.
	templateSpec.Volumes = append(volumes, templateSpec.Volumes...)
	templateSpec.NodeName = nodeName
	return templateSpec, nil
}
================================================
FILE: controllers/imagelist/imagelist_controller.go
================================================
/*
Copyright 2021.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package imagelist
import (
"context"
"encoding/json"
"fmt"
"os"
"os/signal"
"path/filepath"
"syscall"
"time"
"go.opentelemetry.io/otel"
corev1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/types"
ctrl "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/controller"
"sigs.k8s.io/controller-runtime/pkg/event"
"sigs.k8s.io/controller-runtime/pkg/handler"
logf "sigs.k8s.io/controller-runtime/pkg/log"
"sigs.k8s.io/controller-runtime/pkg/manager"
"sigs.k8s.io/controller-runtime/pkg/predicate"
"sigs.k8s.io/controller-runtime/pkg/reconcile"
"sigs.k8s.io/controller-runtime/pkg/source"
"github.com/eraser-dev/eraser/api/unversioned/config"
eraserv1 "github.com/eraser-dev/eraser/api/v1"
"github.com/eraser-dev/eraser/controllers/util"
"github.com/eraser-dev/eraser/pkg/logger"
"github.com/eraser-dev/eraser/pkg/metrics"
eraserUtils "github.com/eraser-dev/eraser/pkg/utils"
sdkmetric "go.opentelemetry.io/otel/sdk/metric"
)
const (
	// imgListPath is the in-container directory where the imagelist configmap
	// is mounted for the remover.
	imgListPath = "/run/eraser.sh/imagelist"
	// ownerLabelValue marks ImageJobs created by this controller.
	ownerLabelValue = "imagelist-controller"
)
var (
	// log is the package-level logger for the imagelist controller.
	log = logf.Log.WithName("controller").WithValues("process", "imagelist-controller")
	// imageList is the only supported ImageList name; requests for any other
	// name are ignored by Reconcile.
	imageList = types.NamespacedName{Name: "imagelist"}
	// ownerLabel selects ImageJobs owned by this controller (built in init).
	ownerLabel labels.Selector
	// startTime records when the current ImageJob was created, for metrics.
	startTime time.Time
	// exporter, reader, and provider are the OTLP metrics plumbing, set up in
	// newReconciler when an OTLP endpoint is configured.
	exporter sdkmetric.Exporter
	reader   sdkmetric.Reader
	provider *sdkmetric.MeterProvider
)
// init pre-parses the owner-label selector used to find ImageJobs created by
// this controller. A parse failure is a programming error, hence the panic.
func init() {
	selector := fmt.Sprintf("%s=%s", util.ImageJobOwnerLabelKey, ownerLabelValue)
	parsed, err := labels.Parse(selector)
	if err != nil {
		panic(err)
	}
	ownerLabel = parsed
}
// Add wires a new imagelist Reconciler into mgr, surfacing any construction
// error.
func Add(mgr manager.Manager, cfg *config.Manager) error {
	rec, err := newReconciler(mgr, cfg)
	if err != nil {
		return err
	}
	return add(mgr, rec)
}
// newReconciler returns a new reconcile.Reconciler. When an OTLP endpoint is
// configured it also initializes the package-level metrics exporter/reader
// and registers the global meter provider.
func newReconciler(mgr manager.Manager, cfg *config.Manager) (reconcile.Reconciler, error) {
	c, err := cfg.Read()
	if err != nil {
		return nil, err
	}

	otlpEndpoint := c.Manager.OTLPEndpoint
	if otlpEndpoint != "" {
		ctx, cancel := signal.NotifyContext(context.Background(), os.Interrupt, syscall.SIGTERM)
		// NOTE(review): cancel fires as soon as this function returns, which
		// cancels ctx and stops signal delivery — confirm this is the
		// intended lifetime for the metrics configuration context.
		defer cancel()
		exporter, reader, provider = metrics.ConfigureMetrics(ctx, log, otlpEndpoint)
		otel.SetMeterProvider(provider)
	}

	rec := &Reconciler{
		Client:       mgr.GetClient(),
		scheme:       mgr.GetScheme(),
		eraserConfig: cfg,
	}
	return rec, nil
}
// ImageJobReconciler reconciles a ImageJob object.
// NOTE(review): this type is not referenced in the visible portion of this
// file and may be vestigial — confirm before removing.
type ImageJobReconciler struct {
	client.Client
}
// Reconciler reconciles ImageList objects.
type Reconciler struct {
	client.Client
	// scheme is the manager's runtime scheme, captured at construction time.
	scheme *runtime.Scheme
	// eraserConfig provides read access to the live eraser configuration.
	eraserConfig *config.Manager
}
//+kubebuilder:rbac:groups=eraser.sh,resources=imagelists,verbs=get;list;watch
//+kubebuilder:rbac:groups="",namespace="system",resources=podtemplates,verbs=get;list;watch;create;update;patch;delete
//+kubebuilder:rbac:groups=eraser.sh,resources=imagelists/status,verbs=get;update;patch
//+kubebuilder:rbac:groups="",resources=nodes,verbs=get;list;watch
//+kubebuilder:rbac:groups="",namespace="system",resources=pods,verbs=get;list;watch;update;create;delete
// Reconcile drives an ImageList toward its desired state. Only the singleton
// list named "imagelist" is supported; other names are ignored. With no child
// ImageJob a new one is started; with exactly one child in a terminal phase
// its results are processed; a still-running child causes a requeue until it
// finishes. More than one child is an error.
//
// For more details, check Reconcile and its Result here:
// - https://pkg.go.dev/sigs.k8s.io/controller-runtime@v0.8.3/pkg/reconcile
func (r *Reconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) {
	// Ignore unsupported lists
	if req.NamespacedName != imageList {
		log.Info("Ignoring unsupported imagelist name", "name", req.Name)
		return reconcile.Result{}, nil
	}

	list := eraserv1.ImageList{}
	if err := r.Get(ctx, req.NamespacedName, &list); err != nil {
		return ctrl.Result{}, client.IgnoreNotFound(err)
	}

	jobList := eraserv1.ImageJobList{}
	if err := r.List(ctx, &jobList); client.IgnoreNotFound(err) != nil {
		return ctrl.Result{}, err
	}

	owned := util.FilterJobListByOwner(jobList.Items, metav1.NewControllerRef(&list, list.GroupVersionKind()))
	switch len(owned) {
	case 0:
		return r.handleImageListEvent(ctx, &list)
	case 1:
		job := owned[0]
		// If we got here because of a completed ImageJob:
		if util.IsCompletedOrFailed(job.Status.Phase) {
			return r.handleJobListEvent(ctx, &list, &job)
		}
		// If we got here due to an update to the ImageList, and there is an
		// ImageJob already running, keep requeueing until it completes.
		return ctrl.Result{RequeueAfter: time.Minute}, nil
	default:
		return ctrl.Result{}, fmt.Errorf("there are multiple child imagejobs running")
	}
}
// handleJobListEvent processes a child ImageJob in a terminal phase: it
// forwards results to handleJobCompletion, stamps DeleteAfter on the first
// pass and stops, records metrics (when an OTLP endpoint is configured) on
// later passes, and finally hands off to handleJobDeletion. A job in any
// non-terminal phase is an error.
func (r *Reconciler) handleJobListEvent(ctx context.Context, imageList *eraserv1.ImageList, job *eraserv1.ImageJob) (ctrl.Result, error) {
	phase := job.Status.Phase
	if phase == eraserv1.PhaseCompleted || phase == eraserv1.PhaseFailed {
		err := r.handleJobCompletion(ctx, imageList, job)
		if err != nil {
			return ctrl.Result{}, err
		}

		eraserConfig, err := r.eraserConfig.Read()
		if err != nil {
			return ctrl.Result{}, err
		}

		cleanupCfg := eraserConfig.Manager.ImageJob.Cleanup
		successDelay := time.Duration(cleanupCfg.DelayOnSuccess)
		errDelay := time.Duration(cleanupCfg.DelayOnFailure)

		// First pass after completion: set the deletion deadline and stop;
		// actual deletion happens on a later reconcile.
		if job.Status.DeleteAfter == nil {
			switch job.Status.Phase {
			case eraserv1.PhaseCompleted:
				job.Status.DeleteAfter = util.After(time.Now(), int64(successDelay.Seconds()))
			case eraserv1.PhaseFailed:
				job.Status.DeleteAfter = util.After(time.Now(), int64(errDelay.Seconds()))
			}
			if err := r.Status().Update(ctx, job); err != nil {
				log.Info("Could not update Delete After for job " + job.Name)
			}
			return ctrl.Result{}, nil
		}

		otlpEndpoint := eraserConfig.Manager.OTLPEndpoint
		if otlpEndpoint != "" {
			// record metrics
			if err := metrics.RecordMetricsController(ctx, otel.GetMeterProvider(), float64(time.Since(startTime).Seconds()), int64(job.Status.Succeeded), int64(job.Status.Failed)); err != nil {
				log.Error(err, "error recording metrics")
			}
			metrics.ExportMetrics(log, exporter, reader)
		}

		return r.handleJobDeletion(ctx, job)
	}

	return ctrl.Result{}, fmt.Errorf("unexpected job phase: '%s'", job.Status.Phase)
}
// handleJobDeletion deletes a terminal ImageJob and its pod template once the
// job's DeleteAfter timestamp has passed; before then it requeues for the
// remaining delay.
func (r *Reconciler) handleJobDeletion(ctx context.Context, job *eraserv1.ImageJob) (ctrl.Result, error) {
	until := time.Until(job.Status.DeleteAfter.Time)
	if until > 0 {
		// Log key fixed: was the misspelled "deleteAter".
		log.Info("Delaying imagejob delete", "job", job.Name, "deleteAfter", job.Status.DeleteAfter)
		return ctrl.Result{RequeueAfter: until}, nil
	}

	log.Info("Deleting imagejob", "job", job.Name)
	if err := r.Delete(ctx, job); err != nil {
		return ctrl.Result{}, err
	}

	// The pod template shares the job's name and lives in the eraser namespace.
	template := corev1.PodTemplate{}
	if err := r.Get(ctx,
		types.NamespacedName{
			Namespace: eraserUtils.GetNamespace(),
			Name:      job.GetName(),
		},
		&template,
	); err != nil {
		return ctrl.Result{}, err
	}

	log.Info("Deleting pod template", "template", template.Name)
	if err := r.Delete(ctx, &template); err != nil {
		return ctrl.Result{}, err
	}

	return ctrl.Result{}, nil
}
// handleImageListEvent handles a create/update event for an ImageList.
// It snapshots the requested images into an immutable ConfigMap, builds the
// remover pod template from the manager configuration, and creates an
// ImageJob (plus the PodTemplate the job pods are stamped from) to remove
// those images on each node.
func (r *Reconciler) handleImageListEvent(ctx context.Context, imageList *eraserv1.ImageList) (ctrl.Result, error) {
	imgListJSON, err := json.Marshal(imageList.Spec.Images)
	if err != nil {
		return ctrl.Result{}, fmt.Errorf("marshal image list: %w", err)
	}

	// The image list is mounted into the remover container from this configmap.
	configMap := corev1.ConfigMap{
		ObjectMeta: metav1.ObjectMeta{
			GenerateName: "imagelist-",
			Namespace:    eraserUtils.GetNamespace(),
		},
		Immutable: eraserUtils.BoolPtr(true),
		Data:      map[string]string{"images": string(imgListJSON)},
	}
	if err := r.Create(ctx, &configMap); err != nil {
		return ctrl.Result{}, fmt.Errorf("create configmap: %w", err)
	}
	configName := configMap.Name

	args := []string{
		"--imagelist=" + filepath.Join(imgListPath, "images"),
		"--log-level=" + logger.GetLevel(),
	}

	eraserConfig, err := r.eraserConfig.Read()
	if err != nil {
		return ctrl.Result{}, err
	}

	eraserContainerCfg := eraserConfig.Components.Remover
	imageCfg := eraserContainerCfg.Image
	image := fmt.Sprintf("%s:%s", imageCfg.Repo, imageCfg.Tag)

	pullSecrets := []corev1.LocalObjectReference{}
	for _, secret := range eraserConfig.Manager.PullSecrets {
		pullSecrets = append(pullSecrets, corev1.LocalObjectReference{Name: secret})
	}

	jobTemplate := corev1.PodTemplateSpec{
		Spec: corev1.PodSpec{
			Volumes: []corev1.Volume{
				{
					Name: configName,
					VolumeSource: corev1.VolumeSource{
						ConfigMap: &corev1.ConfigMapVolumeSource{LocalObjectReference: corev1.LocalObjectReference{Name: configName}},
					},
				},
			},
			ImagePullSecrets:  pullSecrets,
			RestartPolicy:     corev1.RestartPolicyNever,
			PriorityClassName: eraserConfig.Manager.PriorityClassName,
			Containers: []corev1.Container{
				{
					Name:            "remover",
					Image:           image,
					ImagePullPolicy: corev1.PullIfNotPresent,
					Args:            args,
					VolumeMounts: []corev1.VolumeMount{
						{MountPath: imgListPath, Name: configName},
					},
					Resources: corev1.ResourceRequirements{
						Requests: corev1.ResourceList{
							"cpu":    eraserContainerCfg.Request.CPU,
							"memory": eraserContainerCfg.Request.Mem,
						},
						Limits: corev1.ResourceList{
							"memory": eraserContainerCfg.Limit.Mem,
						},
					},
					SecurityContext: eraserUtils.SharedSecurityContext,
					// env vars for exporting metrics
					Env: []corev1.EnvVar{
						{
							Name:  "OTEL_EXPORTER_OTLP_ENDPOINT",
							Value: eraserConfig.Manager.OTLPEndpoint,
						},
						{
							Name:  "OTEL_SERVICE_NAME",
							Value: "remover",
						},
					},
				},
			},
			ServiceAccountName: "eraser-imagejob-pods",
		},
	}

	job := &eraserv1.ImageJob{
		ObjectMeta: metav1.ObjectMeta{
			GenerateName: "imagejob-",
			Labels: map[string]string{
				util.ImageJobOwnerLabelKey: ownerLabelValue,
			},
			OwnerReferences: []metav1.OwnerReference{
				*metav1.NewControllerRef(imageList, eraserv1.GroupVersion.WithKind("ImageList")),
			},
		},
	}

	configmapList := &corev1.ConfigMapList{}
	if err := r.List(ctx, configmapList); err != nil {
		log.Info("Could not get list of configmaps")
		return reconcile.Result{}, err
	}

	// Exclusion-list configmaps are mounted into every container so the
	// remover can skip excluded images.
	exclusionMount, exclusionVolume, err := util.GetExclusionVolume(configmapList)
	if err != nil {
		log.Info("Could not get exclusion mounts and volumes")
		return reconcile.Result{}, err
	}
	for i := range jobTemplate.Spec.Containers {
		jobTemplate.Spec.Containers[i].VolumeMounts = append(jobTemplate.Spec.Containers[i].VolumeMounts, exclusionMount...)
	}
	jobTemplate.Spec.Volumes = append(jobTemplate.Spec.Volumes, exclusionVolume...)

	err = r.Create(ctx, job)
	startTime = time.Now()
	log.Info("creating imagejob", "job", job.Name)
	if err != nil {
		if errors.IsNotFound(err) {
			return reconcile.Result{}, nil
		}
		return reconcile.Result{}, err
	}

	// Get the manager pod (label control-plane=controller-manager) so the pod
	// template below can be owned by it and garbage-collected along with it.
	podList := corev1.PodList{}
	if err := r.List(ctx, &podList, client.InNamespace(eraserUtils.GetNamespace()), client.MatchingLabels{"control-plane": "controller-manager"}); err != nil {
		log.Info("Unable to list controller-manager pod")
		// Fix: previously execution fell through on error and panicked below
		// indexing an empty list; requeue with the error instead.
		return reconcile.Result{}, err
	}
	if len(podList.Items) == 0 {
		log.Info("Incorrect number of controller-manager pods", "number of pods", 0)
		// Fix: indexing podList.Items[0] with zero items would panic.
		return reconcile.Result{}, fmt.Errorf("no controller-manager pod found in namespace %q", eraserUtils.GetNamespace())
	}
	if len(podList.Items) != 1 {
		log.Info("Incorrect number of controller-manager pods", "number of pods", len(podList.Items))
	}
	managerPod := &podList.Items[0]

	template := corev1.PodTemplate{
		ObjectMeta: metav1.ObjectMeta{
			Name:      job.GetName(),
			Namespace: eraserUtils.GetNamespace(),
			OwnerReferences: []metav1.OwnerReference{
				*metav1.NewControllerRef(managerPod, managerPod.GroupVersionKind()),
			},
		},
		Template: jobTemplate,
	}
	err = r.Create(ctx, &template)
	if err != nil {
		return reconcile.Result{}, err
	}

	// Re-parent the configmap to the job so it is cleaned up with the job.
	configMap.OwnerReferences = []metav1.OwnerReference{*metav1.NewControllerRef(job, eraserv1.GroupVersion.WithKind("ImageJob"))}
	err = r.Update(ctx, &configMap)
	if err != nil {
		return reconcile.Result{}, err
	}

	return ctrl.Result{}, nil
}
// handleJobCompletion copies the terminal counters of a finished ImageJob
// onto the owning ImageList's status and stamps the completion time.
func (r *Reconciler) handleJobCompletion(ctx context.Context, imageList *eraserv1.ImageList, job *eraserv1.ImageJob) error {
	completedAt := metav1.Now()

	imageList.Status.Success = int64(job.Status.Succeeded)
	imageList.Status.Failed = int64(job.Status.Failed)
	imageList.Status.Skipped = int64(job.Status.Skipped)
	imageList.Status.Timestamp = &completedAt

	return r.Status().Update(ctx, imageList)
}
// add wires the imagelist-controller into the manager. It reconciles
// ImageList objects on spec (generation) changes and re-enqueues the owning
// ImageList whenever one of its ImageJobs reaches a terminal phase.
func add(mgr manager.Manager, r reconcile.Reconciler) error {
	c, err := controller.New("imagelist-controller", mgr, controller.Options{Reconciler: r})
	if err != nil {
		return err
	}

	// Reconcile on ImageList spec changes only (generation bumps).
	if err := c.Watch(
		source.Kind(mgr.GetCache(), &eraserv1.ImageList{}),
		&handler.EnqueueRequestForObject{}, predicate.GenerationChangedPredicate{}); err != nil {
		return err
	}

	// Only update events on labeled, completed/failed ImageJobs are relevant.
	jobFinished := func(e event.UpdateEvent) bool {
		job, ok := e.ObjectNew.(*eraserv1.ImageJob)
		if !ok || !util.IsCompletedOrFailed(job.Status.Phase) {
			return false
		}
		return ownerLabel.Matches(labels.Set(job.Labels))
	}

	return c.Watch(
		source.Kind(mgr.GetCache(), &eraserv1.ImageJob{}),
		handler.EnqueueRequestForOwner(mgr.GetScheme(), mgr.GetRESTMapper(), &eraserv1.ImageList{}),
		predicate.Funcs{
			// Do nothing on Create, Delete, or Generic events.
			CreateFunc:  util.NeverOnCreate,
			DeleteFunc:  util.NeverOnDelete,
			GenericFunc: util.NeverOnGeneric,
			UpdateFunc:  jobFinished,
		},
	)
}
================================================
FILE: controllers/suite_test.go
================================================
/*
Copyright 2021.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package controllers
import (
"path/filepath"
"testing"
ginkgov2 "github.com/onsi/ginkgo/v2"
gomega "github.com/onsi/gomega"
"k8s.io/client-go/kubernetes/scheme"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/envtest"
logf "sigs.k8s.io/controller-runtime/pkg/log"
"sigs.k8s.io/controller-runtime/pkg/log/zap"
eraserv1alpha2 "github.com/eraser-dev/eraser/api/v1alpha2"
//+kubebuilder:scaffold:imports
)
// These tests use Ginkgo (BDD-style Go testing framework). Refer to
// http://onsi.github.io/ginkgo/ to learn more about Ginkgo.
var (
	k8sClient client.Client        // client talking to the envtest API server; set in BeforeSuite
	testEnv   *envtest.Environment // local control plane started in BeforeSuite, stopped in AfterSuite
)
// TestAPIs is the single go-test entry point for this package; it hands
// control to the Ginkgo suite runner.
func TestAPIs(t *testing.T) {
	gomega.RegisterFailHandler(ginkgov2.Fail)
	ginkgov2.RunSpecs(t, "Controller Suite")
}
var _ = ginkgov2.BeforeSuite(func() {
	// Route controller-runtime logging through Ginkgo's writer in dev mode.
	logf.SetLogger(zap.New(zap.WriteTo(ginkgov2.GinkgoWriter), zap.UseDevMode(true)))
	ginkgov2.By("bootstrapping test environment")
	// Start a local control plane with this project's CRDs installed.
	testEnv = &envtest.Environment{
		CRDDirectoryPaths:     []string{filepath.Join("..", "config", "crd", "bases")},
		ErrorIfCRDPathMissing: true,
	}
	cfg, err := testEnv.Start()
	gomega.Expect(err).NotTo(gomega.HaveOccurred())
	gomega.Expect(cfg).NotTo(gomega.BeNil())
	// Register the eraser v1alpha2 types with the default client-go scheme.
	err = eraserv1alpha2.AddToScheme(scheme.Scheme)
	gomega.Expect(err).NotTo(gomega.HaveOccurred())
	//+kubebuilder:scaffold:scheme
	k8sClient, err = client.New(cfg, client.Options{Scheme: scheme.Scheme})
	gomega.Expect(err).NotTo(gomega.HaveOccurred())
	gomega.Expect(k8sClient).NotTo(gomega.BeNil())
})
var _ = ginkgov2.AfterSuite(func() {
	ginkgov2.By("tearing down the test environment")
	// Stop the envtest control plane started in BeforeSuite.
	err := testEnv.Stop()
	gomega.Expect(err).NotTo(gomega.HaveOccurred())
})
================================================
FILE: controllers/util/util.go
================================================
// Package util provides utility functions and constants for eraser controllers.
package util
import (
"flag"
"os"
"time"
eraserv1 "github.com/eraser-dev/eraser/api/v1"
batchv1 "k8s.io/api/batch/v1"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels"
"sigs.k8s.io/controller-runtime/pkg/event"
)
var (
	// RemoverImage is the -remover-image command-line flag (empty by default).
	RemoverImage = flag.String("remover-image", "", "remover image")
	// EraserConfigmapName is the name of the manager configuration configmap.
	// It can be overridden via the ERASER_CONFIGMAP_NAME env var (see init).
	EraserConfigmapName = "eraser-manager-config"
)
// init lets deployments override the manager configmap name through the
// ERASER_CONFIGMAP_NAME environment variable.
func init() {
	if configmapName := os.Getenv("ERASER_CONFIGMAP_NAME"); configmapName != "" {
		EraserConfigmapName = configmapName
	}
}
const (
	// ImageJobOwnerLabelKey is the label key marking which controller owns an ImageJob.
	ImageJobOwnerLabelKey = "eraser.sh/job-owner"
	// exclusionLabel is the label selector identifying exclusion-list configmaps
	// (see GetExclusionVolume).
	exclusionLabel = "eraser.sh/exclude.list=true"
	// EnvVarContainerdNamespaceKey/Value select the containerd namespace
	// ("k8s.io" is the namespace kubernetes uses for its images).
	EnvVarContainerdNamespaceKey   = "CONTAINERD_NAMESPACE"
	EnvVarContainerdNamespaceValue = "k8s.io"
	// CRIPath is the container-runtime socket path constant.
	CRIPath = "/run/cri/cri.sock"
)
// NeverOnCreate is an event predicate that ignores all Create events.
func NeverOnCreate(_ event.CreateEvent) bool {
	return false
}

// NeverOnDelete is an event predicate that ignores all Delete events.
func NeverOnDelete(_ event.DeleteEvent) bool {
	return false
}

// NeverOnGeneric is an event predicate that ignores all Generic events.
func NeverOnGeneric(_ event.GenericEvent) bool {
	return false
}

// NeverOnUpdate is an event predicate that ignores all Update events.
func NeverOnUpdate(_ event.UpdateEvent) bool {
	return false
}

// AlwaysOnCreate is an event predicate that accepts all Create events.
func AlwaysOnCreate(_ event.CreateEvent) bool {
	return true
}

// AlwaysOnDelete is an event predicate that accepts all Delete events.
func AlwaysOnDelete(_ event.DeleteEvent) bool {
	return true
}

// AlwaysOnGeneric is an event predicate that accepts all Generic events.
func AlwaysOnGeneric(_ event.GenericEvent) bool {
	return true
}

// AlwaysOnUpdate is an event predicate that accepts all Update events.
func AlwaysOnUpdate(_ event.UpdateEvent) bool {
	return true
}
// IsCompletedOrFailed reports whether the given job phase is terminal
// (either completed or failed).
func IsCompletedOrFailed(p eraserv1.JobPhase) bool {
	switch p {
	case eraserv1.PhaseCompleted, eraserv1.PhaseFailed:
		return true
	default:
		return false
	}
}
// FilterJobListByOwner returns the subset of jobs that carry an owner
// reference whose UID matches the given owner. A job is included at most
// once, even if it somehow holds duplicate matching references.
func FilterJobListByOwner(jobs []eraserv1.ImageJob, owner *metav1.OwnerReference) []eraserv1.ImageJob {
	filtered := []eraserv1.ImageJob{}
	for i := range jobs {
		for _, ref := range jobs[i].OwnerReferences {
			if ref.UID == owner.UID {
				filtered = append(filtered, jobs[i])
				break // stop scanning this job's references
			}
		}
	}
	return filtered
}
// FilterBatchJobListByOwner returns the subset of batch Jobs that carry an
// owner reference whose UID matches the given owner. Each job appears at
// most once in the result.
func FilterBatchJobListByOwner(jobs []batchv1.Job, owner *metav1.OwnerReference) []batchv1.Job {
	filtered := []batchv1.Job{}
	for i := range jobs {
		for _, ref := range jobs[i].OwnerReferences {
			if ref.UID == owner.UID {
				filtered = append(filtered, jobs[i])
				break // stop scanning this job's references
			}
		}
	}
	return filtered
}
// After returns t shifted forward by the given number of seconds, wrapped
// as a *metav1.Time suitable for API object fields.
func After(t time.Time, seconds int64) *metav1.Time {
	newT := metav1.NewTime(t.Add(time.Duration(seconds) * time.Second))
	return &newT
}
// GetExclusionVolume scans the given configmap list for configmaps labeled
// as exclusion lists (eraser.sh/exclude.list=true) and returns, for each
// match, a volume plus a matching mount at "exclude-<name>" so job
// containers can read the exclusion data.
func GetExclusionVolume(configmapList *corev1.ConfigMapList) ([]corev1.VolumeMount, []corev1.Volume, error) {
	selector, err := labels.Parse(exclusionLabel)
	if err != nil {
		return nil, nil, err
	}

	var mounts []corev1.VolumeMount
	var volumes []corev1.Volume
	for i := range configmapList.Items {
		cm := &configmapList.Items[i]
		if !selector.Matches(labels.Set(cm.Labels)) {
			continue
		}
		mounts = append(mounts, corev1.VolumeMount{MountPath: "exclude-" + cm.Name, Name: cm.Name})
		volumes = append(volumes, corev1.Volume{
			Name: cm.Name,
			VolumeSource: corev1.VolumeSource{
				ConfigMap: &corev1.ConfigMapVolumeSource{LocalObjectReference: corev1.LocalObjectReference{Name: cm.Name}},
			},
		})
	}
	return mounts, volumes, nil
}
================================================
FILE: demo/README.md
================================================
# Eraser Demo
This demo uses [demo magic](https://github.com/paxtonhare/demo-magic) to run the scripted commands.
## Prerequisites
The following should be installed and available on your `PATH`:
- [Kind](https://kind.sigs.k8s.io/docs/user/quick-start/)
- [Kubectl](https://kubernetes.io/docs/reference/kubectl/)
- [Docker](https://www.docker.com/)
- [Helm](https://helm.sh/docs/intro/install/)
Get Helm repo:
```
$ helm repo add eraser https://eraser-dev.github.io/eraser/charts
```
Run `demo.sh` to start the demo!
```
$ ./demo.sh
```
================================================
FILE: demo/demo-magic.sh
================================================
#!/usr/bin/env bash
###############################################################################
#
# demo-magic.sh
#
# Copyright (c) 2015 Paxton Hare
#
# This script lets you script demos in bash. It runs through your demo script when you press
# ENTER. It simulates typing and runs commands.
#
###############################################################################
# the speed to "type" the text (characters per second, consumed by pv)
TYPE_SPEED=20

# no wait after "p" or "pe"
NO_WAIT=false

# if > 0, will pause for this amount of seconds before automatically proceeding with any p or pe
PROMPT_TIMEOUT=0

# don't show command number unless user specifies it
SHOW_CMD_NUMS=false

# handy color vars for pretty prompts
BLACK="\033[0;30m"
BLUE="\033[0;34m"
GREEN="\033[0;32m"
GREY="\033[0;90m"
CYAN="\033[0;36m"
RED="\033[0;31m"
PURPLE="\033[0;35m"
BROWN="\033[0;33m"
WHITE="\033[1;37m"
COLOR_RESET="\033[0m"

# running counter for numbered commands (-c flag)
C_NUM=0

# prompt and command color which can be overridden
DEMO_PROMPT="$ "
DEMO_CMD_COLOR=$WHITE
DEMO_COMMENT_COLOR=$GREY
##
# prints the script usage (shown for -h, mirrors the getopts flags below)
##
function usage() {
  echo -e ""
  echo -e "Usage: $0 [options]"
  echo -e ""
  echo -e "\tWhere options is one or more of:"
  echo -e "\t-h\tPrints Help text"
  echo -e "\t-d\tDebug mode. Disables simulated typing"
  echo -e "\t-n\tNo wait"
  echo -e "\t-w\tWaits max the given amount of seconds before proceeding with demo (e.g. '-w5')"
  echo -e ""
}
##
# wait for user to press ENTER
# if $PROMPT_TIMEOUT > 0 this will be used as the max time for proceeding automatically
# (note: this intentionally shadows the shell builtin `wait`)
##
function wait() {
  if [[ "$PROMPT_TIMEOUT" == "0" ]]; then
    read -rs
  else
    read -rst "$PROMPT_TIMEOUT"
  fi
}
##
# print command only. Useful for when you want to pretend to run a command
#
# takes 1 param - the string command to print
#
# usage: p "ls -l"
#
##
function p() {
  # lines starting with "#" are rendered as comments, everything else as a command
  if [[ ${1:0:1} == "#" ]]; then
    cmd=$DEMO_COMMENT_COLOR$1$COLOR_RESET
  else
    cmd=$DEMO_CMD_COLOR$1$COLOR_RESET
  fi

  # render the prompt: run a throwaway interactive bash to evaluate PS1.
  # Fix: the redirection "</dev/null 2>&1" had been truncated to "&1",
  # which backgrounded bash and piped a bogus "1" command into sed.
  x=$(PS1="$DEMO_PROMPT" "$BASH" --norc -i </dev/null 2>&1 | sed -n '${s/^\(.*\)exit$/\1/p;}')

  # show command number if selected
  if $SHOW_CMD_NUMS; then
    printf "[$((++C_NUM))] $x"
  else
    printf "$x"
  fi

  # wait for the user to press a key before typing the command
  if [ $NO_WAIT = false ]; then
    wait
  fi

  if [[ -z $TYPE_SPEED ]]; then
    echo -en "$cmd"
  else
    # simulate typing at TYPE_SPEED chars/sec with a little random jitter
    # (modern $((...)) arithmetic replaces the deprecated $[...] form)
    echo -en "$cmd" | pv -qL $((TYPE_SPEED + (-2 + RANDOM % 5)))
  fi

  # wait for the user to press a key before moving on
  if [ $NO_WAIT = false ]; then
    wait
  fi
  echo ""
}
##
# Prints and executes a command
#
# takes 1 parameter - the string command to run
#
# usage: pe "ls -l"
#
##
function pe() {
  # print the command (simulated typing), then actually run it
  p "$@"
  run_cmd "$@"
}
##
# print and executes a command immediately
#
# takes 1 parameter - the string command to run
#
# usage: pei "ls -l"
#
##
function pei {
  # same as pe, but without waiting for ENTER (NO_WAIT only for this call)
  NO_WAIT=true pe "$@"
}
##
# Enters script into interactive mode
#
# and allows newly typed commands to be executed within the script
#
# usage : cmd
#
##
function cmd() {
  # render the prompt: run a throwaway interactive bash to evaluate PS1.
  # Fix: the redirection "</dev/null 2>&1" had been truncated to "&1",
  # which backgrounded bash and piped a bogus "1" command into sed.
  x=$(PS1="$DEMO_PROMPT" "$BASH" --norc -i </dev/null 2>&1 | sed -n '${s/^\(.*\)exit$/\1/p;}')
  printf "$x\033[0m"
  # -r keeps backslashes in the typed command literal
  read -r command
  run_cmd "${command}"
}
# Executes the given command(s) via eval, temporarily trapping SIGINT so a
# Ctrl-C aborts only the demoed command, not the whole demo script.
function run_cmd() {
  function handle_cancel() {
    printf ""
  }

  trap handle_cancel SIGINT
  stty -echoctl
  eval "$@"
  stty echoctl
  trap - SIGINT
}
# Aborts the demo with install instructions if `pv` (used to simulate
# typing) is not on PATH.
function check_pv() {
  command -v pv >/dev/null 2>&1 || {

    echo ""
    echo -e "${RED}##############################################################"
    echo "# HOLD IT!! I require pv but it's not installed.  Aborting." >&2;
    echo -e "${RED}##############################################################"
    echo ""
    echo -e "${COLOR_RESET}Installing pv:"
    echo ""
    echo -e "${BLUE}Mac:${COLOR_RESET} $ brew install pv"
    echo ""
    echo -e "${BLUE}Other:${COLOR_RESET} http://www.ivarch.com/programs/pv.shtml"
    echo -e "${COLOR_RESET}"
    exit 1;
  }
}
# verify pv is available before anything tries to simulate typing
check_pv

#
# handle some default params
# -h for help
# -d for disabling simulated typing
# -n for no wait, -c for command numbers, -w<sec> for an auto-proceed timeout
#
while getopts ":dhncw:" opt; do
  case $opt in
    h)
      usage
      exit 1
      ;;
    d)
      # unsetting TYPE_SPEED makes p() echo instantly instead of piping through pv
      unset TYPE_SPEED
      ;;
    n)
      NO_WAIT=true
      ;;
    c)
      SHOW_CMD_NUMS=true
      ;;
    w)
      PROMPT_TIMEOUT=$OPTARG
      ;;
  esac
done
================================================
FILE: demo/demo.sh
================================================
#!/bin/bash

########################
# include the magic
########################
. demo-magic.sh

# bootstrap environment: create a kind cluster and deploy an alpine daemonset
pei "kind create cluster --name eraser-demo"
sleep 10
pei "kubectl apply -f ds.yaml"
sleep 10
clear

# demo commands: delete the daemonset, show the alpine image left behind,
# then install eraser and show the image being cleaned up
pei "kubectl get pods"
sleep 10
pei "kubectl delete daemonset alpine"
sleep 5
pei "kubectl get pods"
pei "docker exec eraser-demo-control-plane ctr -n k8s.io images list | grep alpine"
pei "helm install -n eraser-system eraser eraser/eraser --create-namespace"
wait
pei "kubectl get po -n eraser-system"
wait
pei "kubectl get po -n eraser-system"
pei "docker exec eraser-demo-control-plane ctr -n k8s.io images list | grep alpine"

# teardown environment
kind delete cluster --name eraser-demo
================================================
FILE: demo/ds.yaml
================================================
apiVersion: apps/v1
kind: DaemonSet
metadata:
name: alpine
spec:
selector:
matchLabels:
app: alpine
template:
metadata:
labels:
app: alpine
spec:
containers:
- name: alpine
image: docker.io/library/alpine:3.7.3
================================================
FILE: deploy/eraser.yaml
================================================
apiVersion: v1
kind: Namespace
metadata:
labels:
control-plane: controller-manager
name: eraser-system
---
apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
annotations:
controller-gen.kubebuilder.io/version: v0.14.0
name: imagejobs.eraser.sh
spec:
group: eraser.sh
names:
kind: ImageJob
listKind: ImageJobList
plural: imagejobs
singular: imagejob
scope: Cluster
versions:
- name: v1
schema:
openAPIV3Schema:
description: ImageJob is the Schema for the imagejobs API.
properties:
apiVersion:
description: |-
APIVersion defines the versioned schema of this representation of an object.
Servers should convert recognized schemas to the latest internal value, and
may reject unrecognized values.
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources
type: string
kind:
description: |-
Kind is a string value representing the REST resource this object represents.
Servers may infer this from the endpoint the client submits requests to.
Cannot be updated.
In CamelCase.
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
type: string
metadata:
type: object
status:
description: ImageJobStatus defines the observed state of ImageJob.
properties:
deleteAfter:
description: Time to delay deletion until
format: date-time
type: string
desired:
description: desired number of pods
type: integer
failed:
description: number of pods that failed
type: integer
phase:
description: job running, successfully completed, or failed
type: string
skipped:
description: number of nodes that were skipped e.g. because they are not a linux node
type: integer
succeeded:
description: number of pods that completed successfully
type: integer
required:
- desired
- failed
- phase
- skipped
- succeeded
type: object
type: object
served: true
storage: true
subresources:
status: {}
- deprecated: true
deprecationWarning: v1alpha1 of the eraser API has been deprecated. Please migrate to v1.
name: v1alpha1
schema:
openAPIV3Schema:
description: ImageJob is the Schema for the imagejobs API.
properties:
apiVersion:
description: |-
APIVersion defines the versioned schema of this representation of an object.
Servers should convert recognized schemas to the latest internal value, and
may reject unrecognized values.
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources
type: string
kind:
description: |-
Kind is a string value representing the REST resource this object represents.
Servers may infer this from the endpoint the client submits requests to.
Cannot be updated.
In CamelCase.
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
type: string
metadata:
type: object
status:
description: ImageJobStatus defines the observed state of ImageJob.
properties:
deleteAfter:
description: Time to delay deletion until
format: date-time
type: string
desired:
description: desired number of pods
type: integer
failed:
description: number of pods that failed
type: integer
phase:
description: job running, successfully completed, or failed
type: string
skipped:
description: number of nodes that were skipped e.g. because they are not a linux node
type: integer
succeeded:
description: number of pods that completed successfully
type: integer
required:
- desired
- failed
- phase
- skipped
- succeeded
type: object
type: object
served: true
storage: false
subresources:
status: {}
---
apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
annotations:
controller-gen.kubebuilder.io/version: v0.14.0
name: imagelists.eraser.sh
spec:
group: eraser.sh
names:
kind: ImageList
listKind: ImageListList
plural: imagelists
singular: imagelist
scope: Cluster
versions:
- name: v1
schema:
openAPIV3Schema:
description: ImageList is the Schema for the imagelists API.
properties:
apiVersion:
description: |-
APIVersion defines the versioned schema of this representation of an object.
Servers should convert recognized schemas to the latest internal value, and
may reject unrecognized values.
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources
type: string
kind:
description: |-
Kind is a string value representing the REST resource this object represents.
Servers may infer this from the endpoint the client submits requests to.
Cannot be updated.
In CamelCase.
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
type: string
metadata:
type: object
spec:
description: ImageListSpec defines the desired state of ImageList.
properties:
images:
description: The list of non-compliant images to delete if non-running.
items:
type: string
type: array
required:
- images
type: object
status:
description: ImageListStatus defines the observed state of ImageList.
properties:
failed:
description: Number of nodes that failed to run the job
format: int64
type: integer
skipped:
description: Number of nodes that were skipped due to a skip selector
format: int64
type: integer
success:
description: Number of nodes that successfully ran the job
format: int64
type: integer
timestamp:
description: Information when the job was completed.
format: date-time
type: string
required:
- failed
- skipped
- success
- timestamp
type: object
type: object
served: true
storage: true
subresources:
status: {}
- deprecated: true
deprecationWarning: v1alpha1 of the eraser API has been deprecated. Please migrate to v1.
name: v1alpha1
schema:
openAPIV3Schema:
description: ImageList is the Schema for the imagelists API.
properties:
apiVersion:
description: |-
APIVersion defines the versioned schema of this representation of an object.
Servers should convert recognized schemas to the latest internal value, and
may reject unrecognized values.
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources
type: string
kind:
description: |-
Kind is a string value representing the REST resource this object represents.
Servers may infer this from the endpoint the client submits requests to.
Cannot be updated.
In CamelCase.
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
type: string
metadata:
type: object
spec:
description: ImageListSpec defines the desired state of ImageList.
properties:
images:
description: The list of non-compliant images to delete if non-running.
items:
type: string
type: array
required:
- images
type: object
status:
description: ImageListStatus defines the observed state of ImageList.
properties:
failed:
description: Number of nodes that failed to run the job
format: int64
type: integer
skipped:
description: Number of nodes that were skipped due to a skip selector
format: int64
type: integer
success:
description: Number of nodes that successfully ran the job
format: int64
type: integer
timestamp:
description: Information when the job was completed.
format: date-time
type: string
required:
- failed
- skipped
- success
- timestamp
type: object
type: object
served: true
storage: false
subresources:
status: {}
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: eraser-controller-manager
namespace: eraser-system
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: eraser-imagejob-pods
namespace: eraser-system
---
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
name: eraser-manager-role
namespace: eraser-system
rules:
- apiGroups:
- ""
resources:
- configmaps
verbs:
- create
- delete
- get
- list
- patch
- update
- watch
- apiGroups:
- ""
resources:
- pods
verbs:
- create
- delete
- get
- list
- update
- watch
- apiGroups:
- ""
resources:
- podtemplates
verbs:
- create
- delete
- get
- list
- patch
- update
- watch
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: eraser-manager-role
rules:
- apiGroups:
- ""
resources:
- nodes
verbs:
- get
- list
- watch
- apiGroups:
- eraser.sh
resources:
- imagejobs
verbs:
- create
- delete
- get
- list
- watch
- apiGroups:
- eraser.sh
resources:
- imagejobs/status
verbs:
- get
- patch
- update
- apiGroups:
- eraser.sh
resources:
- imagelists
verbs:
- get
- list
- watch
- apiGroups:
- eraser.sh
resources:
- imagelists/status
verbs:
- get
- patch
- update
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
name: eraser-manager-rolebinding
namespace: eraser-system
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
name: eraser-manager-role
subjects:
- kind: ServiceAccount
name: eraser-controller-manager
namespace: eraser-system
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: eraser-manager-rolebinding
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: eraser-manager-role
subjects:
- kind: ServiceAccount
name: eraser-controller-manager
namespace: eraser-system
---
apiVersion: v1
data:
controller_manager_config.yaml: |
apiVersion: eraser.sh/v1alpha3
kind: EraserConfig
manager:
runtime:
name: containerd
address: unix:///run/containerd/containerd.sock
otlpEndpoint: ""
logLevel: info
scheduling:
repeatInterval: 24h
beginImmediately: true
profile:
enabled: false
port: 6060
imageJob:
successRatio: 1.0
cleanup:
delayOnSuccess: 0s
delayOnFailure: 24h
pullSecrets: [] # image pull secrets for collector/scanner/eraser
priorityClassName: "" # priority class name for collector/scanner/eraser
additionalPodLabels: {}
nodeFilter:
type: exclude # must be either exclude|include
selectors:
- eraser.sh/cleanup.filter
- kubernetes.io/os=windows
components:
collector:
enabled: true
image:
repo: ghcr.io/eraser-dev/collector
tag: v1.5.0-beta.0
request:
mem: 25Mi
cpu: 7m
limit:
mem: 500Mi
# https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/#how-pods-with-resource-limits-are-run
cpu: 0
scanner:
enabled: true
image:
repo: ghcr.io/eraser-dev/eraser-trivy-scanner # supply custom image for custom scanner
tag: v1.5.0-beta.0
request:
mem: 500Mi
cpu: 1000m
limit:
mem: 2Gi
# https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/#how-pods-with-resource-limits-are-run
cpu: 0
# The config needs to be passed through to the scanner as yaml, as a
# single string. Because we allow custom scanner images, the scanner is
# responsible for defining a schema, parsing, and validating.
config: |
# this is the schema for the provided 'trivy-scanner'. custom scanners
# will define their own configuration.
cacheDir: /var/lib/trivy
dbRepo: ghcr.io/aquasecurity/trivy-db
deleteFailedImages: true
deleteEOLImages: true
vulnerabilities:
ignoreUnfixed: false
types:
- os
- library
securityChecks:
- vuln
severities:
- CRITICAL
- HIGH
- MEDIUM
- LOW
ignoredStatuses:
timeout:
total: 23h
perImage: 1h
volumes: []
remover:
image:
repo: ghcr.io/eraser-dev/remover
tag: v1.5.0-beta.0
request:
mem: 25Mi
# https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/#how-pods-with-resource-limits-are-run
cpu: 0
limit:
mem: 30Mi
cpu: 0
kind: ConfigMap
metadata:
name: eraser-manager-config
namespace: eraser-system
---
apiVersion: apps/v1
kind: Deployment
metadata:
labels:
control-plane: controller-manager
name: eraser-controller-manager
namespace: eraser-system
spec:
replicas: 1
selector:
matchLabels:
control-plane: controller-manager
template:
metadata:
labels:
control-plane: controller-manager
spec:
containers:
- args:
- --config=/config/controller_manager_config.yaml
command:
- /manager
env:
- name: POD_NAMESPACE
valueFrom:
fieldRef:
apiVersion: v1
fieldPath: metadata.namespace
- name: OTEL_SERVICE_NAME
value: eraser-manager
image: ghcr.io/eraser-dev/eraser-manager:v1.5.0-beta.0
livenessProbe:
httpGet:
path: /healthz
port: 8081
initialDelaySeconds: 15
periodSeconds: 20
name: manager
readinessProbe:
httpGet:
path: /readyz
port: 8081
initialDelaySeconds: 5
periodSeconds: 10
resources:
limits:
memory: 30Mi
requests:
cpu: 100m
memory: 20Mi
securityContext:
allowPrivilegeEscalation: false
capabilities:
drop:
- ALL
readOnlyRootFilesystem: true
runAsGroup: 65532
runAsNonRoot: true
runAsUser: 65532
seccompProfile:
type: RuntimeDefault
volumeMounts:
- mountPath: /config
name: manager-config
nodeSelector:
kubernetes.io/os: linux
serviceAccountName: eraser-controller-manager
terminationGracePeriodSeconds: 10
volumes:
- configMap:
name: eraser-manager-config
name: manager-config
================================================
FILE: docs/README.md
================================================
# Website
This website is built using [Docusaurus 2](https://docusaurus.io/), a modern static website generator.
### Installation
```
$ yarn
```
### Local Development
```
$ yarn start
```
This command starts a local development server and opens up a browser window. Most changes are reflected live without having to restart the server.
### Build
```
$ yarn build
```
This command generates static content into the `build` directory and can be served using any static contents hosting service.
### Deployment
Using SSH:
```
$ USE_SSH=true yarn deploy
```
Not using SSH:
```
$ GIT_USER=<Your GitHub username> yarn deploy
```
If you are using GitHub pages for hosting, this command is a convenient way to build the website and push to the `gh-pages` branch.
## Search
Eraser docs website uses Algolia DocSearch service. Please see [here](https://docusaurus.io/docs/search) for more information.
If the search index has any issues:
1. Go to [Algolia search dashboard](https://www.algolia.com/apps/X8MU4GEC0G/explorer/browse/eraser)
1. Click manage index and export configuration
1. Delete the index
1. Import saved configuration
1. Go to [Algolia crawler](https://crawler.algolia.com/admin/crawlers/acc2bdb5-4780-433f-a3e9-bb3b49598320/overview) and restart crawling manually (takes about a few minutes to crawl). This is scheduled to run every week automatically.
================================================
FILE: docs/babel.config.js
================================================
module.exports = {
presets: [require.resolve('@docusaurus/core/lib/babel/preset')],
};
================================================
FILE: docs/design/README.md
================================================
# Design Docs
## Implemented
* [ImageList and Collector/Scanner Design](https://docs.google.com/document/d/1Rz1bkZKZSLVMjC_w8WLASPDUjfU80tjV-XWUXZ8vq3I/edit?usp=sharing)
## Proposed
* [Image exclusion](https://docs.google.com/document/d/1ksUziJzNSVpwCagqmAzZOKllwbmZg0q2xJNUDUcQP2U/edit)
================================================
FILE: docs/docs/architecture.md
================================================
---
title: Architecture
---
At a high level, Eraser has two main modes of operation: manual and automated.
Manual image removal involves supplying a list of images to remove; Eraser then
deploys pods to clean up the images you supplied.
Automated image removal runs on a timer. By default, the automated process
removes images based on the results of a vulnerability scan. The default
vulnerability scanner is Trivy, but others can be provided in its place. Or,
the scanner can be disabled altogether, in which case Eraser acts as a garbage
collector -- it will remove all non-running images in your cluster.
## Manual image cleanup
## Automated analysis, scanning, and cleanup
================================================
FILE: docs/docs/code-of-conduct.md
================================================
---
title: Code of Conduct
---
This project has adopted the [CNCF Code of Conduct](https://github.com/cncf/foundation/blob/main/code-of-conduct.md).
Resources:
- [CNCF Code of Conduct](https://github.com/cncf/foundation/blob/main/code-of-conduct.md)
- [Code of Conduct Reporting](https://github.com/cncf/foundation/blob/main/code-of-conduct.md)
================================================
FILE: docs/docs/contributing.md
================================================
---
title: Contributing
---
There are several ways to get involved with Eraser
- Join the [mailing list](https://groups.google.com/u/1/g/eraser-dev) to get notifications for releases, security announcements, etc.
- Participate in the [biweekly community meetings](https://docs.google.com/document/d/1Sj5u47K3WUGYNPmQHGFpb52auqZb1FxSlWAQnPADhWI/edit) to discuss development, issues, use cases, etc.
- Join the `#eraser` channel on the [Kubernetes Slack](https://slack.k8s.io/)
- View the [development setup instructions](https://eraser-dev.github.io/eraser/docs/development)
This project welcomes contributions and suggestions.
This project has adopted the [CNCF Code of Conduct](https://github.com/cncf/foundation/blob/main/code-of-conduct.md).
================================================
FILE: docs/docs/custom-scanner.md
================================================
---
title: Custom Scanner
---
## Creating a Custom Scanner
To create a custom scanner for non-compliant images, use the following [template](https://github.com/eraser-dev/eraser-scanner-template/).
In order to customize your scanner, start by creating a `NewImageProvider()`. The ImageProvider interface can be found [here](../../pkg/scanners/template/scanner_template.go).
The ImageProvider will allow you to retrieve the list of all non-running and non-excluded images from the collector container through the `ReceiveImages()` function. Process these images with your customized scanner and threshold, and use `SendImages()` to pass the images found non-compliant to the eraser container for removal. Finally, complete the scanning process by calling `Finish()`.
When complete, provide your custom scanner image to Eraser in deployment.
================================================
FILE: docs/docs/customization.md
================================================
---
title: Customization
---
## Overview
Eraser uses a configmap to configure its behavior. The configmap is part of the
deployment and it is not necessary to deploy it manually. Once deployed, the configmap
can be edited at any time:
```bash
kubectl edit configmap --namespace eraser-system eraser-manager-config
```
If an eraser job is already running, the changes will not take effect until the job completes.
The configuration is in yaml.
## Key Concepts
### Basic architecture
The _manager_ runs as a pod in your cluster and manages _ImageJobs_. Think of
an _ImageJob_ as a unit of work, performed on every node in your cluster. Each
node runs a sub-job. The goal of the _ImageJob_ is to assess the images on your
cluster's nodes, and to remove the images you don't want. There are two stages:
1. Assessment
1. Removal.
### Scheduling
An _ImageJob_ can either be created on-demand (see [Manual Removal](https://eraser-dev.github.io/eraser/docs/manual-removal)),
or they can be spawned on a timer like a cron job. On-demand jobs skip the
assessment stage and get right down to the business of removing the images you
specified. The behavior of an on-demand job is quite different from that of
timed jobs.
### Fault Tolerance
Because an _ImageJob_ runs on every node in your cluster, and the conditions on
each node may vary widely, some of the sub-jobs may fail. If you cannot
tolerate any failure, set the `manager.imageJob.successRatio` property to
`1.0`. If 75% success sounds good to you, set it to `0.75`. In that case, if
fewer than 75% of the pods spawned by the _ImageJob_ report success, the job as
a whole will be marked as a failure.
This is mainly to help diagnose error conditions. As such, you can set
`manager.imageJob.cleanup.delayOnFailure` to a long value so that logs can be
captured before the spawned pods are cleaned up.
### Excluding Nodes
For various reasons, you may want to prevent Eraser from scheduling pods on
certain nodes. To do so, the nodes can be given a special label. By default,
this label is `eraser.sh/cleanup.filter`, but you can configure the behavior with
the options under `manager.nodeFilter`. The [table](#detailed-options) provides more detail.
### Configuring Components
An _ImageJob_ is made up of various sub-jobs, with one sub-job for each node.
These sub-jobs can be broken down further into three stages.
1. Collection (What is on the node?)
1. Scanning (What images conform to the policy I've provided?)
1. Removal (Remove images based on the results of the above)
Of the above stages, only Removal is mandatory. The others can be disabled.
Furthermore, manually triggered _ImageJobs_ will skip right to removal, even if
Eraser is configured to collect and scan. Collection and Scanning will only
take place when:
1. The collector and/or scanner `components` are enabled, AND
1. The job was *not* triggered manually by creating an _ImageList_.
Disabling scanner will remove all non-running images by default.
### Swapping out components
The collector, scanner, and remover components can all be swapped out. This
enables you to build and host the images yourself. In addition, the scanner's
behavior can be completely tailored to your needs by swapping out the default
image with one of your own. To specify the images, use the
`components.<component>.image.repo` and `components.<component>.image.tag`,
where `<component>` is one of `collector`, `scanner`, or `remover`.
## Universal Options
The following portions of the configmap apply no matter how you spawn your
_ImageJob_. The values provided below are the defaults. For more detail on
these options, see the [table](#detailed-options).
```yaml
manager:
runtime:
name: containerd
address: unix:///run/containerd/containerd.sock
otlpEndpoint: "" # empty string disables OpenTelemetry
logLevel: info
profile:
enabled: false
port: 6060
imageJob:
successRatio: 1.0
cleanup:
delayOnSuccess: 0s
delayOnFailure: 24h
pullSecrets: [] # image pull secrets for collector/scanner/remover
priorityClassName: "" # priority class name for collector/scanner/remover
additionalPodLabels: {}
extraScannerVolumes: {}
extraScannerVolumeMounts: {}
nodeFilter:
type: exclude # must be either exclude|include
selectors:
- eraser.sh/cleanup.filter
- kubernetes.io/os=windows
components:
remover:
image:
repo: ghcr.io/eraser-dev/remover
tag: v1.0.0
request:
mem: 25Mi
cpu: 0
limit:
mem: 30Mi
cpu: 1000m
```
## Component Options
```yaml
components:
collector:
enabled: true
image:
repo: ghcr.io/eraser-dev/collector
tag: v1.0.0
request:
mem: 25Mi
cpu: 7m
limit:
mem: 500Mi
cpu: 0
scanner:
enabled: true
image:
repo: ghcr.io/eraser-dev/eraser-trivy-scanner
tag: v1.0.0
request:
mem: 500Mi
cpu: 1000m
limit:
mem: 2Gi
cpu: 0
config: |
# this is the schema for the provided 'trivy-scanner'. custom scanners
# will define their own configuration. see the below
remover:
image:
repo: ghcr.io/eraser-dev/remover
tag: v1.0.0
request:
mem: 25Mi
cpu: 0
limit:
mem: 30Mi
cpu: 1000m
```
## Scanner Options
These options can be provided to `components.scanner.config`. They will be
passed through as a string to the scanner container and parsed there. If you
want to configure your own scanner, you must provide some way to parse this.
Below are the values recognized by the provided `eraser-trivy-scanner` image.
Values provided below are the defaults.
```yaml
cacheDir: /var/lib/trivy # The file path inside the container to store the cache
dbRepo: ghcr.io/aquasecurity/trivy-db # The container registry from which to fetch the trivy database
deleteFailedImages: true # if true, remove images for which scanning fails, regardless of why it failed
deleteEOLImages: true # if true, remove images that have reached their end-of-life date
vulnerabilities:
ignoreUnfixed: true # consider the image compliant if there are no known fixes for the vulnerabilities found.
types: # a list of vulnerability types. for more info, see trivy's documentation.
- os
- library
securityChecks: # see trivy's documentation for more information
- vuln
severities: # in this case, only flag images with CRITICAL vulnerability for removal
- CRITICAL
ignoredStatuses: # a list of trivy statuses to ignore. See https://aquasecurity.github.io/trivy/v0.44/docs/configuration/filtering/#by-status.
timeout:
total: 23h # if scanning isn't completed before this much time elapses, abort the whole scan
perImage: 1h # if scanning a single image exceeds this time, scanning will be aborted
```
## Detailed Options
| Option | Description | Default |
| --- | --- | --- |
| manager.runtime.name | The runtime to use for the manager's containers. Must be one of containerd, crio, or dockershim. It is assumed that your nodes are all using the same runtime, and there is currently no way to configure multiple runtimes. | containerd |
| manager.runtime.address | The runtime socket address to use for the containers. Can provide a custom address for containerd and dockershim runtimes, but not for crio due to Trivy restrictions. | unix:///run/containerd/containerd.sock |
| manager.otlpEndpoint | The endpoint to send OpenTelemetry data to. If empty, data will not be sent. | "" |
| manager.logLevel | The log level for the manager's containers. Must be one of debug, info, warn, error, dpanic, panic, or fatal. | info |
| manager.scheduling.repeatInterval | Use only when collector and/or scanner are enabled. This is like a cron job, and will spawn an _ImageJob_ at the interval provided. | 24h |
| manager.scheduling.beginImmediately | If set to true, the first _ImageJob_ will run immediately. If false, the job will not be spawned until after the interval (above) has elapsed. | true |
| manager.profile.enabled | Whether to enable profiling for the manager's containers. This is for debugging with `go tool pprof`. | false |
| manager.profile.port | The port on which to expose the profiling endpoint. | 6060 |
| manager.imageJob.successRatio | The ratio of successful image jobs required before a cleanup is performed. | 1.0 |
| manager.imageJob.cleanup.delayOnSuccess | The amount of time to wait after a successful image job before performing cleanup. | 0s |
| manager.imageJob.cleanup.delayOnFailure | The amount of time to wait after a failed image job before performing cleanup. | 24h |
| manager.pullSecrets | The image pull secrets to use for collector, scanner, and remover containers. | [] |
| manager.priorityClassName | The priority class to use for collector, scanner, and remover containers. | "" |
| manager.additionalPodLabels | Additional labels for all pods that the controller creates at runtime. | `{}` |
| manager.nodeFilter.type | The type of node filter to use. Must be either "exclude" or "include". | exclude |
| manager.nodeFilter.selectors | A list of selectors used to filter nodes. | [] |
| components.collector.enabled | Whether to enable the collector component. | true |
| components.collector.image.repo | The repository containing the collector image. | ghcr.io/eraser-dev/collector |
| components.collector.image.tag | The tag of the collector image. | v1.0.0 |
| components.collector.request.mem | The amount of memory to request for the collector container. | 25Mi |
| components.collector.request.cpu | The amount of CPU to request for the collector container. | 7m |
| components.collector.limit.mem | The maximum amount of memory the collector container is allowed to use. | 500Mi |
| components.collector.limit.cpu | The maximum amount of CPU the collector container is allowed to use. | 0 |
| components.scanner.enabled | Whether to enable the scanner component. | true |
| components.scanner.image.repo | The repository containing the scanner image. | ghcr.io/eraser-dev/eraser-trivy-scanner |
| components.scanner.image.tag | The tag of the scanner image. | v1.0.0 |
| components.scanner.request.mem | The amount of memory to request for the scanner container. | 500Mi |
| components.scanner.request.cpu | The amount of CPU to request for the scanner container. | 1000m |
| components.scanner.limit.mem | The maximum amount of memory the scanner container is allowed to use. | 2Gi |
| components.scanner.limit.cpu | The maximum amount of CPU the scanner container is allowed to use. | 0 |
| components.scanner.config | The configuration to pass to the scanner container, as a YAML string. | See YAML below |
| components.scanner.volumes | Extra volumes for scanner. | `{}` |
| components.remover.image.repo | The repository containing the remover image. | ghcr.io/eraser-dev/remover |
| components.remover.image.tag | The tag of the remover image. | v1.0.0 |
| components.remover.request.mem | The amount of memory to request for the remover container. | 25Mi |
| components.remover.request.cpu | The amount of CPU to request for the remover container. | 0 |
================================================
FILE: docs/docs/exclusion.md
================================================
---
title: Exclusion
---
## Excluding registries, repositories, and images
Eraser can exclude registries (example, `docker.io/library/*`) and also specific images with a tag (example, `docker.io/library/ubuntu:18.04`) or digest (example, `sha256:80f31da1ac7b312ba29d65080fd...`) from its removal process.
To exclude any images or registries from the removal, create configmap(s) with the label `eraser.sh/exclude.list=true` in the eraser-system namespace with a JSON file holding the excluded images.
```bash
$ cat > sample.json <<"EOF"
{
"excluded": [
"docker.io/library/*",
"ghcr.io/eraser-dev/test:latest"
]
}
EOF
$ kubectl create configmap excluded --from-file=sample.json --namespace=eraser-system
$ kubectl label configmap excluded eraser.sh/exclude.list=true -n eraser-system
```
## Exempting Nodes from the Eraser Pipeline
Exempting nodes from cleanup was added in v1.0.0. When deploying Eraser, you can specify whether there is a list of nodes you would like to `include` or `exclude` from the cleanup process using the configmap. For more information, see the section on [customization](https://eraser-dev.github.io/eraser/docs/customization).
================================================
FILE: docs/docs/faq.md
================================================
---
title: FAQ
---
## Why am I still seeing vulnerable images?
Eraser currently targets **non-running** images, so any vulnerable images that are currently running will not be removed. In addition, the default vulnerability scanning with Trivy removes images with `CRITICAL` vulnerabilities. Any images with lower vulnerabilities will not be removed. This can be configured using the [configmap](https://eraser-dev.github.io/eraser/docs/customization#scanner-options).
## How is Eraser different from Kubernetes garbage collection?
The native garbage collection in Kubernetes works a bit differently than Eraser. By default, garbage collection begins when disk usage reaches 85%, and stops when it gets down to 80%. More details about Kubernetes garbage collection can be found in the [Kubernetes documentation](https://kubernetes.io/docs/concepts/architecture/garbage-collection/), and configuration options can be found in the [Kubelet documentation](https://kubernetes.io/docs/reference/config-api/kubelet-config.v1beta1/).
There are a couple core benefits to using Eraser for image cleanup:
* Eraser can be configured to use image vulnerability data when making determinations on image removal
* By interfacing directly with the container runtime, Eraser can clean up images that are not managed by Kubelet and Kubernetes
================================================
FILE: docs/docs/installation.md
================================================
---
title: Installation
---
## Manifest
To install Eraser with the manifest file, run the following command:
```bash
kubectl apply -f https://raw.githubusercontent.com/eraser-dev/eraser/main/deploy/eraser.yaml
```
## Helm
If you'd like to install and manage Eraser with Helm, follow the install instructions [here](https://github.com/eraser-dev/eraser/blob/main/charts/eraser/README.md)
================================================
FILE: docs/docs/introduction.md
================================================
---
title: Introduction
slug: /
---
# Introduction
When deploying to Kubernetes, it's common for pipelines to build and push images to a cluster, but it's much less common for these images to be cleaned up. This can lead to accumulating bloat on the disk, and a host of non-compliant images lingering on the nodes.
The current garbage collection process deletes images based on a percentage of load, but this process does not consider the vulnerability state of the images. **Eraser** aims to provide a simple way to determine the state of an image, and delete it if it meets the specified criteria.
================================================
FILE: docs/docs/manual-removal.md
================================================
---
title: Manual Removal
---
Create an `ImageList` and specify the images you would like to remove. In this case, the image `docker.io/library/alpine:3.7.3` will be removed.
```shell
cat <<EOF | kubectl apply -f -
apiVersion: eraser.sh/v1
kind: ImageList
metadata:
  name: imagelist
spec:
  images:
    - docker.io/library/alpine:3.7.3
EOF
```

> `ImageList` is a cluster-scoped resource and must be called imagelist. `"*"` can be specified to remove all non-running images instead of individual images.
Creating an `ImageList` should trigger an `ImageJob` that will deploy Eraser pods on every node to perform the removal given the list of images.
```shell
$ kubectl get pods -n eraser-system
eraser-system eraser-controller-manager-55d54c4fb6-dcglq 1/1 Running 0 9m8s
eraser-system eraser-kind-control-plane 1/1 Running 0 11s
eraser-system eraser-kind-worker 1/1 Running 0 11s
eraser-system eraser-kind-worker2 1/1 Running 0 11s
```
Pods will run to completion and the images will be removed.
```shell
$ kubectl get pods -n eraser-system
eraser-system eraser-controller-manager-6d6d5594d4-phl2q 1/1 Running 0 4m16s
eraser-system eraser-kind-control-plane 0/1 Completed 0 22s
eraser-system eraser-kind-worker 0/1 Completed 0 22s
eraser-system eraser-kind-worker2 0/1 Completed 0 22s
```
The `ImageList` custom resource status field will contain the status of the last job. The success and failure counts indicate the number of nodes the Eraser agent was run on.
```shell
$ kubectl describe ImageList imagelist
...
Status:
Failed: 0
Success: 3
Timestamp: 2022-02-25T23:41:55Z
...
```
Verify the unused images are removed.
```shell
$ docker exec kind-worker ctr -n k8s.io images list | grep alpine
```
If the image has been successfully removed, there will be no output.
================================================
FILE: docs/docs/metrics.md
================================================
---
title: Metrics
---
To view Eraser metrics, you will need to deploy an Open Telemetry collector in the 'eraser-system' namespace, and an exporter. An example collector with a Prometheus exporter is [otelcollector.yaml](https://github.com/eraser-dev/eraser/blob/main/test/e2e/test-data/otelcollector.yaml), and the endpoint can be specified using the [configmap](https://eraser-dev.github.io/eraser/docs/customization#universal-options). In this example, we are logging the collected data to the otel-collector pod, and exporting metrics through Prometheus at 'http://localhost:8889/metrics', but a separate exporter can also be configured.
Below is the list of metrics provided by Eraser per run:
#### Eraser
```yaml
- count
- name: images_removed_run_total
- description: Total images removed by eraser
```
#### Scanner
```yaml
- count
- name: vulnerable_images_run_total
- description: Total vulnerable images detected
```
#### ImageJob
```yaml
- count
- name: imagejob_run_total
- description: Total ImageJobs scheduled
- name: pods_completed_run_total
- description: Total pods completed
- name: pods_failed_run_total
- description: Total pods failed
- summary
- name: imagejob_duration_run_seconds
- description: Total time for ImageJobs scheduled to complete
```
================================================
FILE: docs/docs/quick-start.md
================================================
---
title: Quick Start
---
This tutorial demonstrates the functionality of Eraser and validates that non-running images are removed successfully.
## Deploy a DaemonSet
After following the [install instructions](installation.md), we'll apply a demo `DaemonSet`. For illustrative purposes, a DaemonSet is applied and deleted so the non-running images remain on all nodes. The alpine image with the `3.7.3` tag will be used in this example. This is an image with a known critical vulnerability.
First, apply the `DaemonSet`:
```shell
cat <<EOF | kubectl apply -f -
apiVersion: apps/v1
kind: DaemonSet
metadata:
  name: alpine
spec:
  selector:
    matchLabels:
      app: alpine
  template:
    metadata:
      labels:
        app: alpine
    spec:
      containers:
      - name: alpine
        image: docker.io/library/alpine:3.7.3
        command: ["tail", "-f", "/dev/null"]
EOF
```

Next, delete the `DaemonSet` so that the image remains on the nodes without any running containers:

```shell
kubectl delete daemonset alpine
```

Verify the nodes in the cluster:

```shell
$ kubectl get nodes
NAME                 STATUS   ROLES           AGE   VERSION
kind-control-plane   Ready    control-plane   45m   v1.24.0
kind-worker          Ready    <none>          45m   v1.24.0
kind-worker2         Ready    <none>          44m   v1.24.0
```
List the images then filter for `alpine`:
```shell
$ docker exec kind-worker ctr -n k8s.io images list | grep alpine
docker.io/library/alpine:3.7.3 application/vnd.docker.distribution.manifest.list.v2+json sha256:8421d9a84432575381bfabd248f1eb56f3aa21d9d7cd2511583c68c9b7511d10 2.0 MiB linux/386,linux/amd64,linux/arm/v6,linux/arm64/v8,linux/ppc64le,linux/s390x io.cri-containerd.image=managed
docker.io/library/alpine@sha256:8421d9a84432575381bfabd248f1eb56f3aa21d9d7cd2511583c68c9b7511d10 application/vnd.docker.distribution.manifest.list.v2+json sha256:8421d9a84432575381bfabd248f1eb56f3aa21d9d7cd2511583c68c9b7511d10 2.0 MiB linux/386,linux/amd64,linux/arm/v6,linux/arm64/v8,linux/ppc64le,linux/s390x io.cri-containerd.image=managed
```
## Automatically Cleaning Images
After deploying Eraser, it will automatically clean images in a regular interval. This interval can be set using the `manager.scheduling.repeatInterval` setting in the [configmap](https://eraser-dev.github.io/eraser/docs/customization#detailed-options). The default interval is 24 hours (`24h`). Valid time units are "ns", "us" (or "µs"), "ms", "s", "m", "h".
Eraser will schedule eraser pods to each node in the cluster, and each pod will contain 3 containers: collector, scanner, and remover that will run to completion.
```shell
$ kubectl get pods -n eraser-system
NAMESPACE NAME READY STATUS RESTARTS AGE
eraser-system eraser-kind-control-plane-sb789 0/3 Completed 0 26m
eraser-system eraser-kind-worker-j84hm 0/3 Completed 0 26m
eraser-system eraser-kind-worker2-4lbdr 0/3 Completed 0 26m
eraser-system eraser-controller-manager-86cdb4cbf9-x8d7q 1/1 Running 0 26m
```
The collector container sends the list of all images to the scanner container, which scans and reports non-compliant images to the remover container for removal of images that are non-running. Once all pods are completed, they will be automatically cleaned up.
> If you want to remove all the images periodically, you can skip the scanner container by setting the `components.scanner.enabled` value to `false` using the [configmap](https://eraser-dev.github.io/eraser/docs/customization#detailed-options). In this case, each collector pod will hold 2 containers: collector and remover.
```shell
$ kubectl get pods -n eraser-system
NAMESPACE NAME READY STATUS RESTARTS AGE
eraser-system eraser-kind-control-plane-ksk2b 0/2 Completed 0 50s
eraser-system eraser-kind-worker-cpgqc 0/2 Completed 0 50s
eraser-system eraser-kind-worker2-k25df 0/2 Completed 0 50s
eraser-system eraser-controller-manager-86cdb4cbf9-x8d7q 1/1 Running 0 55s
```
================================================
FILE: docs/docs/release-management.md
================================================
# Release Management
## Overview
This document describes Eraser project release management, which includes release versioning, supported releases, and supported upgrades.
## Legend
- **X.Y.Z** refers to the version (git tag) of Eraser that is released. This is the version of the Eraser images and the Chart version.
- **Breaking changes** refer to schema changes, flag changes, and behavior changes of Eraser that may require a clean installation during upgrade, and it may introduce changes that could break backward compatibility.
- **Milestone** should be designed to include feature sets to accommodate 2 months release cycles including test gates. GitHub's milestones are used by maintainers to manage each release. PRs and Issues for each release should be created as part of a corresponding milestone.
- **Patch releases** refer to applicable fixes, including security fixes, may be backported to support releases, depending on severity and feasibility.
- **Test gates** should include soak tests and upgrade tests from the last minor version.
## Release Versioning
All releases will be of the form _vX.Y.Z_ where X is the major version, Y is the minor version and Z is the patch version. This project strictly follows semantic versioning.
The rest of the doc will cover the release process for the following kinds of releases:
**Major Releases**
No plan to move to 2.0.0 unless there is a major design change like an incompatible API change in the project
**Minor Releases**
- X.Y.0-alpha.W, W >= 0 (Branch : main)
- Released as needed before we cut a beta X.Y release
- Alpha release, cut from master branch
- X.Y.0-beta.W, W >= 0 (Branch : main)
- Released as needed before we cut a stable X.Y release
- More stable than the alpha release to signal users to test things out
- Beta release, cut from master branch
- X.Y.0-rc.W, W >= 0 (Branch : main)
- Released as needed before we cut a stable X.Y release
- soak for ~ 2 weeks before cutting a stable release
- Release candidate release, cut from master branch
- X.Y.0 (Branch: main)
- Released as needed
- Stable release, cut from master when X.Y milestone is complete
**Patch Releases**
- Patch Releases X.Y.Z, Z > 0 (Branch: release-X.Y, only cut when a patch is needed)
- No breaking changes
- Applicable fixes, including security fixes, may be cherry-picked from master into the latest supported minor release-X.Y branches.
- Patch release, cut from a release-X.Y branch
## Supported Releases
Applicable fixes, including security fixes, may be cherry-picked into the release branch, depending on severity and feasibility. Patch releases are cut from that branch as needed.
We expect users to stay reasonably up-to-date with the versions of Eraser they use in production, but understand that it may take time to upgrade. We expect users to be running approximately the latest patch release of a given minor release and encourage users to upgrade as soon as possible.
We expect to "support" n (current) and n-1 major.minor releases. "Support" means we expect users to be running that version in production. For example, when v1.2.0 comes out, v1.0.x will no longer be supported for patches, and we encourage users to upgrade to a supported version as soon as possible.
## Supported Kubernetes Versions
Eraser is assumed to be compatible with the [current Kubernetes Supported Versions](https://kubernetes.io/releases/patch-releases/#detailed-release-history-for-active-branches) per [Kubernetes Supported Versions policy](https://kubernetes.io/releases/version-skew-policy/).
For example, if Eraser _supported_ versions are v1.2 and v1.1, and Kubernetes _supported_ versions are v1.22, v1.23, v1.24, then all supported Eraser versions (v1.2, v1.1) are assumed to be compatible with all supported Kubernetes versions (v1.22, v1.23, v1.24). If Kubernetes v1.25 is released later, then Eraser v1.2 and v1.1 will be assumed to be compatible with v1.25 if those Eraser versions are still supported at that time.
If you choose to use Eraser with a version of Kubernetes that it does not support, you are using it at your own risk.
## Acknowledgement
This document builds on the ideas and implementations of release processes from projects like Kubernetes and Helm.
================================================
FILE: docs/docs/releasing.md
================================================
---
title: Releasing
---
## Create Release Pull Request
1. Go to `create_release_pull_request` workflow under actions.
2. Select run workflow, and use the workflow from your branch.
3. Input release version with the semantic version identifying the release.
4. Click run workflow and review the PR created by github-actions.
# Releasing
5. Once the PR is merged to `main`, tag that commit with release version and push tags to remote repository.
```
git checkout
git pull origin
git tag -a -m ''
git push origin
```
6. Pushing the release tag will trigger GitHub Actions to trigger `release` job.
This will build the `ghcr.io/eraser-dev/remover`, `ghcr.io/eraser-dev/eraser-manager`, `ghcr.io/eraser-dev/collector`, and `ghcr.io/eraser-dev/eraser-trivy-scanner` images automatically, then publish the new release tag.
## Publishing
1. GitHub Action will create a new release, review and edit it at https://github.com/eraser-dev/eraser/releases
## Notifying
1. Send an email to the [Eraser mailing list](https://groups.google.com/g/eraser-dev) announcing the release, with links to GitHub.
2. Post a message on the [Eraser Slack channel](https://kubernetes.slack.com/archives/C03Q8KV8YQ4) with the same information.
================================================
FILE: docs/docs/setup.md
================================================
---
title: Setup
---
# Development Setup
This document describes the steps to get started with development.
You can either utilize [Codespaces](https://docs.github.com/en/codespaces/overview) or setup a local environment.
## Local Setup
### Prerequisites:
- [go](https://go.dev/) with version 1.17 or later.
- [docker](https://docs.docker.com/get-docker/)
- [kind](https://kind.sigs.k8s.io/)
- `make`
### Get things running
- Get dependencies with `go get`
- This project uses `make`. You can utilize `make help` to see available targets. For local deployment make targets help to build, test and deploy.
### Making changes
Please refer to [Development Reference](#development-reference) for more details on the specific commands.
To test your changes on a cluster:
```bash
# generate necessary api files (optional - only needed if changes to api folder).
make generate
# build applicable images
make docker-build-manager MANAGER_IMG=eraser-manager:dev
make docker-build-remover REMOVER_IMG=remover:dev
make docker-build-collector COLLECTOR_IMG=collector:dev
make docker-build-trivy-scanner TRIVY_SCANNER_IMG=eraser-trivy-scanner:dev
# make sure updated image is present on cluster (e.g., see kind example below)
kind load docker-image \
eraser-manager:dev \
eraser-trivy-scanner:dev \
remover:dev \
collector:dev
make manifests
make deploy
# to remove the deployment
make undeploy
```
To test your changes to manager locally:
```bash
make run
```
Example Output:
```
you@local:~/eraser$ make run
docker build . \
-t eraser-tooling \
-f build/tooling/Dockerfile
[+] Building 7.8s (8/8) FINISHED
=> => naming to docker.io/library/eraser-tooling 0.0s
docker run -v /home/eraser/config:/config -w /config/manager \
registry.k8s.io/kustomize/kustomize:v3.8.9 edit set image controller=eraser-manager:dev
docker run -v /home/eraser:/eraser eraser-tooling controller-gen \
crd \
rbac:roleName=manager-role \
webhook \
paths="./..." \
output:crd:artifacts:config=config/crd/bases
rm -rf manifest_staging
mkdir -p manifest_staging/deploy
docker run --rm -v /home/eraser:/eraser \
registry.k8s.io/kustomize/kustomize:v3.8.9 build \
/eraser/config/default -o /eraser/manifest_staging/deploy/eraser.yaml
docker run -v /home/eraser:/eraser eraser-tooling controller-gen object:headerFile="hack/boilerplate.go.txt" paths="./..."
go fmt ./...
go vet ./...
go run ./main.go
{"level":"info","ts":1652985685.1663408,"logger":"controller-runtime.metrics","msg":"Metrics server is starting to listen","addr":":8080"}
...
```
## Development Reference
Eraser is using tooling from [kubebuilder](https://github.com/kubernetes-sigs/kubebuilder). For Eraser this tooling is containerized into the `eraser-tooling` image. The `make` targets can use this tooling and build the image when necessary.
You can override the default configuration using environment variables. Below you can find a reference of targets and configuration options.
### Common Configuration
| Environment Variable | Description |
| -------------------- | --------------------------------------------------------------------------------------------- |
| VERSION | Specifies the version (i.e., the image tag) of eraser to be used. |
| MANAGER_IMG | Defines the image url for the Eraser manager. Used for tagging, pulling and pushing the image |
| REMOVER_IMG | Defines the image url for the Eraser. Used for tagging, pulling and pushing the image |
| COLLECTOR_IMG | Defines the image url for the Collector. Used for tagging, pulling and pushing the image |
### Linting
- `make lint`
Lints the go code.
| Environment Variable | Description |
| -------------------- | ------------------------------------------------------- |
| GOLANGCI_LINT | Specifies the go linting binary to be used for linting. |
### Development
- `make generate`
Generates necessary files for the k8s api stored under `api/v1alpha1/zz_generated.deepcopy.go`. See the [kubebuilder docs](https://book.kubebuilder.io/cronjob-tutorial/other-api-files.html) for details.
- `make manifests`
Generates the eraser deployment yaml files under `manifest_staging/deploy`.
Configuration Options:
| Environment Variable | Description |
| -------------------- | -------------------------------------------------- |
| REMOVER_IMG | Defines the image url for the Eraser. |
| MANAGER_IMG | Defines the image url for the Eraser manager. |
| KUSTOMIZE_VERSION | Define Kustomize version for generating manifests. |
- `make test`
Runs the unit tests for the eraser project.
Configuration Options:
| Environment Variable | Description |
| -------------------- | ----------------------------------------------------------- |
| ENVTEST | Specifies the envtest setup binary. |
| ENVTEST_K8S_VERSION | Specifies the Kubernetes version for envtest setup command. |
- `make e2e-test`
Runs e2e tests on a cluster.
Configuration Options:
| Environment Variable | Description |
| -------------------- | ------------------------------------------------------------------------------------------------------------- |
| REMOVER_IMG | Eraser image to be used for e2e test. |
| MANAGER_IMG | Eraser manager image to be used for e2e test. |
| KUBERNETES_VERSION | Kubernetes version for e2e test. |
| TEST_COUNT | Sets repetition for test. Please refer to [go docs](https://pkg.go.dev/cmd/go#hdr-Testing_flags) for details. |
| TIMEOUT | Sets timeout for test. Please refer to [go docs](https://pkg.go.dev/cmd/go#hdr-Testing_flags) for details. |
| TESTFLAGS | Sets additional test flags |
### Build
- `make build`
Builds the eraser manager binaries.
- `make run`
Runs the eraser manager on your local machine.
- `make docker-build-manager`
Builds the docker image for the eraser manager.
Configuration Options:
| Environment Variable | Description |
| -------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------ |
| CACHE_FROM | Sets the target of the buildx --cache-from flag [see buildx reference](https://docs.docker.com/engine/reference/commandline/buildx_build/#cache-from). |
| CACHE_TO | Sets the target of the buildx --cache-to flag [see buildx reference](https://docs.docker.com/engine/reference/commandline/buildx_build/#cache-to). |
| PLATFORM | Sets the target platform for buildx [see buildx reference](https://docs.docker.com/engine/reference/commandline/buildx_build/#platform). |
| OUTPUT_TYPE | Sets the output for buildx [see buildx reference](https://docs.docker.com/engine/reference/commandline/buildx_build/#output). |
| MANAGER_IMG | Specifies the target repository, image name and tag for building image. |
- `make docker-push-manager`
Pushes the docker image for the eraser manager to the target repository.
Configuration Options:
| Environment Variable | Description |
| -------------------- | ----------------------------------------------------------------------- |
| MANAGER_IMG | Specifies the target repository, image name and tag for building image. |
- `make docker-build-remover`
Builds the docker image for the eraser remover.
Configuration Options:
| Environment Variable | Description |
| -------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------ |
| CACHE_FROM | Sets the target of the buildx --cache-from flag [see buildx reference](https://docs.docker.com/engine/reference/commandline/buildx_build/#cache-from). |
| CACHE_TO | Sets the target of the buildx --cache-to flag [see buildx reference](https://docs.docker.com/engine/reference/commandline/buildx_build/#cache-to). |
| PLATFORM | Sets the target platform for buildx [see buildx reference](https://docs.docker.com/engine/reference/commandline/buildx_build/#platform). |
| OUTPUT_TYPE | Sets the output for buildx [see buildx reference](https://docs.docker.com/engine/reference/commandline/buildx_build/#output). |
| REMOVER_IMG | Specifies the target repository, image name and tag for building image. |
- `make docker-push-remover`
Pushes the docker image for the eraser remover to the target repository.
Configuration Options:
| Environment Variable | Description |
| -------------------- | ----------------------------------------------------------------------- |
| REMOVER_IMG | Specifies the target repository, image name and tag for building image. |
- `make docker-build-collector`
Builds the docker image for the eraser collector.
Configuration Options:
| Environment Variable | Description |
| -------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------ |
| CACHE_FROM | Sets the target of the buildx --cache-from flag [see buildx reference](https://docs.docker.com/engine/reference/commandline/buildx_build/#cache-from). |
| CACHE_TO | Sets the target of the buildx --cache-to flag [see buildx reference](https://docs.docker.com/engine/reference/commandline/buildx_build/#cache-to). |
| PLATFORM | Sets the target platform for buildx [see buildx reference](https://docs.docker.com/engine/reference/commandline/buildx_build/#platform). |
| OUTPUT_TYPE | Sets the output for buildx [see buildx reference](https://docs.docker.com/engine/reference/commandline/buildx_build/#output). |
| COLLECTOR_IMG | Specifies the target repository, image name and tag for building image. |
- `make docker-push-collector`
Pushes the docker image for the eraser collector to the target repository.
Configuration Options:
| Environment Variable | Description |
| -------------------- | ----------------------------------------------------------------------- |
| COLLECTOR_IMG | Specifies the target repository, image name and tag for building image. |
### Deployment
- `make install`
Install CRDs into the K8s cluster specified in ~/.kube/config.
Configuration Options:
| Environment Variable | Description |
| -------------------- | ---------------------------------------------------------------- |
| KUSTOMIZE_VERSION | Kustomize version used to generate k8s resources for deployment. |
- `make uninstall`
Uninstall CRDs from the K8s cluster specified in ~/.kube/config.
Configuration Options:
| Environment Variable | Description |
| -------------------- | ---------------------------------------------------------------- |
| KUSTOMIZE_VERSION | Kustomize version used to generate k8s resources for deployment. |
- `make deploy`
Deploys eraser to the cluster specified in ~/.kube/config.
Configuration Options:
| Environment Variable | Description |
| -------------------- | -------------------------------------------------------------------- |
| KUSTOMIZE_VERSION | Kustomize version used to generate k8s resources for deployment. |
| MANAGER_IMG | Specifies the eraser manager image version to be used for deployment |
- `make undeploy`
Undeploy controller from the K8s cluster specified in ~/.kube/config.
Configuration Options:
| Environment Variable | Description |
| -------------------- | ------------------------------------------------------------------------- |
| KUSTOMIZE_VERSION | Kustomize version used to generate k8s resources that need to be removed. |
### Release
- `make release-manifest`
Generates k8s manifests files for a release.
Configuration Options:
| Environment Variable | Description |
| -------------------- | ------------------------------------ |
| NEWVERSION | Sets the new version in the Makefile |
- `make promote-staging-manifest`
Promotes the k8s deployment yaml files to release.
================================================
FILE: docs/docs/trivy.md
================================================
---
title: Trivy
---
## Trivy Provider Options
The Trivy provider is used in Eraser for image scanning and detecting vulnerabilities. See [Customization](https://eraser-dev.github.io/eraser/docs/customization#scanner-options) for more details on configuring the scanner.
================================================
FILE: docs/docusaurus.config.js
================================================
// @ts-check
// Note: type annotations allow type checking and IDEs autocompletion
// Prism syntax-highlighting themes used for code blocks in light and dark mode.
const lightCodeTheme = require('prism-react-renderer').themes.github;
const darkCodeTheme = require('prism-react-renderer').themes.dracula;
/** @type {import('@docusaurus/types').Config} */
const config = {
title: 'Eraser Docs',
// Site is published to GitHub Pages under the /eraser/docs/ path.
url: 'https://eraser-dev.github.io',
baseUrl: '/eraser/docs/',
// Warn (do not fail the build) on broken links and broken markdown links.
onBrokenLinks: 'warn',
onBrokenMarkdownLinks: 'warn',
favicon: 'img/favicon.ico',
trailingSlash: false,
// GitHub pages deployment config.
// If you aren't using GitHub pages, you don't need these.
organizationName: 'eraser-dev', // Usually your GitHub org/user name.
projectName: 'Eraser', // Usually your repo name.
deploymentBranch: 'gh-pages',
// Even if you don't use internalization, you can use this field to set useful
// metadata like html lang. For example, if your site is Chinese, you may want
// to replace "en" with "zh-Hans".
i18n: {
defaultLocale: 'en',
locales: ['en'],
},
presets: [
[
'classic',
/** @type {import('@docusaurus/preset-classic').Options} */
({
docs: {
sidebarPath: require.resolve('./sidebars.js'),
// Serve the docs at the site root instead of under /docs.
routeBasePath: '/'
},
// The blog feature of the classic preset is not used on this site.
blog: false,
theme: {
customCss: require.resolve('./src/css/custom.css'),
},
// Google Analytics (gtag) tracking configuration.
gtag: {
trackingID: 'G-QV5PNCJ560',
anonymizeIP: true,
},
}),
],
],
themeConfig:
/** @type {import('@docusaurus/preset-classic').ThemeConfig} */
({
navbar: {
title: 'Eraser',
logo: {
alt: 'Eraser Logo',
src: 'img/eraser.svg',
},
items: [
{
// Dropdown for switching between versioned doc sets.
type: 'docsVersionDropdown',
position: 'right',
},
{
// GitHub link rendered as an icon via the header-github-link CSS class.
href: 'https://github.com/eraser-dev/eraser',
position: 'right',
className: 'header-github-link',
'aria-label': 'GitHub repository',
},
],
},
footer: {
style: 'dark',
copyright: `Copyright © ${new Date().getFullYear()} Linux Foundation. The Linux Foundation has registered trademarks and uses trademarks. For a list of trademarks of The Linux Foundation, please see our Trademark Usage page.`,
},
prism: {
theme: lightCodeTheme,
darkTheme: darkCodeTheme,
},
// Algolia DocSearch configuration (the apiKey is a public search-only key).
algolia: {
appId: 'X8MU4GEC0G',
apiKey: 'aaca7901c07e616a7ec2e1e1f9670809',
indexName: 'eraser',
},
colorMode: {
defaultMode: 'light',
disableSwitch: false,
respectPrefersColorScheme: true,
}
}),
};
module.exports = config;
================================================
FILE: docs/package.json
================================================
{
"name": "website",
"version": "0.0.0",
"private": true,
"scripts": {
"docusaurus": "docusaurus",
"start": "docusaurus start",
"build": "docusaurus build",
"swizzle": "docusaurus swizzle",
"deploy": "docusaurus deploy",
"clear": "docusaurus clear",
"serve": "docusaurus serve",
"write-translations": "docusaurus write-translations",
"write-heading-ids": "docusaurus write-heading-ids"
},
"dependencies": {
"@docusaurus/core": "^3.9.2",
"@docusaurus/preset-classic": "^3.9.2",
"@docusaurus/theme-classic": "^3.9.2",
"@mdx-js/react": "^3.1.1",
"clsx": "^2.1.1",
"got": "^14.6.5",
"js-yaml": "^3.14.2",
"on-headers": "^1.1.0",
"path-to-regexp": "^1.8.0",
"prism-react-renderer": "^2.4.1",
"react": "^19.2.0",
"react-dom": "^19.2.0",
"react-router": "^6.28.1",
"react-router-dom": "^6.28.1",
"trim": "^1.0.1"
},
"devDependencies": {
"@docusaurus/module-type-aliases": "3.9.2"
},
"browserslist": {
"production": [
">0.5%",
"not dead",
"not op_mini all"
],
"development": [
"last 1 chrome version",
"last 1 firefox version",
"last 1 safari version"
]
},
"resolutions": {
"trim": "^0.0.3",
"got": "^11.8.5",
"js-yaml": "^3.14.2",
"path-to-regexp": "^1.8.0",
"on-headers": "^1.1.0"
}
}
================================================
FILE: docs/sidebars.js
================================================
/**
 * Sidebar definition for the Eraser documentation site.
 *
 * Creating a sidebar enables you to:
 *  - create an ordered group of docs
 *  - render a sidebar for each doc of that group
 *  - provide next/previous navigation
 *
 * Sidebars can be generated from the filesystem, or explicitly defined here.
 * Create as many sidebars as you want.
 */
// @ts-check

// Builds an expanded, collapsible category entry for the sidebar.
const category = (label, items) => ({
  type: 'category',
  label,
  collapsible: true,
  collapsed: false,
  items,
});

/** @type {import('@docusaurus/plugin-content-docs').SidebarsConfig} */
const sidebars = {
  sidebar: [
    'introduction',
    'installation',
    'quick-start',
    'architecture',
    category('Topics', ['manual-removal', 'exclusion', 'customization', 'metrics']),
    category('Development', ['setup', 'releasing']),
    category('Scanning', ['custom-scanner', 'trivy']),
    'faq',
    'contributing',
    'code-of-conduct',
  ],
};

module.exports = sidebars;
================================================
FILE: docs/src/css/custom.css
================================================
/**
 * Any CSS included here will be global. The classic template
 * bundles Infima by default. Infima is a CSS framework designed to
 * work well for content-centric websites.
 */
/* You can override the default Infima variables here. */
/* Light-mode palette: the primary color and its shade variants. */
:root {
--ifm-color-primary: #2e8555;
--ifm-color-primary-dark: #29784c;
--ifm-color-primary-darker: #277148;
--ifm-color-primary-darkest: #205d3b;
--ifm-color-primary-light: #33925d;
--ifm-color-primary-lighter: #359962;
--ifm-color-primary-lightest: #3cad6e;
--ifm-code-font-size: 95%;
--docusaurus-highlighted-code-line-bg: rgba(0, 0, 0, 0.1);
}
/* For readability concerns, you should choose a lighter palette in dark mode. */
[data-theme='dark'] {
--ifm-color-primary: #25c2a0;
--ifm-color-primary-dark: #21af90;
--ifm-color-primary-darker: #1fa588;
--ifm-color-primary-darkest: #1a8870;
--ifm-color-primary-light: #29d5b0;
--ifm-color-primary-lighter: #32d8b4;
--ifm-color-primary-lightest: #4fddbf;
--docusaurus-highlighted-code-line-bg: rgba(0, 0, 0, 0.3);
}
/* Dim the navbar GitHub icon slightly on hover. */
.header-github-link:hover {
opacity: 0.6;
}
/* Render the navbar GitHub link as an inline SVG icon (light mode). */
.header-github-link:before {
content: '';
width: 24px;
height: 24px;
display: flex;
background: url("data:image/svg+xml,%3Csvg viewBox='0 0 24 24' xmlns='http://www.w3.org/2000/svg'%3E%3Cpath d='M12 .297c-6.63 0-12 5.373-12 12 0 5.303 3.438 9.8 8.205 11.385.6.113.82-.258.82-.577 0-.285-.01-1.04-.015-2.04-3.338.724-4.042-1.61-4.042-1.61C4.422 18.07 3.633 17.7 3.633 17.7c-1.087-.744.084-.729.084-.729 1.205.084 1.838 1.236 1.838 1.236 1.07 1.835 2.809 1.305 3.495.998.108-.776.417-1.305.76-1.605-2.665-.3-5.466-1.332-5.466-5.93 0-1.31.465-2.38 1.235-3.22-.135-.303-.54-1.523.105-3.176 0 0 1.005-.322 3.3 1.23.96-.267 1.98-.399 3-.405 1.02.006 2.04.138 3 .405 2.28-1.552 3.285-1.23 3.285-1.23.645 1.653.24 2.873.12 3.176.765.84 1.23 1.91 1.23 3.22 0 4.61-2.805 5.625-5.475 5.92.42.36.81 1.096.81 2.22 0 1.606-.015 2.896-.015 3.286 0 .315.21.69.825.57C20.565 22.092 24 17.592 24 12.297c0-6.627-5.373-12-12-12'/%3E%3C/svg%3E")
no-repeat;
}
/* Same GitHub icon with a white fill for dark mode. */
html[data-theme='dark'] .header-github-link:before {
background: url("data:image/svg+xml,%3Csvg viewBox='0 0 24 24' xmlns='http://www.w3.org/2000/svg'%3E%3Cpath fill='white' d='M12 .297c-6.63 0-12 5.373-12 12 0 5.303 3.438 9.8 8.205 11.385.6.113.82-.258.82-.577 0-.285-.01-1.04-.015-2.04-3.338.724-4.042-1.61-4.042-1.61C4.422 18.07 3.633 17.7 3.633 17.7c-1.087-.744.084-.729.084-.729 1.205.084 1.838 1.236 1.838 1.236 1.07 1.835 2.809 1.305 3.495.998.108-.776.417-1.305.76-1.605-2.665-.3-5.466-1.332-5.466-5.93 0-1.31.465-2.38 1.235-3.22-.135-.303-.54-1.523.105-3.176 0 0 1.005-.322 3.3 1.23.96-.267 1.98-.399 3-.405 1.02.006 2.04.138 3 .405 2.28-1.552 3.285-1.23 3.285-1.23.645 1.653.24 2.873.12 3.176.765.84 1.23 1.91 1.23 3.22 0 4.61-2.805 5.625-5.475 5.92.42.36.81 1.096.81 2.22 0 1.606-.015 2.896-.015 3.286 0 .315.21.69.825.57C20.565 22.092 24 17.592 24 12.297c0-6.627-5.373-12-12-12'/%3E%3C/svg%3E")
no-repeat;
}
================================================
FILE: docs/static/.nojekyll
================================================
================================================
FILE: docs/versioned_docs/version-v0.4.x/architecture.md
================================================
---
title: Architecture
---
At a high level, Eraser has two main modes of operation: manual and automated.
Manual image removal involves supplying a list of images to remove; Eraser then
deploys pods to clean up the images you supplied.
Automated image removal runs on a timer. By default, the automated process
removes images based on the results of a vulnerability scan. The default
vulnerability scanner is Trivy, but others can be provided in its place. Or,
the scanner can be disabled altogether, in which case Eraser acts as a garbage
collector -- it will remove all non-running images in your cluster.
## Manual image cleanup
Note: metrics are not yet implemented in Eraser v0.4.x, but will be available in the upcoming v1.0.0 release.
## Automated analysis, scanning, and cleanup
================================================
FILE: docs/versioned_docs/version-v0.4.x/code-of-conduct.md
================================================
---
title: Code of Conduct
---
This project has adopted the [CNCF Code of Conduct](https://github.com/cncf/foundation/blob/main/code-of-conduct.md).
Resources:
- [CNCF Code of Conduct](https://github.com/cncf/foundation/blob/main/code-of-conduct.md)
- [Code of Conduct Reporting](https://github.com/cncf/foundation/blob/main/code-of-conduct.md)
================================================
FILE: docs/versioned_docs/version-v0.4.x/contributing.md
================================================
---
title: Contributing
---
There are several ways to get involved with Eraser
- Join the [mailing list](https://groups.google.com/u/1/g/eraser-dev) to get notifications for releases, security announcements, etc.
- Participate in the [biweekly community meetings](https://docs.google.com/document/d/1Sj5u47K3WUGYNPmQHGFpb52auqZb1FxSlWAQnPADhWI/edit) to discuss development, issues, use cases, etc.
- Join the `#eraser` channel on the [Kubernetes Slack](https://slack.k8s.io/)
- View the [development setup instructions](https://eraser-dev.github.io/eraser/docs/development)
This project welcomes contributions and suggestions.
This project has adopted the [CNCF Code of Conduct](https://github.com/cncf/foundation/blob/main/code-of-conduct.md).
================================================
FILE: docs/versioned_docs/version-v0.4.x/custom-scanner.md
================================================
---
title: Custom Scanner
---
## Creating a Custom Scanner
To create a custom scanner for non-compliant images, provide your scanner image to Eraser in deployment.
In order for the custom scanner to communicate with the collector and eraser containers, utilize `ReadCollectScanPipe()` to get the list of all non-running images to scan from collector. Then, use `WriteScanErasePipe()` to pass the images found non-compliant by your scanner to eraser for removal. Both functions can be found in [util](../../../pkg/utils/utils.go).
================================================
FILE: docs/versioned_docs/version-v0.4.x/customization.md
================================================
---
title: Customization
---
By default, successful jobs will be deleted after a period of time. You can change this behavior by setting the following flags in the eraser-controller-manager:
- `--job-cleanup-on-success-delay`: Duration to delay job deletion after successful runs. 0 means no delay. Defaults to `0`.
- `--job-cleanup-on-error-delay`: Duration to delay job deletion after errored runs. 0 means no delay. Defaults to `24h`.
- `--job-success-ratio`: Ratio of successful/total runs to consider a job successful. 1.0 means all runs must succeed. Defaults to `1.0`.
For duration, valid time units are "ns", "us" (or "µs"), "ms", "s", "m", "h".
================================================
FILE: docs/versioned_docs/version-v0.4.x/exclusion.md
================================================
---
title: Exclusion
---
## Excluding registries, repositories, and images
Eraser can exclude registries (example, `docker.io/library/*`) and also specific images with a tag (example, `docker.io/library/ubuntu:18.04`) or digest (example, `sha256:80f31da1ac7b312ba29d65080fd...`) from its removal process.
To exclude any images or registries from the removal, create configmap(s) with the label `eraser.sh/exclude.list=true` in the eraser-system namespace with a JSON file holding the excluded images.
```bash
$ cat > sample.json < `ImageList` is a cluster-scoped resource and must be called imagelist. `"*"` can be specified to remove all non-running images instead of individual images.
Creating an `ImageList` should trigger an `ImageJob` that will deploy Eraser pods on every node to perform the removal given the list of images.
```shell
$ kubectl get pods -n eraser-system
eraser-system eraser-controller-manager-55d54c4fb6-dcglq 1/1 Running 0 9m8s
eraser-system eraser-kind-control-plane 1/1 Running 0 11s
eraser-system eraser-kind-worker 1/1 Running 0 11s
eraser-system eraser-kind-worker2 1/1 Running 0 11s
```
Pods will run to completion and the images will be removed.
```shell
$ kubectl get pods -n eraser-system
eraser-system eraser-controller-manager-6d6d5594d4-phl2q 1/1 Running 0 4m16s
eraser-system eraser-kind-control-plane 0/1 Completed 0 22s
eraser-system eraser-kind-worker 0/1 Completed 0 22s
eraser-system eraser-kind-worker2 0/1 Completed 0 22s
```
The `ImageList` custom resource status field will contain the status of the last job. The success and failure counts indicate the number of nodes the Eraser agent was run on.
```shell
$ kubectl describe ImageList imagelist
...
Status:
Failed: 0
Success: 3
Timestamp: 2022-02-25T23:41:55Z
...
```
Verify the unused images are removed.
```shell
$ docker exec kind-worker ctr -n k8s.io images list | grep alpine
```
If the image has been successfully removed, there will be no output.
================================================
FILE: docs/versioned_docs/version-v0.4.x/quick-start.md
================================================
---
title: Quick Start
---
This tutorial demonstrates the functionality of Eraser and validates that non-running images are removed successfully.
## Deploy a DaemonSet
After following the [install instructions](installation.md), we'll apply a demo `DaemonSet`. For illustrative purposes, a DaemonSet is applied and deleted so the non-running images remain on all nodes. The alpine image with the `3.7.3` tag will be used in this example. This is an image with a known critical vulnerability.
First, apply the `DaemonSet`:
```shell
cat < 45m v1.24.0
kind-worker2 Ready 44m v1.24.0
```
List the images then filter for `alpine`:
```shell
$ docker exec kind-worker ctr -n k8s.io images list | grep alpine
docker.io/library/alpine:3.7.3 application/vnd.docker.distribution.manifest.list.v2+json sha256:8421d9a84432575381bfabd248f1eb56f3aa21d9d7cd2511583c68c9b7511d10 2.0 MiB linux/386,linux/amd64,linux/arm/v6,linux/arm64/v8,linux/ppc64le,linux/s390x io.cri-containerd.image=managed
docker.io/library/alpine@sha256:8421d9a84432575381bfabd248f1eb56f3aa21d9d7cd2511583c68c9b7511d10 application/vnd.docker.distribution.manifest.list.v2+json sha256:8421d9a84432575381bfabd248f1eb56f3aa21d9d7cd2511583c68c9b7511d10 2.0 MiB linux/386,linux/amd64,linux/arm/v6,linux/arm64/v8,linux/ppc64le,linux/s390x io.cri-containerd.image=managed
```
## Automatically Cleaning Images
After deploying Eraser, it will automatically clean images in a regular interval. This interval can be set by `--repeat-period` argument to `eraser-controller-manager`. The default interval is 24 hours (`24h`). Valid time units are "ns", "us" (or "µs"), "ms", "s", "m", "h".
Eraser will schedule collector pods to each node in the cluster, and each pod will contain 3 containers: collector, scanner, and eraser that will run to completion.
```shell
$ kubectl get pods -n eraser-system
NAMESPACE NAME READY STATUS RESTARTS AGE
eraser-system collector-kind-control-plane-sb789 0/3 Completed 0 26m
eraser-system collector-kind-worker-j84hm 0/3 Completed 0 26m
eraser-system collector-kind-worker2-4lbdr 0/3 Completed 0 26m
eraser-system eraser-controller-manager-86cdb4cbf9-x8d7q 1/1 Running 0 26m
```
The collector container sends the list of all images to the scanner container, which scans and reports non-compliant images to the eraser container for removal of images that are non-running. Once all pods are completed, they will be automatically cleaned up.
> If you want to remove all the images periodically, you can skip the scanner container by removing the `--scanner-image` argument. If you are deploying with Helm, use `--set scanner.image.repository=""` to remove the scanner image. In this case, each collector pod will hold 2 containers: collector and eraser.
```shell
$ kubectl get pods -n eraser-system
NAMESPACE NAME READY STATUS RESTARTS AGE
eraser-system collector-kind-control-plane-ksk2b 0/2 Completed 0 50s
eraser-system collector-kind-worker-cpgqc 0/2 Completed 0 50s
eraser-system collector-kind-worker2-k25df 0/2 Completed 0 50s
eraser-system eraser-controller-manager-86cdb4cbf9-x8d7q 1/1 Running 0 55s
```
================================================
FILE: docs/versioned_docs/version-v0.4.x/releasing.md
================================================
---
title: Releasing
---
## Overview
The release process consists of three phases: versioning, building, and publishing.
Versioning involves maintaining the following files:
- **Makefile** - the Makefile contains a VERSION variable that defines the version of the project.
- **manager.yaml** - the controller-manager deployment yaml contains the latest release tag image of the project.
- **eraser.yaml** - the eraser.yaml contains all eraser resources to be deployed to a cluster including the latest release tag image of the project.
The steps below explain how to update these files. In addition, the repository should be tagged with the semantic version identifying the release.
Building involves obtaining a copy of the repository and triggering a build as part of the GitHub Actions CI pipeline.
Publishing involves creating a release tag and creating a new _Release_ on GitHub.
## Versioning
1. Obtain a copy of the repository.
```
git clone git@github.com:eraser-dev/eraser.git
```
1. If this is a patch release for a release branch, check out applicable branch, such as `release-0.1`. If not, branch should be `main`
1. Execute the release-patch target to generate patch. Give the semantic version of the release:
```
make release-manifest NEWVERSION=vX.Y.Z
```
1. Promote staging manifest to release.
```
make promote-staging-manifest
```
1. If it's a new minor release (e.g. v0.**4**.x -> 0.**5**.0), tag docs to be versioned. Make sure to keep patch version as `.x` for a minor release.
```
make version-docs NEWVERSION=v0.5.x
```
1. Preview the changes:
```
git status
git diff
```
## Building and releasing
1. Commit the changes and push to remote repository to create a pull request.
```
git checkout -b release-<NEW VERSION>
git commit -a -s -m "Prepare <NEW VERSION> release"
git push <YOUR FORK>
```
2. Once the PR is merged to `main` or `release` branch (`<RELEASE BRANCH NAME>` below), tag that commit with release version and push tags to remote repository.
```
git checkout <RELEASE BRANCH NAME>
git pull origin <RELEASE BRANCH NAME>
git tag -a <NEW VERSION> -m '<NEW VERSION>'
git push origin <NEW VERSION>
```
3. Pushing the release tag will trigger the GitHub Actions `release` job.
This will build the `ghcr.io/eraser-dev/eraser` and `ghcr.io/eraser-dev/eraser-manager` images automatically, then publish the new release tag.
## Publishing
1. GitHub Action will create a new release, review and edit it at https://github.com/eraser-dev/eraser/releases
================================================
FILE: docs/versioned_docs/version-v0.4.x/setup.md
================================================
---
title: Setup
---
# Development Setup
This document describes the steps to get started with development.
You can either utilize [Codespaces](https://docs.github.com/en/codespaces/overview) or setup a local environment.
## Local Setup
### Prerequisites:
- [go](https://go.dev/) with version 1.17 or later.
- [docker](https://docs.docker.com/get-docker/)
- [kind](https://kind.sigs.k8s.io/)
- `make`
### Get things running
- Get dependencies with `go get`
- This project uses `make`. You can utilize `make help` to see available targets. For local deployment make targets help to build, test and deploy.
### Making changes
Please refer to [Development Reference](#development-reference) for more details on the specific commands.
To test your changes on a cluster:
```bash
# generate necessary api files (optional - only needed if changes to api folder).
make generate
# build applicable images
make docker-build-manager MANAGER_IMG=eraser-manager:dev
make docker-build-eraser ERASER_IMG=eraser:dev
make docker-build-collector COLLECTOR_IMG=collector:dev
make docker-build-trivy-scanner TRIVY_SCANNER_IMG=eraser-trivy-scanner:dev
# make sure updated image is present on cluster (e.g., see kind example below)
kind load docker-image \
eraser-manager:dev \
eraser-trivy-scanner:dev \
eraser:dev \
collector:dev
make manifests
make deploy
# to remove the deployment
make undeploy
```
To test your changes to manager locally:
```bash
make run
```
Example Output:
```
you@local:~/eraser$ make run
docker build . \
-t eraser-tooling \
-f build/tooling/Dockerfile
[+] Building 7.8s (8/8) FINISHED
=> => naming to docker.io/library/eraser-tooling 0.0s
docker run -v /home/eraser/config:/config -w /config/manager \
registry.k8s.io/kustomize/kustomize:v3.8.9 edit set image controller=eraser-manager:dev
docker run -v /home/eraser:/eraser eraser-tooling controller-gen \
crd \
rbac:roleName=manager-role \
webhook \
paths="./..." \
output:crd:artifacts:config=config/crd/bases
rm -rf manifest_staging
mkdir -p manifest_staging/deploy
docker run --rm -v /home/eraser:/eraser \
registry.k8s.io/kustomize/kustomize:v3.8.9 build \
/eraser/config/default -o /eraser/manifest_staging/deploy/eraser.yaml
docker run -v /home/eraser:/eraser eraser-tooling controller-gen object:headerFile="hack/boilerplate.go.txt" paths="./..."
go fmt ./...
go vet ./...
go run ./main.go
{"level":"info","ts":1652985685.1663408,"logger":"controller-runtime.metrics","msg":"Metrics server is starting to listen","addr":":8080"}
...
```
## Development Reference
Eraser is using tooling from [kubebuilder](https://github.com/kubernetes-sigs/kubebuilder). For Eraser this tooling is containerized into the `eraser-tooling` image. The `make` targets can use this tooling and build the image when necessary.
You can override the default configuration using environment variables. Below you can find a reference of targets and configuration options.
### Common Configuration
| Environment Variable | Description |
| -------------------- | --------------------------------------------------------------------------------------------- |
| VERSION | Specifies the version (i.e., the image tag) of eraser to be used. |
| MANAGER_IMG | Defines the image url for the Eraser manager. Used for tagging, pulling and pushing the image |
| ERASER_IMG | Defines the image url for the Eraser. Used for tagging, pulling and pushing the image |
| COLLECTOR_IMG | Defines the image url for the Collector. Used for tagging, pulling and pushing the image |
### Linting
- `make lint`
Lints the go code.
| Environment Variable | Description |
| -------------------- | ------------------------------------------------------- |
| GOLANGCI_LINT | Specifies the go linting binary to be used for linting. |
### Development
- `make generate`
Generates necessary files for the k8s api stored under `api/v1alpha1/zz_generated.deepcopy.go`. See the [kubebuilder docs](https://book.kubebuilder.io/cronjob-tutorial/other-api-files.html) for details.
- `make manifests`
Generates the eraser deployment yaml files under `manifest_staging/deploy`.
Configuration Options:
| Environment Variable | Description |
| -------------------- | -------------------------------------------------- |
| ERASER_IMG | Defines the image url for the Eraser. |
| MANAGER_IMG | Defines the image url for the Eraser manager. |
| KUSTOMIZE_VERSION | Define Kustomize version for generating manifests. |
- `make test`
Runs the unit tests for the eraser project.
Configuration Options:
| Environment Variable | Description |
| -------------------- | ----------------------------------------------------------- |
| ENVTEST | Specifies the envtest setup binary. |
| ENVTEST_K8S_VERSION | Specifies the Kubernetes version for envtest setup command. |
- `make e2e-test`
Runs e2e tests on a cluster.
Configuration Options:
| Environment Variable | Description |
| -------------------- | ------------------------------------------------------------------------------------------------------------- |
| ERASER_IMG | Eraser image to be used for e2e test. |
| MANAGER_IMG | Eraser manager image to be used for e2e test. |
| KUBERNETES_VERSION | Kubernetes version for e2e test. |
| TEST_COUNT | Sets repetition for test. Please refer to [go docs](https://pkg.go.dev/cmd/go#hdr-Testing_flags) for details. |
| TIMEOUT | Sets timeout for test. Please refer to [go docs](https://pkg.go.dev/cmd/go#hdr-Testing_flags) for details. |
| TESTFLAGS | Sets additional test flags |
### Build
- `make build`
Builds the eraser manager binaries.
- `make run`
Runs the eraser manager on your local machine.
- `make docker-build-manager`
Builds the docker image for the eraser manager.
Configuration Options:
| Environment Variable | Description |
| -------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------ |
| CACHE_FROM | Sets the target of the buildx --cache-from flag [see buildx reference](https://docs.docker.com/engine/reference/commandline/buildx_build/#cache-from). |
| CACHE_TO | Sets the target of the buildx --cache-to flag [see buildx reference](https://docs.docker.com/engine/reference/commandline/buildx_build/#cache-to). |
| PLATFORM | Sets the target platform for buildx [see buildx reference](https://docs.docker.com/engine/reference/commandline/buildx_build/#platform). |
| OUTPUT_TYPE | Sets the output for buildx [see buildx reference](https://docs.docker.com/engine/reference/commandline/buildx_build/#output). |
| MANAGER_IMG | Specifies the target repository, image name and tag for building image. |
- `make docker-push-manager`
Pushes the docker image for the eraser manager.
Configuration Options:
| Environment Variable | Description |
| -------------------- | ----------------------------------------------------------------------- |
| MANAGER_IMG | Specifies the target repository, image name and tag for building image. |
- `make docker-build-eraser`
Builds the docker image for the eraser.
Configuration Options:
| Environment Variable | Description |
| -------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------ |
| CACHE_FROM | Sets the target of the buildx --cache-from flag [see buildx reference](https://docs.docker.com/engine/reference/commandline/buildx_build/#cache-from). |
| CACHE_TO | Sets the target of the buildx --cache-to flag [see buildx reference](https://docs.docker.com/engine/reference/commandline/buildx_build/#cache-to). |
| PLATFORM | Sets the target platform for buildx [see buildx reference](https://docs.docker.com/engine/reference/commandline/buildx_build/#platform). |
| OUTPUT_TYPE | Sets the output for buildx [see buildx reference](https://docs.docker.com/engine/reference/commandline/buildx_build/#output). |
| ERASER_IMG | Specifies the target repository, image name and tag for building image. |
- `make docker-push-eraser`
Pushes the docker image for the eraser.
Configuration Options:
| Environment Variable | Description |
| -------------------- | ----------------------------------------------------------------------- |
| ERASER_IMG | Specifies the target repository, image name and tag for building image. |
- `make docker-build-collector`
Builds the docker image for the eraser collector.
Configuration Options:
| Environment Variable | Description |
| -------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------ |
| CACHE_FROM | Sets the target of the buildx --cache-from flag [see buildx reference](https://docs.docker.com/engine/reference/commandline/buildx_build/#cache-from). |
| CACHE_TO | Sets the target of the buildx --cache-to flag [see buildx reference](https://docs.docker.com/engine/reference/commandline/buildx_build/#cache-to). |
| PLATFORM | Sets the target platform for buildx [see buildx reference](https://docs.docker.com/engine/reference/commandline/buildx_build/#platform). |
| OUTPUT_TYPE | Sets the output for buildx [see buildx reference](https://docs.docker.com/engine/reference/commandline/buildx_build/#output). |
| COLLECTOR_IMG | Specifies the target repository, image name and tag for building image. |
- `make docker-push-collector`
Pushes the docker image for the eraser collector.
Configuration Options:
| Environment Variable | Description |
| -------------------- | ----------------------------------------------------------------------- |
| COLLECTOR_IMG | Specifies the target repository, image name and tag for building image. |
### Deployment
- `make install`
Install CRDs into the K8s cluster specified in ~/.kube/config.
Configuration Options:
| Environment Variable | Description |
| -------------------- | ---------------------------------------------------------------- |
| KUSTOMIZE_VERSION | Kustomize version used to generate k8s resources for deployment. |
- `make uninstall`
Uninstall CRDs from the K8s cluster specified in ~/.kube/config.
Configuration Options:
| Environment Variable | Description |
| -------------------- | ---------------------------------------------------------------- |
| KUSTOMIZE_VERSION | Kustomize version used to generate k8s resources for deployment. |
- `make deploy`
Deploys eraser to the cluster specified in ~/.kube/config.
Configuration Options:
| Environment Variable | Description |
| -------------------- | -------------------------------------------------------------------- |
| KUSTOMIZE_VERSION | Kustomize version used to generate k8s resources for deployment. |
| MANAGER_IMG | Specifies the eraser manager image version to be used for deployment |
- `make undeploy`
Undeploy controller from the K8s cluster specified in ~/.kube/config.
Configuration Options:
| Environment Variable | Description |
| -------------------- | ------------------------------------------------------------------------- |
| KUSTOMIZE_VERSION | Kustomize version used to generate k8s resources that need to be removed. |
### Release
- `make release-manifest`
Generates k8s manifests files for a release.
Configuration Options:
| Environment Variable | Description |
| -------------------- | ------------------------------------ |
| NEWVERSION | Sets the new version in the Makefile |
- `make promote-staging-manifest`
Promotes the k8s deployment yaml files to release.
================================================
FILE: docs/versioned_docs/version-v0.4.x/trivy.md
================================================
---
title: Trivy
---
## Trivy Provider Options
The trivy provider is used in Eraser for image scanning and detecting vulnerabilities. The following arguments can be supplied to the scanner to specify which types of images will be detected for removal by the trivy scanner container:
* --ignore-unfixed: boolean to report only fixed vulnerabilities (default true)
* --security-checks: comma-separated list of what security issues to detect (default "vuln")
* --severity: comma-separated list of severity levels to report (default "CRITICAL")
* --delete-scan-failed-images: boolean to delete images for which scanning has failed (default true)
================================================
FILE: docs/versioned_docs/version-v0.5.x/architecture.md
================================================
---
title: Architecture
---
At a high level, Eraser has two main modes of operation: manual and automated.
Manual image removal involves supplying a list of images to remove; Eraser then
deploys pods to clean up the images you supplied.
Automated image removal runs on a timer. By default, the automated process
removes images based on the results of a vulnerability scan. The default
vulnerability scanner is Trivy, but others can be provided in its place. Or,
the scanner can be disabled altogether, in which case Eraser acts as a garbage
collector -- it will remove all non-running images in your cluster.
## Manual image cleanup
Note: metrics are not yet implemented in Eraser v0.5.x, but will be available in the upcoming v1.0.0 release.
## Automated analysis, scanning, and cleanup
================================================
FILE: docs/versioned_docs/version-v0.5.x/code-of-conduct.md
================================================
---
title: Code of Conduct
---
This project has adopted the [CNCF Code of Conduct](https://github.com/cncf/foundation/blob/main/code-of-conduct.md).
Resources:
- [CNCF Code of Conduct](https://github.com/cncf/foundation/blob/main/code-of-conduct.md)
- [Code of Conduct Reporting](https://github.com/cncf/foundation/blob/main/code-of-conduct.md)
================================================
FILE: docs/versioned_docs/version-v0.5.x/contributing.md
================================================
---
title: Contributing
---
There are several ways to get involved with Eraser
- Join the [mailing list](https://groups.google.com/u/1/g/eraser-dev) to get notifications for releases, security announcements, etc.
- Participate in the [biweekly community meetings](https://docs.google.com/document/d/1Sj5u47K3WUGYNPmQHGFpb52auqZb1FxSlWAQnPADhWI/edit) to discuss development, issues, use cases, etc.
- Join the `#eraser` channel on the [Kubernetes Slack](https://slack.k8s.io/)
- View the [development setup instructions](https://eraser-dev.github.io/eraser/docs/development)
This project welcomes contributions and suggestions.
This project has adopted the [CNCF Code of Conduct](https://github.com/cncf/foundation/blob/main/code-of-conduct.md).
================================================
FILE: docs/versioned_docs/version-v0.5.x/custom-scanner.md
================================================
---
title: Custom Scanner
---
## Creating a Custom Scanner
To create a custom scanner for non-compliant images, use the following [template](https://github.com/eraser-dev/eraser-scanner-template/).
In order to customize your scanner, start by creating a `NewImageProvider()`. The ImageProvider interface can be found [here](../../../pkg/scanners/template/scanner_template.go).
The ImageProvider will allow you to retrieve the list of all non-running and non-excluded images from the collector container through the `ReceiveImages()` function. Process these images with your customized scanner and threshold, and use `SendImages()` to pass the images found non-compliant to the eraser container for removal. Finally, complete the scanning process by calling `Finish()`.
When complete, provide your custom scanner image to Eraser in deployment.
================================================
FILE: docs/versioned_docs/version-v0.5.x/customization.md
================================================
---
title: Customization
---
By default, successful jobs will be deleted after a period of time. You can change this behavior by setting the following flags in the eraser-controller-manager:
- `--job-cleanup-on-success-delay`: Duration to delay job deletion after successful runs. 0 means no delay. Defaults to `0`.
- `--job-cleanup-on-error-delay`: Duration to delay job deletion after errored runs. 0 means no delay. Defaults to `24h`.
- `--job-success-ratio`: Ratio of successful/total runs to consider a job successful. 1.0 means all runs must succeed. Defaults to `1.0`.
For duration, valid time units are "ns", "us" (or "µs"), "ms", "s", "m", "h".
================================================
FILE: docs/versioned_docs/version-v0.5.x/exclusion.md
================================================
---
title: Exclusion
---
## Excluding registries, repositories, and images
Eraser can exclude registries (example, `docker.io/library/*`) and also specific images with a tag (example, `docker.io/library/ubuntu:18.04`) or digest (example, `sha256:80f31da1ac7b312ba29d65080fd...`) from its removal process.
To exclude any images or registries from the removal, create configmap(s) with the label `eraser.sh/exclude.list=true` in the eraser-system namespace with a JSON file holding the excluded images.
```bash
$ cat > sample.json <<EOF
{
  "excluded": [
    "docker.io/library/*",
    "docker.io/library/ubuntu:18.04"
  ]
}
EOF
$ kubectl create configmap excluded --from-file=excluded=sample.json --namespace=eraser-system
$ kubectl label configmap excluded eraser.sh/exclude.list=true -n eraser-system
```
================================================
FILE: docs/versioned_docs/version-v0.5.x/manual-removal.md
================================================
---
title: Manual Removal
---
Create an `ImageList` and specify the images you would like to remove. In this example, the image `docker.io/library/alpine:3.7.3` will be removed.
```shell
cat <<EOF | kubectl apply -f -
apiVersion: eraser.sh/v1alpha1
kind: ImageList
metadata:
  name: imagelist
spec:
  images:
    - docker.io/library/alpine:3.7.3
EOF
```
> `ImageList` is a cluster-scoped resource and must be called imagelist. `"*"` can be specified to remove all non-running images instead of individual images.
Creating an `ImageList` should trigger an `ImageJob` that will deploy Eraser pods on every node to perform the removal given the list of images.
```shell
$ kubectl get pods -n eraser-system
eraser-system eraser-controller-manager-55d54c4fb6-dcglq 1/1 Running 0 9m8s
eraser-system eraser-kind-control-plane 1/1 Running 0 11s
eraser-system eraser-kind-worker 1/1 Running 0 11s
eraser-system eraser-kind-worker2 1/1 Running 0 11s
```
Pods will run to completion and the images will be removed.
```shell
$ kubectl get pods -n eraser-system
eraser-system eraser-controller-manager-6d6d5594d4-phl2q 1/1 Running 0 4m16s
eraser-system eraser-kind-control-plane 0/1 Completed 0 22s
eraser-system eraser-kind-worker 0/1 Completed 0 22s
eraser-system eraser-kind-worker2 0/1 Completed 0 22s
```
The `ImageList` custom resource status field will contain the status of the last job. The success and failure counts indicate the number of nodes the Eraser agent was run on.
```shell
$ kubectl describe ImageList imagelist
...
Status:
Failed: 0
Success: 3
Timestamp: 2022-02-25T23:41:55Z
...
```
Verify the unused images are removed.
```shell
$ docker exec kind-worker ctr -n k8s.io images list | grep alpine
```
If the image has been successfully removed, there will be no output.
================================================
FILE: docs/versioned_docs/version-v0.5.x/quick-start.md
================================================
---
title: Quick Start
---
This tutorial demonstrates the functionality of Eraser and validates that non-running images are removed successfully.
## Deploy a DaemonSet
After following the [install instructions](installation.md), we'll apply a demo `DaemonSet`. For illustrative purposes, a DaemonSet is applied and deleted so the non-running images remain on all nodes. The alpine image with the `3.7.3` tag will be used in this example. This is an image with a known critical vulnerability.
First, apply the `DaemonSet`:
```shell
cat <<EOF | kubectl apply -f -
apiVersion: apps/v1
kind: DaemonSet
metadata:
  name: alpine
spec:
  selector:
    matchLabels:
      app: alpine
  template:
    metadata:
      labels:
        app: alpine
    spec:
      containers:
      - name: alpine
        image: docker.io/library/alpine:3.7.3
EOF
```
Next, delete the `DaemonSet`:
```shell
kubectl delete daemonset alpine
```
Verify the nodes in the cluster:
```shell
$ kubectl get nodes
NAME                 STATUS   ROLES           AGE   VERSION
kind-control-plane   Ready    control-plane   45m   v1.24.0
kind-worker          Ready    <none>          45m   v1.24.0
kind-worker2         Ready    <none>          44m   v1.24.0
```
List the images then filter for `alpine`:
```shell
$ docker exec kind-worker ctr -n k8s.io images list | grep alpine
docker.io/library/alpine:3.7.3 application/vnd.docker.distribution.manifest.list.v2+json sha256:8421d9a84432575381bfabd248f1eb56f3aa21d9d7cd2511583c68c9b7511d10 2.0 MiB linux/386,linux/amd64,linux/arm/v6,linux/arm64/v8,linux/ppc64le,linux/s390x io.cri-containerd.image=managed
docker.io/library/alpine@sha256:8421d9a84432575381bfabd248f1eb56f3aa21d9d7cd2511583c68c9b7511d10 application/vnd.docker.distribution.manifest.list.v2+json sha256:8421d9a84432575381bfabd248f1eb56f3aa21d9d7cd2511583c68c9b7511d10 2.0 MiB linux/386,linux/amd64,linux/arm/v6,linux/arm64/v8,linux/ppc64le,linux/s390x io.cri-containerd.image=managed
```
## Automatically Cleaning Images
After deploying Eraser, it will automatically clean images in a regular interval. This interval can be set by `--repeat-period` argument to `eraser-controller-manager`. The default interval is 24 hours (`24h`). Valid time units are "ns", "us" (or "µs"), "ms", "s", "m", "h".
Eraser will schedule collector pods to each node in the cluster, and each pod will contain 3 containers: collector, scanner, and eraser that will run to completion.
```shell
$ kubectl get pods -n eraser-system
NAMESPACE NAME READY STATUS RESTARTS AGE
eraser-system collector-kind-control-plane-sb789 0/3 Completed 0 26m
eraser-system collector-kind-worker-j84hm 0/3 Completed 0 26m
eraser-system collector-kind-worker2-4lbdr 0/3 Completed 0 26m
eraser-system eraser-controller-manager-86cdb4cbf9-x8d7q 1/1 Running 0 26m
```
The collector container sends the list of all images to the scanner container, which scans and reports non-compliant images to the eraser container for removal of images that are non-running. Once all pods are completed, they will be automatically cleaned up.
> If you want to remove all the images periodically, you can skip the scanner container by removing the `--scanner-image` argument. If you are deploying with Helm, use `--set scanner.image.repository=""` to remove the scanner image. In this case, each collector pod will hold 2 containers: collector and eraser.
```shell
$ kubectl get pods -n eraser-system
NAMESPACE NAME READY STATUS RESTARTS AGE
eraser-system collector-kind-control-plane-ksk2b 0/2 Completed 0 50s
eraser-system collector-kind-worker-cpgqc 0/2 Completed 0 50s
eraser-system collector-kind-worker2-k25df 0/2 Completed 0 50s
eraser-system eraser-controller-manager-86cdb4cbf9-x8d7q 1/1 Running 0 55s
```
================================================
FILE: docs/versioned_docs/version-v0.5.x/releasing.md
================================================
---
title: Releasing
---
## Overview
The release process consists of three phases: versioning, building, and publishing.
Versioning involves maintaining the following files:
- **Makefile** - the Makefile contains a VERSION variable that defines the version of the project.
- **manager.yaml** - the controller-manager deployment yaml contains the latest release tag image of the project.
- **eraser.yaml** - the eraser.yaml contains all eraser resources to be deployed to a cluster including the latest release tag image of the project.
The steps below explain how to update these files. In addition, the repository should be tagged with the semantic version identifying the release.
Building involves obtaining a copy of the repository and triggering a build as part of the GitHub Actions CI pipeline.
Publishing involves creating a release tag and creating a new _Release_ on GitHub.
## Versioning
1. Obtain a copy of the repository.
```
git clone git@github.com:eraser-dev/eraser.git
```
1. If this is a patch release for a release branch, check out the applicable branch, such as `release-0.1`. If not, the branch should be `main`.
1. Execute the release-patch target to generate patch. Give the semantic version of the release:
```
make release-manifest NEWVERSION=vX.Y.Z
```
1. Promote staging manifest to release.
```
make promote-staging-manifest
```
1. If it's a new minor release (e.g. v0.**4**.x -> 0.**5**.0), tag docs to be versioned. Make sure to keep patch version as `.x` for a minor release.
```
make version-docs NEWVERSION=v0.5.x
```
1. Preview the changes:
```
git status
git diff
```
## Building and releasing
1. Commit the changes and push to remote repository to create a pull request.
```
git checkout -b release-<NEW VERSION>
git commit -a -s -m "Prepare <NEW VERSION> release"
git push <YOUR FORK>
```
2. Once the PR is merged to `main` or `release` branch (`<RELEASE BRANCH NAME>` below), tag that commit with release version and push tags to remote repository.
```
git checkout <RELEASE BRANCH NAME>
git pull origin <RELEASE BRANCH NAME>
git tag -a <NEW VERSION> -m '<NEW VERSION>'
git push origin <NEW VERSION>
```
3. Pushing the release tag will trigger the GitHub Actions `release` job.
This will build the `ghcr.io/eraser-dev/eraser` and `ghcr.io/eraser-dev/eraser-manager` images automatically, then publish the new release tag.
## Publishing
1. GitHub Action will create a new release, review and edit it at https://github.com/eraser-dev/eraser/releases
================================================
FILE: docs/versioned_docs/version-v0.5.x/setup.md
================================================
---
title: Setup
---
# Development Setup
This document describes the steps to get started with development.
You can either utilize [Codespaces](https://docs.github.com/en/codespaces/overview) or setup a local environment.
## Local Setup
### Prerequisites:
- [go](https://go.dev/) with version 1.17 or later.
- [docker](https://docs.docker.com/get-docker/)
- [kind](https://kind.sigs.k8s.io/)
- `make`
### Get things running
- Get dependencies with `go get`
- This project uses `make`. You can utilize `make help` to see available targets. For local deployment make targets help to build, test and deploy.
### Making changes
Please refer to [Development Reference](#development-reference) for more details on the specific commands.
To test your changes on a cluster:
```bash
# generate necessary api files (optional - only needed if changes to api folder).
make generate
# build applicable images
make docker-build-manager MANAGER_IMG=eraser-manager:dev
make docker-build-eraser ERASER_IMG=eraser:dev
make docker-build-collector COLLECTOR_IMG=collector:dev
make docker-build-trivy-scanner TRIVY_SCANNER_IMG=eraser-trivy-scanner:dev
# make sure updated image is present on cluster (e.g., see kind example below)
kind load docker-image \
eraser-manager:dev \
eraser-trivy-scanner:dev \
eraser:dev \
collector:dev
make manifests
make deploy
# to remove the deployment
make undeploy
```
To test your changes to manager locally:
```bash
make run
```
Example Output:
```
you@local:~/eraser$ make run
docker build . \
-t eraser-tooling \
-f build/tooling/Dockerfile
[+] Building 7.8s (8/8) FINISHED
=> => naming to docker.io/library/eraser-tooling 0.0s
docker run -v /home/eraser/config:/config -w /config/manager \
registry.k8s.io/kustomize/kustomize:v3.8.9 edit set image controller=eraser-manager:dev
docker run -v /home/eraser:/eraser eraser-tooling controller-gen \
crd \
rbac:roleName=manager-role \
webhook \
paths="./..." \
output:crd:artifacts:config=config/crd/bases
rm -rf manifest_staging
mkdir -p manifest_staging/deploy
docker run --rm -v /home/eraser:/eraser \
registry.k8s.io/kustomize/kustomize:v3.8.9 build \
/eraser/config/default -o /eraser/manifest_staging/deploy/eraser.yaml
docker run -v /home/eraser:/eraser eraser-tooling controller-gen object:headerFile="hack/boilerplate.go.txt" paths="./..."
go fmt ./...
go vet ./...
go run ./main.go
{"level":"info","ts":1652985685.1663408,"logger":"controller-runtime.metrics","msg":"Metrics server is starting to listen","addr":":8080"}
...
```
## Development Reference
Eraser is using tooling from [kubebuilder](https://github.com/kubernetes-sigs/kubebuilder). For Eraser this tooling is containerized into the `eraser-tooling` image. The `make` targets can use this tooling and build the image when necessary.
You can override the default configuration using environment variables. Below you can find a reference of targets and configuration options.
### Common Configuration
| Environment Variable | Description |
| -------------------- | --------------------------------------------------------------------------------------------- |
| VERSION | Specifies the version (i.e., the image tag) of eraser to be used. |
| MANAGER_IMG | Defines the image url for the Eraser manager. Used for tagging, pulling and pushing the image |
| ERASER_IMG | Defines the image url for the Eraser. Used for tagging, pulling and pushing the image |
| COLLECTOR_IMG | Defines the image url for the Collector. Used for tagging, pulling and pushing the image |
### Linting
- `make lint`
Lints the go code.
| Environment Variable | Description |
| -------------------- | ------------------------------------------------------- |
| GOLANGCI_LINT | Specifies the go linting binary to be used for linting. |
### Development
- `make generate`
Generates necessary files for the k8s api stored under `api/v1alpha1/zz_generated.deepcopy.go`. See the [kubebuilder docs](https://book.kubebuilder.io/cronjob-tutorial/other-api-files.html) for details.
- `make manifests`
Generates the eraser deployment yaml files under `manifest_staging/deploy`.
Configuration Options:
| Environment Variable | Description |
| -------------------- | -------------------------------------------------- |
| ERASER_IMG | Defines the image url for the Eraser. |
| MANAGER_IMG | Defines the image url for the Eraser manager. |
| KUSTOMIZE_VERSION | Define Kustomize version for generating manifests. |
- `make test`
Runs the unit tests for the eraser project.
Configuration Options:
| Environment Variable | Description |
| -------------------- | ----------------------------------------------------------- |
| ENVTEST | Specifies the envtest setup binary. |
| ENVTEST_K8S_VERSION | Specifies the Kubernetes version for envtest setup command. |
- `make e2e-test`
Runs e2e tests on a cluster.
Configuration Options:
| Environment Variable | Description |
| -------------------- | ------------------------------------------------------------------------------------------------------------- |
| ERASER_IMG | Eraser image to be used for e2e test. |
| MANAGER_IMG | Eraser manager image to be used for e2e test. |
| KUBERNETES_VERSION | Kubernetes version for e2e test. |
| TEST_COUNT | Sets repetition for test. Please refer to [go docs](https://pkg.go.dev/cmd/go#hdr-Testing_flags) for details. |
| TIMEOUT | Sets timeout for test. Please refer to [go docs](https://pkg.go.dev/cmd/go#hdr-Testing_flags) for details. |
| TESTFLAGS | Sets additional test flags |
### Build
- `make build`
Builds the eraser manager binaries.
- `make run`
Runs the eraser manager on your local machine.
- `make docker-build-manager`
Builds the docker image for the eraser manager.
Configuration Options:
| Environment Variable | Description |
| -------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------ |
| CACHE_FROM | Sets the target of the buildx --cache-from flag [see buildx reference](https://docs.docker.com/engine/reference/commandline/buildx_build/#cache-from). |
| CACHE_TO | Sets the target of the buildx --cache-to flag [see buildx reference](https://docs.docker.com/engine/reference/commandline/buildx_build/#cache-to). |
| PLATFORM | Sets the target platform for buildx [see buildx reference](https://docs.docker.com/engine/reference/commandline/buildx_build/#platform). |
| OUTPUT_TYPE | Sets the output for buildx [see buildx reference](https://docs.docker.com/engine/reference/commandline/buildx_build/#output). |
| MANAGER_IMG | Specifies the target repository, image name and tag for building image. |
- `make docker-push-manager`
Pushes the docker image for the eraser manager.
Configuration Options:
| Environment Variable | Description |
| -------------------- | ----------------------------------------------------------------------- |
| MANAGER_IMG | Specifies the target repository, image name and tag for building image. |
- `make docker-build-eraser`
  Builds the docker image for the eraser.
Configuration Options:
| Environment Variable | Description |
| -------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------ |
| CACHE_FROM | Sets the target of the buildx --cache-from flag [see buildx reference](https://docs.docker.com/engine/reference/commandline/buildx_build/#cache-from). |
| CACHE_TO | Sets the target of the buildx --cache-to flag [see buildx reference](https://docs.docker.com/engine/reference/commandline/buildx_build/#cache-to). |
| PLATFORM | Sets the target platform for buildx [see buildx reference](https://docs.docker.com/engine/reference/commandline/buildx_build/#platform). |
| OUTPUT_TYPE | Sets the output for buildx [see buildx reference](https://docs.docker.com/engine/reference/commandline/buildx_build/#output). |
| ERASER_IMG | Specifies the target repository, image name and tag for building image. |
- `make docker-push-eraser`
  Pushes the docker image for the eraser.
Configuration Options:
| Environment Variable | Description |
| -------------------- | ----------------------------------------------------------------------- |
| ERASER_IMG | Specifies the target repository, image name and tag for building image. |
- `make docker-build-collector`
Builds the docker image for the eraser collector.
Configuration Options:
| Environment Variable | Description |
| -------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------ |
| CACHE_FROM | Sets the target of the buildx --cache-from flag [see buildx reference](https://docs.docker.com/engine/reference/commandline/buildx_build/#cache-from). |
| CACHE_TO | Sets the target of the buildx --cache-to flag [see buildx reference](https://docs.docker.com/engine/reference/commandline/buildx_build/#cache-to). |
| PLATFORM | Sets the target platform for buildx [see buildx reference](https://docs.docker.com/engine/reference/commandline/buildx_build/#platform). |
| OUTPUT_TYPE | Sets the output for buildx [see buildx reference](https://docs.docker.com/engine/reference/commandline/buildx_build/#output). |
| COLLECTOR_IMG | Specifies the target repository, image name and tag for building image. |
- `make docker-push-collector`
  Pushes the docker image for the eraser collector.
Configuration Options:
| Environment Variable | Description |
| -------------------- | ----------------------------------------------------------------------- |
| COLLECTOR_IMG | Specifies the target repository, image name and tag for building image. |
### Deployment
- `make install`
Install CRDs into the K8s cluster specified in ~/.kube/config.
Configuration Options:
| Environment Variable | Description |
| -------------------- | ---------------------------------------------------------------- |
| KUSTOMIZE_VERSION | Kustomize version used to generate k8s resources for deployment. |
- `make uninstall`
Uninstall CRDs from the K8s cluster specified in ~/.kube/config.
Configuration Options:
| Environment Variable | Description |
| -------------------- | ---------------------------------------------------------------- |
| KUSTOMIZE_VERSION | Kustomize version used to generate k8s resources for deployment. |
- `make deploy`
Deploys eraser to the cluster specified in ~/.kube/config.
Configuration Options:
| Environment Variable | Description |
| -------------------- | -------------------------------------------------------------------- |
| KUSTOMIZE_VERSION | Kustomize version used to generate k8s resources for deployment. |
| MANAGER_IMG | Specifies the eraser manager image version to be used for deployment |
- `make undeploy`
Undeploy controller from the K8s cluster specified in ~/.kube/config.
Configuration Options:
| Environment Variable | Description |
| -------------------- | ------------------------------------------------------------------------- |
| KUSTOMIZE_VERSION | Kustomize version used to generate k8s resources that need to be removed. |
### Release
- `make release-manifest`
Generates k8s manifests files for a release.
Configuration Options:
| Environment Variable | Description |
| -------------------- | ------------------------------------ |
| NEWVERSION | Sets the new version in the Makefile |
- `make promote-staging-manifest`
Promotes the k8s deployment yaml files to release.
================================================
FILE: docs/versioned_docs/version-v0.5.x/trivy.md
================================================
---
title: Trivy
---
## Trivy Provider Options
The trivy provider is used in Eraser for image scanning and detecting vulnerabilities. The following arguments can be supplied to the scanner to specify which types of images will be detected for removal by the trivy scanner container:
* --ignore-unfixed: boolean to report only fixed vulnerabilities (default true)
* --security-checks: comma-separated list of what security issues to detect (default "vuln")
* --severity: comma-separated list of severity levels to report (default "CRITICAL")
* --delete-scan-failed-images: boolean to delete images for which scanning has failed (default true)
================================================
FILE: docs/versioned_docs/version-v1.0.x/architecture.md
================================================
---
title: Architecture
---
At a high level, Eraser has two main modes of operation: manual and automated.
Manual image removal involves supplying a list of images to remove; Eraser then
deploys pods to clean up the images you supplied.
Automated image removal runs on a timer. By default, the automated process
removes images based on the results of a vulnerability scan. The default
vulnerability scanner is Trivy, but others can be provided in its place. Or,
the scanner can be disabled altogether, in which case Eraser acts as a garbage
collector -- it will remove all non-running images in your cluster.
## Manual image cleanup
## Automated analysis, scanning, and cleanup
================================================
FILE: docs/versioned_docs/version-v1.0.x/code-of-conduct.md
================================================
---
title: Code of Conduct
---
This project has adopted the [CNCF Code of Conduct](https://github.com/cncf/foundation/blob/main/code-of-conduct.md).
Resources:
- [CNCF Code of Conduct](https://github.com/cncf/foundation/blob/main/code-of-conduct.md)
- [Code of Conduct Reporting](https://github.com/cncf/foundation/blob/main/code-of-conduct.md)
================================================
FILE: docs/versioned_docs/version-v1.0.x/contributing.md
================================================
---
title: Contributing
---
There are several ways to get involved with Eraser
- Join the [mailing list](https://groups.google.com/u/1/g/eraser-dev) to get notifications for releases, security announcements, etc.
- Participate in the [biweekly community meetings](https://docs.google.com/document/d/1Sj5u47K3WUGYNPmQHGFpb52auqZb1FxSlWAQnPADhWI/edit) to discuss development, issues, use cases, etc.
- Join the `#eraser` channel on the [Kubernetes Slack](https://slack.k8s.io/)
- View the [development setup instructions](https://eraser-dev.github.io/eraser/docs/development)
This project welcomes contributions and suggestions.
This project has adopted the [CNCF Code of Conduct](https://github.com/cncf/foundation/blob/main/code-of-conduct.md).
================================================
FILE: docs/versioned_docs/version-v1.0.x/custom-scanner.md
================================================
---
title: Custom Scanner
---
## Creating a Custom Scanner
To create a custom scanner for non-compliant images, use the following [template](https://github.com/eraser-dev/eraser-scanner-template/).
In order to customize your scanner, start by creating a `NewImageProvider()`. The ImageProvider interface can be found [here](../../../pkg/scanners/template/scanner_template.go).
The ImageProvider will allow you to retrieve the list of all non-running and non-excluded images from the collector container through the `ReceiveImages()` function. Process these images with your customized scanner and threshold, and use `SendImages()` to pass the images found non-compliant to the eraser container for removal. Finally, complete the scanning process by calling `Finish()`.
When complete, provide your custom scanner image to Eraser in deployment.
================================================
FILE: docs/versioned_docs/version-v1.0.x/customization.md
================================================
---
title: Customization
---
## Overview
Eraser uses a configmap to configure its behavior. The configmap is part of the
deployment and it is not necessary to deploy it manually. Once deployed, the configmap
can be edited at any time:
```bash
kubectl edit configmap --namespace eraser-system eraser-manager-config
```
If an eraser job is already running, the changes will not take effect until the job completes.
The configuration is in yaml.
## Key Concepts
### Basic architecture
The _manager_ runs as a pod in your cluster and manages _ImageJobs_. Think of
an _ImageJob_ as a unit of work, performed on every node in your cluster. Each
node runs a sub-job. The goal of the _ImageJob_ is to assess the images on your
cluster's nodes, and to remove the images you don't want. There are two stages:
1. Assessment
1. Removal.
### Scheduling
An _ImageJob_ can either be created on-demand (see [Manual Removal](https://eraser-dev.github.io/eraser/docs/manual-removal)),
or they can be spawned on a timer like a cron job. On-demand jobs skip the
assessment stage and get right down to the business of removing the images you
specified. The behavior of an on-demand job is quite different from that of
timed jobs.
### Fault Tolerance
Because an _ImageJob_ runs on every node in your cluster, and the conditions on
each node may vary widely, some of the sub-jobs may fail. If you cannot
tolerate any failure, set the `manager.imageJob.successRatio` property to
`1.0`. If 75% success sounds good to you, set it to `0.75`. In that case, if
fewer than 75% of the pods spawned by the _ImageJob_ report success, the job as
a whole will be marked as a failure.
This is mainly to help diagnose error conditions. As such, you can set
`manager.imageJob.cleanup.delayOnFailure` to a long value so that logs can be
captured before the spawned pods are cleaned up.
### Excluding Nodes
For various reasons, you may want to prevent Eraser from scheduling pods on
certain nodes. To do so, the nodes can be given a special label. By default,
this label is `eraser.sh/cleanup.filter`, but you can configure the behavior with
the options under `manager.nodeFilter`. The [table](#detailed-options) provides more detail.
### Configuring Components
An _ImageJob_ is made up of various sub-jobs, with one sub-job for each node.
These sub-jobs can be broken down further into three stages.
1. Collection (What is on the node?)
1. Scanning (What images conform to the policy I've provided?)
1. Removal (Remove images based on the results of the above)
Of the above stages, only Removal is mandatory. The others can be disabled.
Furthermore, manually triggered _ImageJobs_ will skip right to removal, even if
Eraser is configured to collect and scan. Collection and Scanning will only
take place when:
1. The collector and/or scanner `components` are enabled, AND
1. The job was *not* triggered manually by creating an _ImageList_.
Disabling scanner will remove all non-running images by default.
### Swapping out components
The collector, scanner, and eraser components can all be swapped out. This
enables you to build and host the images yourself. In addition, the scanner's
behavior can be completely tailored to your needs by swapping out the default
image with one of your own. To specify the images, use the
`components.<component>.image.repo` and `components.<component>.image.tag` options,
where `<component>` is one of `collector`, `scanner`, or `eraser`.
## Universal Options
The following portions of the configmap apply no matter how you spawn your
_ImageJob_. The values provided below are the defaults. For more detail on
these options, see the [table](#detailed-options).
```yaml
manager:
runtime: containerd
otlpEndpoint: "" # empty string disables OpenTelemetry
logLevel: info
profile:
enabled: false
port: 6060
imageJob:
successRatio: 1.0
cleanup:
delayOnSuccess: 0s
delayOnFailure: 24h
pullSecrets: [] # image pull secrets for collector/scanner/eraser
priorityClassName: "" # priority class name for collector/scanner/eraser
nodeFilter:
type: exclude # must be either exclude|include
selectors:
- eraser.sh/cleanup.filter
- kubernetes.io/os=windows
components:
eraser:
image:
repo: ghcr.io/eraser-dev/eraser
tag: v1.0.0
request:
mem: 25Mi
cpu: 0
limit:
mem: 30Mi
cpu: 1000m
```
## Component Options
```yaml
components:
collector:
enabled: true
image:
repo: ghcr.io/eraser-dev/collector
tag: v1.0.0
request:
mem: 25Mi
cpu: 7m
limit:
mem: 500Mi
cpu: 0
scanner:
enabled: true
image:
repo: ghcr.io/eraser-dev/eraser-trivy-scanner
tag: v1.0.0
request:
mem: 500Mi
cpu: 1000m
limit:
mem: 2Gi
cpu: 0
config: |
# this is the schema for the provided 'trivy-scanner'. custom scanners
# will define their own configuration. see the below
eraser:
image:
repo: ghcr.io/eraser-dev/eraser
tag: v1.0.0
request:
mem: 25Mi
cpu: 0
limit:
mem: 30Mi
cpu: 1000m
```
## Scanner Options
These options can be provided to `components.scanner.config`. They will be
passed through as a string to the scanner container and parsed there. If you
want to configure your own scanner, you must provide some way to parse this.
Below are the values recognized by the provided `eraser-trivy-scanner` image.
Values provided below are the defaults.
```yaml
cacheDir: /var/lib/trivy # The file path inside the container to store the cache
dbRepo: ghcr.io/aquasecurity/trivy-db # The container registry from which to fetch the trivy database
deleteFailedImages: true # if true, remove images for which scanning fails, regardless of why it failed
vulnerabilities:
ignoreUnfixed: true # consider the image compliant if there are no known fixes for the vulnerabilities found.
types: # a list of vulnerability types. for more info, see trivy's documentation.
- os
- library
  securityChecks: # see trivy's documentation for more information
- vuln
severities: # in this case, only flag images with CRITICAL vulnerability for removal
- CRITICAL
timeout:
total: 23h # if scanning isn't completed before this much time elapses, abort the whole scan
perImage: 1h # if scanning a single image exceeds this time, scanning will be aborted
```
## Detailed Options
| Option | Description | Default |
| --- | --- | --- |
| manager.runtime | The runtime to use for the manager's containers. Must be one of containerd, crio, or dockershim. It is assumed that your nodes are all using the same runtime, and there is currently no way to configure multiple runtimes. | containerd |
| manager.otlpEndpoint | The endpoint to send OpenTelemetry data to. If empty, data will not be sent. | "" |
| manager.logLevel | The log level for the manager's containers. Must be one of debug, info, warn, error, dpanic, panic, or fatal. | info |
| manager.scheduling.repeatInterval | Use only when collector and/or scanner are enabled. This is like a cron job, and will spawn an _ImageJob_ at the interval provided. | 24h |
| manager.scheduling.beginImmediately | If set to true, the first _ImageJob_ will run immediately. If false, the job will not be spawned until after the interval (above) has elapsed. | true |
| manager.profile.enabled | Whether to enable profiling for the manager's containers. This is for debugging with `go tool pprof`. | false |
| manager.profile.port | The port on which to expose the profiling endpoint. | 6060 |
| manager.imageJob.successRatio | The ratio of successful image jobs required before a cleanup is performed. | 1.0 |
| manager.imageJob.cleanup.delayOnSuccess | The amount of time to wait after a successful image job before performing cleanup. | 0s |
| manager.imageJob.cleanup.delayOnFailure | The amount of time to wait after a failed image job before performing cleanup. | 24h |
| manager.pullSecrets | The image pull secrets to use for collector, scanner, and eraser containers. | [] |
| manager.priorityClassName | The priority class to use for collector, scanner, and eraser containers. | "" |
| manager.nodeFilter.type | The type of node filter to use. Must be either "exclude" or "include". | exclude |
| manager.nodeFilter.selectors | A list of selectors used to filter nodes. | [] |
| components.collector.enabled | Whether to enable the collector component. | true |
| components.collector.image.repo | The repository containing the collector image. | ghcr.io/eraser-dev/collector |
| components.collector.image.tag | The tag of the collector image. | v1.0.0 |
| components.collector.request.mem | The amount of memory to request for the collector container. | 25Mi |
| components.collector.request.cpu | The amount of CPU to request for the collector container. | 7m |
| components.collector.limit.mem | The maximum amount of memory the collector container is allowed to use. | 500Mi |
| components.collector.limit.cpu | The maximum amount of CPU the collector container is allowed to use. | 0 |
| components.scanner.enabled | Whether to enable the scanner component. | true |
| components.scanner.image.repo | The repository containing the scanner image. | ghcr.io/eraser-dev/eraser-trivy-scanner |
| components.scanner.image.tag | The tag of the scanner image. | v1.0.0 |
| components.scanner.request.mem | The amount of memory to request for the scanner container. | 500Mi |
| components.scanner.request.cpu | The amount of CPU to request for the scanner container. | 1000m |
| components.scanner.limit.mem | The maximum amount of memory the scanner container is allowed to use. | 2Gi |
| components.scanner.limit.cpu | The maximum amount of CPU the scanner container is allowed to use. | 0 |
| components.scanner.config | The configuration to pass to the scanner container, as a YAML string. | See YAML below |
| components.eraser.image.repo | The repository containing the eraser image. | ghcr.io/eraser-dev/eraser |
| components.eraser.image.tag | The tag of the eraser image. | v1.0.0 |
| components.eraser.request.mem | The amount of memory to request for the eraser container. | 25Mi |
| components.eraser.request.cpu | The amount of CPU to request for the eraser container. | 0 |
================================================
FILE: docs/versioned_docs/version-v1.0.x/exclusion.md
================================================
---
title: Exclusion
---
## Excluding registries, repositories, and images
Eraser can exclude registries (example, `docker.io/library/*`) and also specific images with a tag (example, `docker.io/library/ubuntu:18.04`) or digest (example, `sha256:80f31da1ac7b312ba29d65080fd...`) from its removal process.
To exclude any images or registries from the removal, create configmap(s) with the label `eraser.sh/exclude.list=true` in the eraser-system namespace with a JSON file holding the excluded images.
```bash
$ cat > sample.json <<"EOF"
{
"excluded": [
"docker.io/library/*",
"ghcr.io/eraser-dev/test:latest"
]
}
EOF
$ kubectl create configmap excluded --from-file=sample.json --namespace=eraser-system
$ kubectl label configmap excluded eraser.sh/exclude.list=true -n eraser-system
```
## Exempting Nodes from the Eraser Pipeline
Exempting nodes from cleanup was added in v1.0.0. When deploying Eraser, you can specify whether there is a list of nodes you would like to `include` or `exclude` from the cleanup process using the configmap. For more information, see the section on [customization](https://eraser-dev.github.io/eraser/docs/customization).
================================================
FILE: docs/versioned_docs/version-v1.0.x/faq.md
================================================
---
title: FAQ
---
## Why am I still seeing vulnerable images?
Eraser currently targets **non-running** images, so any vulnerable images that are currently running will not be removed. In addition, the default vulnerability scanning with Trivy removes images with `CRITICAL` vulnerabilities. Any images with lower vulnerabilities will not be removed. This can be configured using the [configmap](https://eraser-dev.github.io/eraser/docs/customization#scanner-options).
================================================
FILE: docs/versioned_docs/version-v1.0.x/installation.md
================================================
---
title: Installation
---
## Manifest
To install Eraser with the manifest file, run the following command:
```bash
kubectl apply -f https://raw.githubusercontent.com/eraser-dev/eraser/v1.0.0/deploy/eraser.yaml
```
## Helm
If you'd like to install and manage Eraser with Helm, follow the install instructions [here](https://github.com/eraser-dev/eraser/blob/main/charts/eraser/README.md)
================================================
FILE: docs/versioned_docs/version-v1.0.x/introduction.md
================================================
---
title: Introduction
slug: /
---
# Introduction
When deploying to Kubernetes, it's common for pipelines to build and push images to a cluster, but it's much less common for these images to be cleaned up. This can lead to accumulating bloat on the disk, and a host of non-compliant images lingering on the nodes.
The current garbage collection process deletes images based on a percentage of load, but this process does not consider the vulnerability state of the images. **Eraser** aims to provide a simple way to determine the state of an image, and delete it if it meets the specified criteria.
================================================
FILE: docs/versioned_docs/version-v1.0.x/manual-removal.md
================================================
---
title: Manual Removal
---
Create an `ImageList` and specify the images you would like to remove. In this case, the image `docker.io/library/alpine:3.7.3` will be removed.
```shell
cat <<EOF | kubectl apply -f -
apiVersion: eraser.sh/v1alpha1
kind: ImageList
metadata:
  name: imagelist
spec:
  images:
    - docker.io/library/alpine:3.7.3
EOF
```

> `ImageList` is a cluster-scoped resource and must be called imagelist. `"*"` can be specified to remove all non-running images instead of individual images.
Creating an `ImageList` should trigger an `ImageJob` that will deploy Eraser pods on every node to perform the removal given the list of images.
```shell
$ kubectl get pods -n eraser-system
eraser-system eraser-controller-manager-55d54c4fb6-dcglq 1/1 Running 0 9m8s
eraser-system eraser-kind-control-plane 1/1 Running 0 11s
eraser-system eraser-kind-worker 1/1 Running 0 11s
eraser-system eraser-kind-worker2 1/1 Running 0 11s
```
Pods will run to completion and the images will be removed.
```shell
$ kubectl get pods -n eraser-system
eraser-system eraser-controller-manager-6d6d5594d4-phl2q 1/1 Running 0 4m16s
eraser-system eraser-kind-control-plane 0/1 Completed 0 22s
eraser-system eraser-kind-worker 0/1 Completed 0 22s
eraser-system eraser-kind-worker2 0/1 Completed 0 22s
```
The `ImageList` custom resource status field will contain the status of the last job. The success and failure counts indicate the number of nodes the Eraser agent was run on.
```shell
$ kubectl describe ImageList imagelist
...
Status:
Failed: 0
Success: 3
Timestamp: 2022-02-25T23:41:55Z
...
```
Verify the unused images are removed.
```shell
$ docker exec kind-worker ctr -n k8s.io images list | grep alpine
```
If the image has been successfully removed, there will be no output.
================================================
FILE: docs/versioned_docs/version-v1.0.x/metrics.md
================================================
---
title: Metrics
---
To view Eraser metrics, you will need to deploy an Open Telemetry collector in the 'eraser-system' namespace, and an exporter. An example collector with a Prometheus exporter is [otelcollector.yaml](https://github.com/eraser-dev/eraser/blob/main/test/e2e/test-data/otelcollector.yaml), and the endpoint can be specified using the [configmap](https://eraser-dev.github.io/eraser/docs/customization#universal-options). In this example, we are logging the collected data to the otel-collector pod, and exporting metrics through Prometheus at 'http://localhost:8889/metrics', but a separate exporter can also be configured.
Below is the list of metrics provided by Eraser per run:
#### Eraser
```yaml
- count
- name: images_removed_run_total
- description: Total images removed by eraser
```
#### Scanner
```yaml
- count
- name: vulnerable_images_run_total
- description: Total vulnerable images detected
```
#### ImageJob
```yaml
- count
- name: imagejob_run_total
- description: Total ImageJobs scheduled
- name: pods_completed_run_total
- description: Total pods completed
- name: pods_failed_run_total
- description: Total pods failed
- summary
- name: imagejob_duration_run_seconds
- description: Total time for ImageJobs scheduled to complete
```
================================================
FILE: docs/versioned_docs/version-v1.0.x/quick-start.md
================================================
---
title: Quick Start
---
This tutorial demonstrates the functionality of Eraser and validates that non-running images are removed successfully.
## Deploy a DaemonSet
After following the [install instructions](installation.md), we'll apply a demo `DaemonSet`. For illustrative purposes, a DaemonSet is applied and deleted so the non-running images remain on all nodes. The alpine image with the `3.7.3` tag will be used in this example. This is an image with a known critical vulnerability.
First, apply the `DaemonSet`:
```shell
cat <<EOF | kubectl apply -f -
apiVersion: apps/v1
kind: DaemonSet
metadata:
  name: alpine
spec:
  selector:
    matchLabels:
      app: alpine
  template:
    metadata:
      labels:
        app: alpine
    spec:
      containers:
      - name: alpine
        image: docker.io/library/alpine:3.7.3
EOF
```

Verify the pods are running, then delete the `DaemonSet` so the alpine images are left non-running on the nodes:

```shell
kubectl delete daemonset alpine
```

Check the nodes in the cluster:

```shell
$ kubectl get nodes
NAME                 STATUS   ROLES           AGE   VERSION
kind-control-plane   Ready    control-plane   45m   v1.24.0
kind-worker          Ready    <none>          45m   v1.24.0
kind-worker2         Ready    <none>          44m   v1.24.0
```
List the images then filter for `alpine`:
```shell
$ docker exec kind-worker ctr -n k8s.io images list | grep alpine
docker.io/library/alpine:3.7.3 application/vnd.docker.distribution.manifest.list.v2+json sha256:8421d9a84432575381bfabd248f1eb56f3aa21d9d7cd2511583c68c9b7511d10 2.0 MiB linux/386,linux/amd64,linux/arm/v6,linux/arm64/v8,linux/ppc64le,linux/s390x io.cri-containerd.image=managed
docker.io/library/alpine@sha256:8421d9a84432575381bfabd248f1eb56f3aa21d9d7cd2511583c68c9b7511d10 application/vnd.docker.distribution.manifest.list.v2+json sha256:8421d9a84432575381bfabd248f1eb56f3aa21d9d7cd2511583c68c9b7511d10 2.0 MiB linux/386,linux/amd64,linux/arm/v6,linux/arm64/v8,linux/ppc64le,linux/s390x io.cri-containerd.image=managed
```
## Automatically Cleaning Images
After deploying Eraser, it will automatically clean images in a regular interval. This interval can be set using the `manager.scheduling.repeatInterval` setting in the [configmap](https://eraser-dev.github.io/eraser/docs/customization#detailed-options). The default interval is 24 hours (`24h`). Valid time units are "ns", "us" (or "µs"), "ms", "s", "m", "h".
Eraser will schedule collector pods to each node in the cluster, and each pod will contain 3 containers: collector, scanner, and eraser that will run to completion.
```shell
$ kubectl get pods -n eraser-system
NAMESPACE NAME READY STATUS RESTARTS AGE
eraser-system collector-kind-control-plane-sb789 0/3 Completed 0 26m
eraser-system collector-kind-worker-j84hm 0/3 Completed 0 26m
eraser-system collector-kind-worker2-4lbdr 0/3 Completed 0 26m
eraser-system eraser-controller-manager-86cdb4cbf9-x8d7q 1/1 Running 0 26m
```
The collector container sends the list of all images to the scanner container, which scans and reports non-compliant images to the eraser container for removal of images that are non-running. Once all pods are completed, they will be automatically cleaned up.
> If you want to remove all the images periodically, you can skip the scanner container by setting the `components.scanner.enabled` value to `false` using the [configmap](https://eraser-dev.github.io/eraser/docs/customization#detailed-options). In this case, each collector pod will hold 2 containers: collector and eraser.
```shell
$ kubectl get pods -n eraser-system
NAMESPACE NAME READY STATUS RESTARTS AGE
eraser-system collector-kind-control-plane-ksk2b 0/2 Completed 0 50s
eraser-system collector-kind-worker-cpgqc 0/2 Completed 0 50s
eraser-system collector-kind-worker2-k25df 0/2 Completed 0 50s
eraser-system eraser-controller-manager-86cdb4cbf9-x8d7q 1/1 Running 0 55s
```
================================================
FILE: docs/versioned_docs/version-v1.0.x/releasing.md
================================================
---
title: Releasing
---
## Create Release Pull Request
1. Go to `create_release_pull_request` workflow under actions.
2. Select run workflow, and use the workflow from your branch.
3. Input release version with the semantic version identifying the release.
4. Click run workflow and review the PR created by github-actions.
## Releasing
5. Once the PR is merged to `main`, tag that commit with release version and push tags to remote repository.
```
git checkout <release branch>
git pull origin <release branch>
git tag -a <release version> -m '<release version>'
git push origin <release version>
```
6. Pushing the release tag will trigger GitHub Actions to trigger `release` job.
This will build the `ghcr.io/eraser-dev/eraser`, `ghcr.io/eraser-dev/eraser-manager`, `ghcr.io/eraser-dev/collector`, and `ghcr.io/eraser-dev/eraser-trivy-scanner` images automatically, then publish the new release tag.
## Publishing
1. GitHub Action will create a new release, review and edit it at https://github.com/eraser-dev/eraser/releases
================================================
FILE: docs/versioned_docs/version-v1.0.x/setup.md
================================================
---
title: Setup
---
# Development Setup
This document describes the steps to get started with development.
You can either utilize [Codespaces](https://docs.github.com/en/codespaces/overview) or setup a local environment.
## Local Setup
### Prerequisites:
- [go](https://go.dev/) with version 1.17 or later.
- [docker](https://docs.docker.com/get-docker/)
- [kind](https://kind.sigs.k8s.io/)
- `make`
### Get things running
- Get dependencies with `go get`
- This project uses `make`. You can utilize `make help` to see available targets. For local deployment make targets help to build, test and deploy.
### Making changes
Please refer to [Development Reference](#development-reference) for more details on the specific commands.
To test your changes on a cluster:
```bash
# generate necessary api files (optional - only needed if changes to api folder).
make generate
# build applicable images
make docker-build-manager MANAGER_IMG=eraser-manager:dev
make docker-build-eraser ERASER_IMG=eraser:dev
make docker-build-collector COLLECTOR_IMG=collector:dev
make docker-build-trivy-scanner TRIVY_SCANNER_IMG=eraser-trivy-scanner:dev
# make sure updated image is present on cluster (e.g., see kind example below)
kind load docker-image \
eraser-manager:dev \
eraser-trivy-scanner:dev \
eraser:dev \
collector:dev
make manifests
make deploy
# to remove the deployment
make undeploy
```
To test your changes to manager locally:
```bash
make run
```
Example Output:
```
you@local:~/eraser$ make run
docker build . \
-t eraser-tooling \
-f build/tooling/Dockerfile
[+] Building 7.8s (8/8) FINISHED
=> => naming to docker.io/library/eraser-tooling 0.0s
docker run -v /home/eraser/config:/config -w /config/manager \
registry.k8s.io/kustomize/kustomize:v3.8.9 edit set image controller=eraser-manager:dev
docker run -v /home/eraser:/eraser eraser-tooling controller-gen \
crd \
rbac:roleName=manager-role \
webhook \
paths="./..." \
output:crd:artifacts:config=config/crd/bases
rm -rf manifest_staging
mkdir -p manifest_staging/deploy
docker run --rm -v /home/eraser:/eraser \
registry.k8s.io/kustomize/kustomize:v3.8.9 build \
/eraser/config/default -o /eraser/manifest_staging/deploy/eraser.yaml
docker run -v /home/eraser:/eraser eraser-tooling controller-gen object:headerFile="hack/boilerplate.go.txt" paths="./..."
go fmt ./...
go vet ./...
go run ./main.go
{"level":"info","ts":1652985685.1663408,"logger":"controller-runtime.metrics","msg":"Metrics server is starting to listen","addr":":8080"}
...
```
## Development Reference
Eraser is using tooling from [kubebuilder](https://github.com/kubernetes-sigs/kubebuilder). For Eraser this tooling is containerized into the `eraser-tooling` image. The `make` targets can use this tooling and build the image when necessary.
You can override the default configuration using environment variables. Below you can find a reference of targets and configuration options.
### Common Configuration
| Environment Variable | Description |
| -------------------- | --------------------------------------------------------------------------------------------- |
| VERSION | Specifies the version (i.e., the image tag) of eraser to be used. |
| MANAGER_IMG | Defines the image url for the Eraser manager. Used for tagging, pulling and pushing the image |
| ERASER_IMG | Defines the image url for the Eraser. Used for tagging, pulling and pushing the image |
| COLLECTOR_IMG | Defines the image url for the Collector. Used for tagging, pulling and pushing the image |
### Linting
- `make lint`
Lints the go code.
| Environment Variable | Description |
| -------------------- | ------------------------------------------------------- |
| GOLANGCI_LINT | Specifies the go linting binary to be used for linting. |
### Development
- `make generate`
Generates necessary files for the k8s api stored under `api/v1alpha1/zz_generated.deepcopy.go`. See the [kubebuilder docs](https://book.kubebuilder.io/cronjob-tutorial/other-api-files.html) for details.
- `make manifests`
Generates the eraser deployment yaml files under `manifest_staging/deploy`.
Configuration Options:
| Environment Variable | Description |
| -------------------- | -------------------------------------------------- |
| ERASER_IMG | Defines the image url for the Eraser. |
| MANAGER_IMG | Defines the image url for the Eraser manager. |
| KUSTOMIZE_VERSION | Define Kustomize version for generating manifests. |
- `make test`
Runs the unit tests for the eraser project.
Configuration Options:
| Environment Variable | Description |
| -------------------- | ----------------------------------------------------------- |
| ENVTEST | Specifies the envtest setup binary. |
| ENVTEST_K8S_VERSION | Specifies the Kubernetes version for envtest setup command. |
- `make e2e-test`
Runs e2e tests on a cluster.
Configuration Options:
| Environment Variable | Description |
| -------------------- | ------------------------------------------------------------------------------------------------------------- |
| ERASER_IMG | Eraser image to be used for e2e test. |
| MANAGER_IMG | Eraser manager image to be used for e2e test. |
| KUBERNETES_VERSION | Kubernetes version for e2e test. |
| TEST_COUNT | Sets repetition for test. Please refer to [go docs](https://pkg.go.dev/cmd/go#hdr-Testing_flags) for details. |
| TIMEOUT | Sets timeout for test. Please refer to [go docs](https://pkg.go.dev/cmd/go#hdr-Testing_flags) for details. |
| TESTFLAGS | Sets additional test flags |
### Build
- `make build`
Builds the eraser manager binaries.
- `make run`
Runs the eraser manager on your local machine.
- `make docker-build-manager`
Builds the docker image for the eraser manager.
Configuration Options:
| Environment Variable | Description |
| -------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------ |
| CACHE_FROM | Sets the target of the buildx --cache-from flag [see buildx reference](https://docs.docker.com/engine/reference/commandline/buildx_build/#cache-from). |
| CACHE_TO | Sets the target of the buildx --cache-to flag [see buildx reference](https://docs.docker.com/engine/reference/commandline/buildx_build/#cache-to). |
| PLATFORM | Sets the target platform for buildx [see buildx reference](https://docs.docker.com/engine/reference/commandline/buildx_build/#platform). |
| OUTPUT_TYPE | Sets the output for buildx [see buildx reference](https://docs.docker.com/engine/reference/commandline/buildx_build/#output). |
| MANAGER_IMG | Specifies the target repository, image name and tag for building image. |
- `make docker-push-manager`
Builds the docker image for the eraser manager.
Configuration Options:
| Environment Variable | Description |
| -------------------- | ----------------------------------------------------------------------- |
| MANAGER_IMG | Specifies the target repository, image name and tag for building image. |
- `make docker-build-eraser`
Builds the docker image for the eraser manager.
Configuration Options:
| Environment Variable | Description |
| -------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------ |
| CACHE_FROM | Sets the target of the buildx --cache-from flag [see buildx reference](https://docs.docker.com/engine/reference/commandline/buildx_build/#cache-from). |
| CACHE_TO | Sets the target of the buildx --cache-to flag [see buildx reference](https://docs.docker.com/engine/reference/commandline/buildx_build/#cache-to). |
| PLATFORM | Sets the target platform for buildx [see buildx reference](https://docs.docker.com/engine/reference/commandline/buildx_build/#platform). |
| OUTPUT_TYPE | Sets the output for buildx [see buildx reference](https://docs.docker.com/engine/reference/commandline/buildx_build/#output). |
| ERASER_IMG | Specifies the target repository, image name and tag for building image. |
- `make docker-push-eraser`
Builds the docker image for the eraser manager.
Configuration Options:
| Environment Variable | Description |
| -------------------- | ----------------------------------------------------------------------- |
| ERASER_IMG | Specifies the target repository, image name and tag for building image. |
- `make docker-build-collector`
Builds the docker image for the eraser collector.
Configuration Options:
| Environment Variable | Description |
| -------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------ |
| CACHE_FROM | Sets the target of the buildx --cache-from flag [see buildx reference](https://docs.docker.com/engine/reference/commandline/buildx_build/#cache-from). |
| CACHE_TO | Sets the target of the buildx --cache-to flag [see buildx reference](https://docs.docker.com/engine/reference/commandline/buildx_build/#cache-to). |
| PLATFORM | Sets the target platform for buildx [see buildx reference](https://docs.docker.com/engine/reference/commandline/buildx_build/#platform). |
| OUTPUT_TYPE | Sets the output for buildx [see buildx reference](https://docs.docker.com/engine/reference/commandline/buildx_build/#output). |
| COLLECTOR_IMG | Specifies the target repository, image name and tag for building image. |
- `make docker-push-collector`
Builds the docker image for the eraser collector.
Configuration Options:
| Environment Variable | Description |
| -------------------- | ----------------------------------------------------------------------- |
| COLLECTOR_IMG | Specifies the target repository, image name and tag for building image. |
### Deployment
- `make install`
Install CRDs into the K8s cluster specified in ~/.kube/config.
Configuration Options:
| Environment Variable | Description |
| -------------------- | ---------------------------------------------------------------- |
| KUSTOMIZE_VERSION | Kustomize version used to generate k8s resources for deployment. |
- `make uninstall`
Uninstall CRDs from the K8s cluster specified in ~/.kube/config.
Configuration Options:
| Environment Variable | Description |
| -------------------- | ---------------------------------------------------------------- |
| KUSTOMIZE_VERSION | Kustomize version used to generate k8s resources for deployment. |
- `make deploy`
Deploys eraser to the cluster specified in ~/.kube/config.
Configuration Options:
| Environment Variable | Description |
| -------------------- | -------------------------------------------------------------------- |
| KUSTOMIZE_VERSION | Kustomize version used to generate k8s resources for deployment. |
| MANAGER_IMG | Specifies the eraser manager image version to be used for deployment |
- `make undeploy`
Undeploy controller from the K8s cluster specified in ~/.kube/config.
Configuration Options:
| Environment Variable | Description |
| -------------------- | ------------------------------------------------------------------------- |
| KUSTOMIZE_VERSION | Kustomize version used to generate k8s resources that need to be removed. |
### Release
- `make release-manifest`
Generates k8s manifests files for a release.
Configuration Options:
| Environment Variable | Description |
| -------------------- | ------------------------------------ |
| NEWVERSION | Sets the new version in the Makefile |
- `make promote-staging-manifest`
Promotes the k8s deployment yaml files to release.
================================================
FILE: docs/versioned_docs/version-v1.0.x/trivy.md
================================================
---
title: Trivy
---
## Trivy Provider Options
The trivy provider is used in Eraser for image scanning and detecting vulnerabilities. See [Customization](https://eraser-dev.github.io/eraser/docs/customization#scanner-options) for more details on configuring the scanner.
================================================
FILE: docs/versioned_docs/version-v1.1.x/architecture.md
================================================
---
title: Architecture
---
At a high level, Eraser has two main modes of operation: manual and automated.
Manual image removal involves supplying a list of images to remove; Eraser then
deploys pods to clean up the images you supplied.
Automated image removal runs on a timer. By default, the automated process
removes images based on the results of a vulnerability scan. The default
vulnerability scanner is Trivy, but others can be provided in its place. Or,
the scanner can be disabled altogether, in which case Eraser acts as a garbage
collector -- it will remove all non-running images in your cluster.
## Manual image cleanup
## Automated analysis, scanning, and cleanup
================================================
FILE: docs/versioned_docs/version-v1.1.x/code-of-conduct.md
================================================
---
title: Code of Conduct
---
This project has adopted the [CNCF Code of Conduct](https://github.com/cncf/foundation/blob/main/code-of-conduct.md).
Resources:
- [CNCF Code of Conduct](https://github.com/cncf/foundation/blob/main/code-of-conduct.md)
- [Code of Conduct Reporting](https://github.com/cncf/foundation/blob/main/code-of-conduct.md)
================================================
FILE: docs/versioned_docs/version-v1.1.x/contributing.md
================================================
---
title: Contributing
---
There are several ways to get involved with Eraser
- Join the [mailing list](https://groups.google.com/u/1/g/eraser-dev) to get notifications for releases, security announcements, etc.
- Participate in the [biweekly community meetings](https://docs.google.com/document/d/1Sj5u47K3WUGYNPmQHGFpb52auqZb1FxSlWAQnPADhWI/edit) to discuss development, issues, use cases, etc.
- Join the `#eraser` channel on the [Kubernetes Slack](https://slack.k8s.io/)
- View the [development setup instructions](https://eraser-dev.github.io/eraser/docs/development)
This project welcomes contributions and suggestions.
This project has adopted the [CNCF Code of Conduct](https://github.com/cncf/foundation/blob/main/code-of-conduct.md).
================================================
FILE: docs/versioned_docs/version-v1.1.x/custom-scanner.md
================================================
---
title: Custom Scanner
---
## Creating a Custom Scanner
To create a custom scanner for non-compliant images, use the following [template](https://github.com/eraser-dev/eraser-scanner-template/).
In order to customize your scanner, start by creating a `NewImageProvider()`. The ImageProvider interface can be found [here](../../../pkg/scanners/template/scanner_template.go).
The ImageProvider will allow you to retrieve the list of all non-running and non-excluded images from the collector container through the `ReceiveImages()` function. Process these images with your customized scanner and threshold, and use `SendImages()` to pass the images found non-compliant to the eraser container for removal. Finally, complete the scanning process by calling `Finish()`.
When complete, provide your custom scanner image to Eraser in deployment.
================================================
FILE: docs/versioned_docs/version-v1.1.x/customization.md
================================================
---
title: Customization
---
## Overview
Eraser uses a configmap to configure its behavior. The configmap is part of the
deployment and it is not necessary to deploy it manually. Once deployed, the configmap
can be edited at any time:
```bash
kubectl edit configmap --namespace eraser-system eraser-manager-config
```
If an eraser job is already running, the changes will not take effect until the job completes.
The configuration is in yaml.
## Key Concepts
### Basic architecture
The _manager_ runs as a pod in your cluster and manages _ImageJobs_. Think of
an _ImageJob_ as a unit of work, performed on every node in your cluster. Each
node runs a sub-job. The goal of the _ImageJob_ is to assess the images on your
cluster's nodes, and to remove the images you don't want. There are two stages:
1. Assessment
1. Removal.
### Scheduling
An _ImageJob_ can either be created on-demand (see [Manual Removal](https://eraser-dev.github.io/eraser/docs/manual-removal)),
or they can be spawned on a timer like a cron job. On-demand jobs skip the
assessment stage and get right down to the business of removing the images you
specified. The behavior of an on-demand job is quite different from that of
timed jobs.
### Fault Tolerance
Because an _ImageJob_ runs on every node in your cluster, and the conditions on
each node may vary widely, some of the sub-jobs may fail. If you cannot
tolerate any failure, set the `manager.imageJob.successRatio` property to
`1.0`. If 75% success sounds good to you, set it to `0.75`. In that case, if
fewer than 75% of the pods spawned by the _ImageJob_ report success, the job as
a whole will be marked as a failure.
This is mainly to help diagnose error conditions. As such, you can set
`manager.imageJob.cleanup.delayOnFailure` to a long value so that logs can be
captured before the spawned pods are cleaned up.
### Excluding Nodes
For various reasons, you may want to prevent Eraser from scheduling pods on
certain nodes. To do so, the nodes can be given a special label. By default,
this label is `eraser.sh/cleanup.filter`, but you can configure the behavior with
the options under `manager.nodeFilter`. The [table](#detailed-options) provides more detail.
### Configuring Components
An _ImageJob_ is made up of various sub-jobs, with one sub-job for each node.
These sub-jobs can be broken down further into three stages.
1. Collection (What is on the node?)
1. Scanning (What images conform to the policy I've provided?)
1. Removal (Remove images based on the results of the above)
Of the above stages, only Removal is mandatory. The others can be disabled.
Furthermore, manually triggered _ImageJobs_ will skip right to removal, even if
Eraser is configured to collect and scan. Collection and Scanning will only
take place when:
1. The collector and/or scanner `components` are enabled, AND
1. The job was *not* triggered manually by creating an _ImageList_.
Disabling scanner will remove all non-running images by default.
### Swapping out components
The collector, scanner, and remover components can all be swapped out. This
enables you to build and host the images yourself. In addition, the scanner's
behavior can be completely tailored to your needs by swapping out the default
image with one of your own. To specify the images, use the
`components.<component>.image.repo` and `components.<component>.image.tag`,
where `<component>` is one of `collector`, `scanner`, or `remover`.
## Universal Options
The following portions of the configmap apply no matter how you spawn your
_ImageJob_. The values provided below are the defaults. For more detail on
these options, see the [table](#detailed-options).
```yaml
manager:
runtime: containerd
otlpEndpoint: "" # empty string disables OpenTelemetry
logLevel: info
profile:
enabled: false
port: 6060
imageJob:
successRatio: 1.0
cleanup:
delayOnSuccess: 0s
delayOnFailure: 24h
pullSecrets: [] # image pull secrets for collector/scanner/remover
priorityClassName: "" # priority class name for collector/scanner/remover
nodeFilter:
type: exclude # must be either exclude|include
selectors:
- eraser.sh/cleanup.filter
- kubernetes.io/os=windows
components:
remover:
image:
repo: ghcr.io/eraser-dev/remover
tag: v1.0.0
request:
mem: 25Mi
cpu: 0
limit:
mem: 30Mi
cpu: 1000m
```
## Component Options
```yaml
components:
collector:
enabled: true
image:
repo: ghcr.io/eraser-dev/collector
tag: v1.0.0
request:
mem: 25Mi
cpu: 7m
limit:
mem: 500Mi
cpu: 0
scanner:
enabled: true
image:
repo: ghcr.io/eraser-dev/eraser-trivy-scanner
tag: v1.0.0
request:
mem: 500Mi
cpu: 1000m
limit:
mem: 2Gi
cpu: 0
config: |
# this is the schema for the provided 'trivy-scanner'. custom scanners
# will define their own configuration. see the below
remover:
image:
repo: ghcr.io/eraser-dev/remover
tag: v1.0.0
request:
mem: 25Mi
cpu: 0
limit:
mem: 30Mi
cpu: 1000m
```
## Scanner Options
These options can be provided to `components.scanner.config`. They will be
passed through as a string to the scanner container and parsed there. If you
want to configure your own scanner, you must provide some way to parse this.
Below are the values recognized by the provided `eraser-trivy-scanner` image.
Values provided below are the defaults.
```yaml
cacheDir: /var/lib/trivy # The file path inside the container to store the cache
dbRepo: ghcr.io/aquasecurity/trivy-db # The container registry from which to fetch the trivy database
deleteFailedImages: true # if true, remove images for which scanning fails, regardless of why it failed
deleteEOLImages: true # if true, remove images that have reached their end-of-life date
vulnerabilities:
ignoreUnfixed: true # consider the image compliant if there are no known fixes for the vulnerabilities found.
types: # a list of vulnerability types. for more info, see trivy's documentation.
- os
- library
securityChecks: # see trivy's documentation for more information
- vuln
severities: # in this case, only flag images with CRITICAL vulnerability for removal
- CRITICAL
timeout:
total: 23h # if scanning isn't completed before this much time elapses, abort the whole scan
perImage: 1h # if scanning a single image exceeds this time, scanning will be aborted
```
## Detailed Options
| Option | Description | Default |
| --- | --- | --- |
| manager.runtime | The runtime to use for the manager's containers. Must be one of containerd, crio, or dockershim. It is assumed that your nodes are all using the same runtime, and there is currently no way to configure multiple runtimes. | containerd |
| manager.otlpEndpoint | The endpoint to send OpenTelemetry data to. If empty, data will not be sent. | "" |
| manager.logLevel | The log level for the manager's containers. Must be one of debug, info, warn, error, dpanic, panic, or fatal. | info |
| manager.scheduling.repeatInterval | Use only when collector and/or scanner are enabled. This is like a cron job, and will spawn an _ImageJob_ at the interval provided. | 24h |
| manager.scheduling.beginImmediately | If set to true, the first _ImageJob_ will run immediately. If false, the job will not be spawned until after the interval (above) has elapsed. | true |
| manager.profile.enabled | Whether to enable profiling for the manager's containers. This is for debugging with `go tool pprof`. | false |
| manager.profile.port | The port on which to expose the profiling endpoint. | 6060 |
| manager.imageJob.successRatio | The ratio of successful image jobs required before a cleanup is performed. | 1.0 |
| manager.imageJob.cleanup.delayOnSuccess | The amount of time to wait after a successful image job before performing cleanup. | 0s |
| manager.imageJob.cleanup.delayOnFailure | The amount of time to wait after a failed image job before performing cleanup. | 24h |
| manager.pullSecrets | The image pull secrets to use for collector, scanner, and remover containers. | [] |
| manager.priorityClassName | The priority class to use for collector, scanner, and remover containers. | "" |
| manager.nodeFilter.type | The type of node filter to use. Must be either "exclude" or "include". | exclude |
| manager.nodeFilter.selectors | A list of selectors used to filter nodes. | [] |
| components.collector.enabled | Whether to enable the collector component. | true |
| components.collector.image.repo | The repository containing the collector image. | ghcr.io/eraser-dev/collector |
| components.collector.image.tag | The tag of the collector image. | v1.0.0 |
| components.collector.request.mem | The amount of memory to request for the collector container. | 25Mi |
| components.collector.request.cpu | The amount of CPU to request for the collector container. | 7m |
| components.collector.limit.mem | The maximum amount of memory the collector container is allowed to use. | 500Mi |
| components.collector.limit.cpu | The maximum amount of CPU the collector container is allowed to use. | 0 |
| components.scanner.enabled | Whether to enable the scanner component. | true |
| components.scanner.image.repo | The repository containing the scanner image. | ghcr.io/eraser-dev/eraser-trivy-scanner |
| components.scanner.image.tag | The tag of the scanner image. | v1.0.0 |
| components.scanner.request.mem | The amount of memory to request for the scanner container. | 500Mi |
| components.scanner.request.cpu | The amount of CPU to request for the scanner container. | 1000m |
| components.scanner.limit.mem | The maximum amount of memory the scanner container is allowed to use. | 2Gi |
| components.scanner.limit.cpu | The maximum amount of CPU the scanner container is allowed to use. | 0 |
| components.scanner.config | The configuration to pass to the scanner container, as a YAML string. | See YAML below |
| components.remover.image.repo | The repository containing the remover image. | ghcr.io/eraser-dev/remover |
| components.remover.image.tag | The tag of the remover image. | v1.0.0 |
| components.remover.request.mem | The amount of memory to request for the remover container. | 25Mi |
| components.remover.request.cpu | The amount of CPU to request for the remover container. | 0 |
================================================
FILE: docs/versioned_docs/version-v1.1.x/exclusion.md
================================================
---
title: Exclusion
---
## Excluding registries, repositories, and images
Eraser can exclude registries (example, `docker.io/library/*`) and also specific images with a tag (example, `docker.io/library/ubuntu:18.04`) or digest (example, `sha256:80f31da1ac7b312ba29d65080fd...`) from its removal process.
To exclude any images or registries from the removal, create configmap(s) with the label `eraser.sh/exclude.list=true` in the eraser-system namespace with a JSON file holding the excluded images.
```bash
$ cat > sample.json <<"EOF"
{
"excluded": [
"docker.io/library/*",
"ghcr.io/eraser-dev/test:latest"
]
}
EOF
$ kubectl create configmap excluded --from-file=sample.json --namespace=eraser-system
$ kubectl label configmap excluded eraser.sh/exclude.list=true -n eraser-system
```
## Exempting Nodes from the Eraser Pipeline
Exempting nodes from cleanup was added in v1.0.0. When deploying Eraser, you can specify whether there is a list of nodes you would like to `include` or `exclude` from the cleanup process using the configmap. For more information, see the section on [customization](https://eraser-dev.github.io/eraser/docs/customization).
================================================
FILE: docs/versioned_docs/version-v1.1.x/faq.md
================================================
---
title: FAQ
---
## Why am I still seeing vulnerable images?
Eraser currently targets **non-running** images, so any vulnerable images that are currently running will not be removed. In addition, the default vulnerability scanning with Trivy removes images with `CRITICAL` vulnerabilities. Any images with lower vulnerabilities will not be removed. This can be configured using the [configmap](https://eraser-dev.github.io/eraser/docs/customization#scanner-options).
## How is Eraser different from Kubernetes garbage collection?
The native garbage collection in Kubernetes works a bit differently than Eraser. By default, garbage collection begins when disk usage reaches 85%, and stops when it gets down to 80%. More details about Kubernetes garbage collection can be found in the [Kubernetes documentation](https://kubernetes.io/docs/concepts/architecture/garbage-collection/), and configuration options can be found in the [Kubelet documentation](https://kubernetes.io/docs/reference/config-api/kubelet-config.v1beta1/).
There are a couple core benefits to using Eraser for image cleanup:
* Eraser can be configured to use image vulnerability data when making determinations on image removal
* By interfacing directly with the container runtime, Eraser can clean up images that are not managed by Kubelet and Kubernetes
================================================
FILE: docs/versioned_docs/version-v1.1.x/installation.md
================================================
---
title: Installation
---
## Manifest
To install Eraser with the manifest file, run the following command:
```bash
kubectl apply -f https://raw.githubusercontent.com/eraser-dev/eraser/v1.1.0/deploy/eraser.yaml
```
## Helm
If you'd like to install and manage Eraser with Helm, follow the install instructions [here](https://github.com/eraser-dev/eraser/blob/main/charts/eraser/README.md)
================================================
FILE: docs/versioned_docs/version-v1.1.x/introduction.md
================================================
---
title: Introduction
slug: /
---
# Introduction
When deploying to Kubernetes, it's common for pipelines to build and push images to a cluster, but it's much less common for these images to be cleaned up. This can lead to accumulating bloat on the disk, and a host of non-compliant images lingering on the nodes.
The current garbage collection process deletes images based on a percentage of load, but this process does not consider the vulnerability state of the images. **Eraser** aims to provide a simple way to determine the state of an image, and delete it if it meets the specified criteria.
================================================
FILE: docs/versioned_docs/version-v1.1.x/manual-removal.md
================================================
---
title: Manual Removal
---
Create an `ImageList` and specify the images you would like to remove. In this case, the image `docker.io/library/alpine:3.7.3` will be removed.
```shell
cat <<EOF | kubectl apply -f -
apiVersion: eraser.sh/v1
kind: ImageList
metadata:
  name: imagelist
spec:
  images:
    - docker.io/library/alpine:3.7.3
EOF
```
> `ImageList` is a cluster-scoped resource and must be called imagelist. `"*"` can be specified to remove all non-running images instead of individual images.
Creating an `ImageList` should trigger an `ImageJob` that will deploy Eraser pods on every node to perform the removal given the list of images.
```shell
$ kubectl get pods -n eraser-system
eraser-system eraser-controller-manager-55d54c4fb6-dcglq 1/1 Running 0 9m8s
eraser-system eraser-kind-control-plane 1/1 Running 0 11s
eraser-system eraser-kind-worker 1/1 Running 0 11s
eraser-system eraser-kind-worker2 1/1 Running 0 11s
```
Pods will run to completion and the images will be removed.
```shell
$ kubectl get pods -n eraser-system
eraser-system eraser-controller-manager-6d6d5594d4-phl2q 1/1 Running 0 4m16s
eraser-system eraser-kind-control-plane 0/1 Completed 0 22s
eraser-system eraser-kind-worker 0/1 Completed 0 22s
eraser-system eraser-kind-worker2 0/1 Completed 0 22s
```
The `ImageList` custom resource status field will contain the status of the last job. The success and failure counts indicate the number of nodes the Eraser agent was run on.
```shell
$ kubectl describe ImageList imagelist
...
Status:
Failed: 0
Success: 3
Timestamp: 2022-02-25T23:41:55Z
...
```
Verify the unused images are removed.
```shell
$ docker exec kind-worker ctr -n k8s.io images list | grep alpine
```
If the image has been successfully removed, there will be no output.
================================================
FILE: docs/versioned_docs/version-v1.1.x/metrics.md
================================================
---
title: Metrics
---
To view Eraser metrics, you will need to deploy an Open Telemetry collector in the 'eraser-system' namespace, and an exporter. An example collector with a Prometheus exporter is [otelcollector.yaml](https://github.com/eraser-dev/eraser/blob/main/test/e2e/test-data/otelcollector.yaml), and the endpoint can be specified using the [configmap](https://eraser-dev.github.io/eraser/docs/customization#universal-options). In this example, we are logging the collected data to the otel-collector pod, and exporting metrics through Prometheus at 'http://localhost:8889/metrics', but a separate exporter can also be configured.
Below is the list of metrics provided by Eraser per run:
#### Eraser
```yaml
- count
- name: images_removed_run_total
- description: Total images removed by eraser
```
#### Scanner
```yaml
- count
- name: vulnerable_images_run_total
- description: Total vulnerable images detected
```
#### ImageJob
```yaml
- count
- name: imagejob_run_total
- description: Total ImageJobs scheduled
- name: pods_completed_run_total
- description: Total pods completed
- name: pods_failed_run_total
- description: Total pods failed
- summary
- name: imagejob_duration_run_seconds
- description: Total time for ImageJobs scheduled to complete
```
================================================
FILE: docs/versioned_docs/version-v1.1.x/quick-start.md
================================================
---
title: Quick Start
---
This tutorial demonstrates the functionality of Eraser and validates that non-running images are removed successfully.
## Deploy a DaemonSet
After following the [install instructions](installation.md), we'll apply a demo `DaemonSet`. For illustrative purposes, a DaemonSet is applied and deleted so the non-running images remain on all nodes. The alpine image with the `3.7.3` tag will be used in this example. This is an image with a known critical vulnerability.
First, apply the `DaemonSet`:
```shell
cat <<EOF | kubectl apply -f -
apiVersion: apps/v1
kind: DaemonSet
metadata:
  name: alpine
spec:
  selector:
    matchLabels:
      app: alpine
  template:
    metadata:
      labels:
        app: alpine
    spec:
      containers:
      - name: alpine
        image: docker.io/library/alpine:3.7.3
EOF
```
Next, delete the `DaemonSet` so that the alpine image is left non-running on the nodes:
```shell
kubectl delete daemonset alpine
```
Verify the node names in the cluster:
```shell
$ kubectl get nodes
NAME                 STATUS   ROLES           AGE   VERSION
kind-control-plane   Ready    control-plane   45m   v1.24.0
kind-worker          Ready    <none>          45m   v1.24.0
kind-worker2         Ready    <none>          44m   v1.24.0
```
List the images then filter for `alpine`:
```shell
$ docker exec kind-worker ctr -n k8s.io images list | grep alpine
docker.io/library/alpine:3.7.3 application/vnd.docker.distribution.manifest.list.v2+json sha256:8421d9a84432575381bfabd248f1eb56f3aa21d9d7cd2511583c68c9b7511d10 2.0 MiB linux/386,linux/amd64,linux/arm/v6,linux/arm64/v8,linux/ppc64le,linux/s390x io.cri-containerd.image=managed
docker.io/library/alpine@sha256:8421d9a84432575381bfabd248f1eb56f3aa21d9d7cd2511583c68c9b7511d10 application/vnd.docker.distribution.manifest.list.v2+json sha256:8421d9a84432575381bfabd248f1eb56f3aa21d9d7cd2511583c68c9b7511d10 2.0 MiB linux/386,linux/amd64,linux/arm/v6,linux/arm64/v8,linux/ppc64le,linux/s390x io.cri-containerd.image=managed
```
## Automatically Cleaning Images
After deploying Eraser, it will automatically clean images in a regular interval. This interval can be set using the `manager.scheduling.repeatInterval` setting in the [configmap](https://eraser-dev.github.io/eraser/docs/customization#detailed-options). The default interval is 24 hours (`24h`). Valid time units are "ns", "us" (or "µs"), "ms", "s", "m", "h".
Eraser will schedule eraser pods to each node in the cluster, and each pod will contain 3 containers: collector, scanner, and remover that will run to completion.
```shell
$ kubectl get pods -n eraser-system
NAMESPACE NAME READY STATUS RESTARTS AGE
eraser-system eraser-kind-control-plane-sb789 0/3 Completed 0 26m
eraser-system eraser-kind-worker-j84hm 0/3 Completed 0 26m
eraser-system eraser-kind-worker2-4lbdr 0/3 Completed 0 26m
eraser-system eraser-controller-manager-86cdb4cbf9-x8d7q 1/1 Running 0 26m
```
The collector container sends the list of all images to the scanner container, which scans and reports non-compliant images to the remover container for removal of images that are non-running. Once all pods are completed, they will be automatically cleaned up.
> If you want to remove all the images periodically, you can skip the scanner container by setting the `components.scanner.enabled` value to `false` using the [configmap](https://eraser-dev.github.io/eraser/docs/customization#detailed-options). In this case, each collector pod will hold 2 containers: collector and remover.
```shell
$ kubectl get pods -n eraser-system
NAMESPACE NAME READY STATUS RESTARTS AGE
eraser-system eraser-kind-control-plane-ksk2b 0/2 Completed 0 50s
eraser-system eraser-kind-worker-cpgqc 0/2 Completed 0 50s
eraser-system eraser-kind-worker2-k25df 0/2 Completed 0 50s
eraser-system eraser-controller-manager-86cdb4cbf9-x8d7q 1/1 Running 0 55s
```
================================================
FILE: docs/versioned_docs/version-v1.1.x/releasing.md
================================================
---
title: Releasing
---
## Create Release Pull Request
1. Go to `create_release_pull_request` workflow under actions.
2. Select run workflow, and use the workflow from your branch.
3. Input release version with the semantic version identifying the release.
4. Click run workflow and review the PR created by github-actions.
# Releasing
5. Once the PR is merged to `main`, tag that commit with release version and push tags to remote repository.
```
git checkout <release branch>
git pull origin <release branch>
git tag -a <version> -m '<version>'
git push origin <version>
```
6. Pushing the release tag will trigger GitHub Actions to trigger `release` job.
This will build the `ghcr.io/eraser-dev/remover`, `ghcr.io/eraser-dev/eraser-manager`, `ghcr.io/eraser-dev/collector`, and `ghcr.io/eraser-dev/eraser-trivy-scanner` images automatically, then publish the new release tag.
## Publishing
1. GitHub Action will create a new release, review and edit it at https://github.com/eraser-dev/eraser/releases
================================================
FILE: docs/versioned_docs/version-v1.1.x/setup.md
================================================
---
title: Setup
---
# Development Setup
This document describes the steps to get started with development.
You can either utilize [Codespaces](https://docs.github.com/en/codespaces/overview) or setup a local environment.
## Local Setup
### Prerequisites:
- [go](https://go.dev/) with version 1.17 or later.
- [docker](https://docs.docker.com/get-docker/)
- [kind](https://kind.sigs.k8s.io/)
- `make`
### Get things running
- Get dependencies with `go get`
- This project uses `make`. You can utilize `make help` to see available targets. For local deployment make targets help to build, test and deploy.
### Making changes
Please refer to [Development Reference](#development-reference) for more details on the specific commands.
To test your changes on a cluster:
```bash
# generate necessary api files (optional - only needed if changes to api folder).
make generate
# build applicable images
make docker-build-manager MANAGER_IMG=eraser-manager:dev
make docker-build-remover REMOVER_IMG=remover:dev
make docker-build-collector COLLECTOR_IMG=collector:dev
make docker-build-trivy-scanner TRIVY_SCANNER_IMG=eraser-trivy-scanner:dev
# make sure updated image is present on cluster (e.g., see kind example below)
kind load docker-image \
eraser-manager:dev \
eraser-trivy-scanner:dev \
remover:dev \
collector:dev
make manifests
make deploy
# to remove the deployment
make undeploy
```
To test your changes to manager locally:
```bash
make run
```
Example Output:
```
you@local:~/eraser$ make run
docker build . \
-t eraser-tooling \
-f build/tooling/Dockerfile
[+] Building 7.8s (8/8) FINISHED
=> => naming to docker.io/library/eraser-tooling 0.0s
docker run -v /home/eraser/config:/config -w /config/manager \
registry.k8s.io/kustomize/kustomize:v3.8.9 edit set image controller=eraser-manager:dev
docker run -v /home/eraser:/eraser eraser-tooling controller-gen \
crd \
rbac:roleName=manager-role \
webhook \
paths="./..." \
output:crd:artifacts:config=config/crd/bases
rm -rf manifest_staging
mkdir -p manifest_staging/deploy
docker run --rm -v /home/eraser:/eraser \
registry.k8s.io/kustomize/kustomize:v3.8.9 build \
/eraser/config/default -o /eraser/manifest_staging/deploy/eraser.yaml
docker run -v /home/eraser:/eraser eraser-tooling controller-gen object:headerFile="hack/boilerplate.go.txt" paths="./..."
go fmt ./...
go vet ./...
go run ./main.go
{"level":"info","ts":1652985685.1663408,"logger":"controller-runtime.metrics","msg":"Metrics server is starting to listen","addr":":8080"}
...
```
## Development Reference
Eraser is using tooling from [kubebuilder](https://github.com/kubernetes-sigs/kubebuilder). For Eraser this tooling is containerized into the `eraser-tooling` image. The `make` targets can use this tooling and build the image when necessary.
You can override the default configuration using environment variables. Below you can find a reference of targets and configuration options.
### Common Configuration
| Environment Variable | Description |
| -------------------- | --------------------------------------------------------------------------------------------- |
| VERSION | Specifies the version (i.e., the image tag) of eraser to be used. |
| MANAGER_IMG | Defines the image url for the Eraser manager. Used for tagging, pulling and pushing the image |
| REMOVER_IMG | Defines the image url for the Eraser. Used for tagging, pulling and pushing the image |
| COLLECTOR_IMG | Defines the image url for the Collector. Used for tagging, pulling and pushing the image |
### Linting
- `make lint`
Lints the go code.
| Environment Variable | Description |
| -------------------- | ------------------------------------------------------- |
| GOLANGCI_LINT | Specifies the go linting binary to be used for linting. |
### Development
- `make generate`
Generates necessary files for the k8s api stored under `api/v1alpha1/zz_generated.deepcopy.go`. See the [kubebuilder docs](https://book.kubebuilder.io/cronjob-tutorial/other-api-files.html) for details.
- `make manifests`
Generates the eraser deployment yaml files under `manifest_staging/deploy`.
Configuration Options:
| Environment Variable | Description |
| -------------------- | -------------------------------------------------- |
| REMOVER_IMG | Defines the image url for the Eraser. |
| MANAGER_IMG | Defines the image url for the Eraser manager. |
| KUSTOMIZE_VERSION | Define Kustomize version for generating manifests. |
- `make test`
Runs the unit tests for the eraser project.
Configuration Options:
| Environment Variable | Description |
| -------------------- | ----------------------------------------------------------- |
| ENVTEST | Specifies the envtest setup binary. |
| ENVTEST_K8S_VERSION | Specifies the Kubernetes version for envtest setup command. |
- `make e2e-test`
Runs e2e tests on a cluster.
Configuration Options:
| Environment Variable | Description |
| -------------------- | ------------------------------------------------------------------------------------------------------------- |
| REMOVER_IMG | Eraser image to be used for e2e test. |
| MANAGER_IMG | Eraser manager image to be used for e2e test. |
| KUBERNETES_VERSION | Kubernetes version for e2e test. |
| TEST_COUNT | Sets repetition for test. Please refer to [go docs](https://pkg.go.dev/cmd/go#hdr-Testing_flags) for details. |
| TIMEOUT | Sets timeout for test. Please refer to [go docs](https://pkg.go.dev/cmd/go#hdr-Testing_flags) for details. |
| TESTFLAGS | Sets additional test flags |
### Build
- `make build`
Builds the eraser manager binaries.
- `make run`
Runs the eraser manager on your local machine.
- `make docker-build-manager`
Builds the docker image for the eraser manager.
Configuration Options:
| Environment Variable | Description |
| -------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------ |
| CACHE_FROM | Sets the target of the buildx --cache-from flag [see buildx reference](https://docs.docker.com/engine/reference/commandline/buildx_build/#cache-from). |
| CACHE_TO | Sets the target of the buildx --cache-to flag [see buildx reference](https://docs.docker.com/engine/reference/commandline/buildx_build/#cache-to). |
| PLATFORM | Sets the target platform for buildx [see buildx reference](https://docs.docker.com/engine/reference/commandline/buildx_build/#platform). |
| OUTPUT_TYPE | Sets the output for buildx [see buildx reference](https://docs.docker.com/engine/reference/commandline/buildx_build/#output). |
| MANAGER_IMG | Specifies the target repository, image name and tag for building image. |
- `make docker-push-manager`
Builds the docker image for the eraser manager.
Configuration Options:
| Environment Variable | Description |
| -------------------- | ----------------------------------------------------------------------- |
| MANAGER_IMG | Specifies the target repository, image name and tag for building image. |
- `make docker-build-remover`
Builds the docker image for eraser remover.
Configuration Options:
| Environment Variable | Description |
| -------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------ |
| CACHE_FROM | Sets the target of the buildx --cache-from flag [see buildx reference](https://docs.docker.com/engine/reference/commandline/buildx_build/#cache-from). |
| CACHE_TO | Sets the target of the buildx --cache-to flag [see buildx reference](https://docs.docker.com/engine/reference/commandline/buildx_build/#cache-to). |
| PLATFORM | Sets the target platform for buildx [see buildx reference](https://docs.docker.com/engine/reference/commandline/buildx_build/#platform). |
| OUTPUT_TYPE | Sets the output for buildx [see buildx reference](https://docs.docker.com/engine/reference/commandline/buildx_build/#output). |
| REMOVER_IMG | Specifies the target repository, image name and tag for building image. |
- `make docker-push-remover`
Builds the docker image for the eraser remover.
Configuration Options:
| Environment Variable | Description |
| -------------------- | ----------------------------------------------------------------------- |
| REMOVER_IMG | Specifies the target repository, image name and tag for building image. |
- `make docker-build-collector`
Builds the docker image for the eraser collector.
Configuration Options:
| Environment Variable | Description |
| -------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------ |
| CACHE_FROM | Sets the target of the buildx --cache-from flag [see buildx reference](https://docs.docker.com/engine/reference/commandline/buildx_build/#cache-from). |
| CACHE_TO | Sets the target of the buildx --cache-to flag [see buildx reference](https://docs.docker.com/engine/reference/commandline/buildx_build/#cache-to). |
| PLATFORM | Sets the target platform for buildx [see buildx reference](https://docs.docker.com/engine/reference/commandline/buildx_build/#platform). |
| OUTPUT_TYPE | Sets the output for buildx [see buildx reference](https://docs.docker.com/engine/reference/commandline/buildx_build/#output). |
| COLLECTOR_IMG | Specifies the target repository, image name and tag for building image. |
- `make docker-push-collector`
Builds the docker image for the eraser collector.
Configuration Options:
| Environment Variable | Description |
| -------------------- | ----------------------------------------------------------------------- |
| COLLECTOR_IMG | Specifies the target repository, image name and tag for building image. |
### Deployment
- `make install`
Install CRDs into the K8s cluster specified in ~/.kube/config.
Configuration Options:
| Environment Variable | Description |
| -------------------- | ---------------------------------------------------------------- |
| KUSTOMIZE_VERSION | Kustomize version used to generate k8s resources for deployment. |
- `make uninstall`
Uninstall CRDs from the K8s cluster specified in ~/.kube/config.
Configuration Options:
| Environment Variable | Description |
| -------------------- | ---------------------------------------------------------------- |
| KUSTOMIZE_VERSION | Kustomize version used to generate k8s resources for deployment. |
- `make deploy`
Deploys eraser to the cluster specified in ~/.kube/config.
Configuration Options:
| Environment Variable | Description |
| -------------------- | -------------------------------------------------------------------- |
| KUSTOMIZE_VERSION | Kustomize version used to generate k8s resources for deployment. |
| MANAGER_IMG | Specifies the eraser manager image version to be used for deployment |
- `make undeploy`
Undeploy controller from the K8s cluster specified in ~/.kube/config.
Configuration Options:
| Environment Variable | Description |
| -------------------- | ------------------------------------------------------------------------- |
| KUSTOMIZE_VERSION | Kustomize version used to generate k8s resources that need to be removed. |
### Release
- `make release-manifest`
Generates k8s manifests files for a release.
Configuration Options:
| Environment Variable | Description |
| -------------------- | ------------------------------------ |
| NEWVERSION | Sets the new version in the Makefile |
- `make promote-staging-manifest`
Promotes the k8s deployment yaml files to release.
================================================
FILE: docs/versioned_docs/version-v1.1.x/trivy.md
================================================
---
title: Trivy
---
## Trivy Provider Options
The trivy provider is used in Eraser for image scanning and detecting vulnerabilities. See [Customization](https://eraser-dev.github.io/eraser/docs/customization#scanner-options) for more details on configuring the scanner.
================================================
FILE: docs/versioned_docs/version-v1.2.x/architecture.md
================================================
---
title: Architecture
---
At a high level, Eraser has two main modes of operation: manual and automated.
Manual image removal involves supplying a list of images to remove; Eraser then
deploys pods to clean up the images you supplied.
Automated image removal runs on a timer. By default, the automated process
removes images based on the results of a vulnerability scan. The default
vulnerability scanner is Trivy, but others can be provided in its place. Or,
the scanner can be disabled altogether, in which case Eraser acts as a garbage
collector -- it will remove all non-running images in your cluster.
## Manual image cleanup
## Automated analysis, scanning, and cleanup
================================================
FILE: docs/versioned_docs/version-v1.2.x/code-of-conduct.md
================================================
---
title: Code of Conduct
---
This project has adopted the [CNCF Code of Conduct](https://github.com/cncf/foundation/blob/main/code-of-conduct.md).
Resources:
- [CNCF Code of Conduct](https://github.com/cncf/foundation/blob/main/code-of-conduct.md)
- [Code of Conduct Reporting](https://github.com/cncf/foundation/blob/main/code-of-conduct.md)
================================================
FILE: docs/versioned_docs/version-v1.2.x/contributing.md
================================================
---
title: Contributing
---
There are several ways to get involved with Eraser
- Join the [mailing list](https://groups.google.com/u/1/g/eraser-dev) to get notifications for releases, security announcements, etc.
- Participate in the [biweekly community meetings](https://docs.google.com/document/d/1Sj5u47K3WUGYNPmQHGFpb52auqZb1FxSlWAQnPADhWI/edit) to discuss development, issues, use cases, etc.
- Join the `#eraser` channel on the [Kubernetes Slack](https://slack.k8s.io/)
- View the [development setup instructions](https://eraser-dev.github.io/eraser/docs/development)
This project welcomes contributions and suggestions.
This project has adopted the [CNCF Code of Conduct](https://github.com/cncf/foundation/blob/main/code-of-conduct.md).
================================================
FILE: docs/versioned_docs/version-v1.2.x/custom-scanner.md
================================================
---
title: Custom Scanner
---
## Creating a Custom Scanner
To create a custom scanner for non-compliant images, use the following [template](https://github.com/eraser-dev/eraser-scanner-template/).
In order to customize your scanner, start by creating a `NewImageProvider()`. The ImageProvider interface can be found [here](../../../pkg/scanners/template/scanner_template.go).
The ImageProvider will allow you to retrieve the list of all non-running and non-excluded images from the collector container through the `ReceiveImages()` function. Process these images with your customized scanner and threshold, and use `SendImages()` to pass the images found non-compliant to the eraser container for removal. Finally, complete the scanning process by calling `Finish()`.
When complete, provide your custom scanner image to Eraser in deployment.
================================================
FILE: docs/versioned_docs/version-v1.2.x/customization.md
================================================
---
title: Customization
---
## Overview
Eraser uses a configmap to configure its behavior. The configmap is part of the
deployment and it is not necessary to deploy it manually. Once deployed, the configmap
can be edited at any time:
```bash
kubectl edit configmap --namespace eraser-system eraser-manager-config
```
If an eraser job is already running, the changes will not take effect until the job completes.
The configuration is in yaml.
## Key Concepts
### Basic architecture
The _manager_ runs as a pod in your cluster and manages _ImageJobs_. Think of
an _ImageJob_ as a unit of work, performed on every node in your cluster. Each
node runs a sub-job. The goal of the _ImageJob_ is to assess the images on your
cluster's nodes, and to remove the images you don't want. There are two stages:
1. Assessment
1. Removal.
### Scheduling
An _ImageJob_ can either be created on-demand (see [Manual Removal](https://eraser-dev.github.io/eraser/docs/manual-removal)),
or they can be spawned on a timer like a cron job. On-demand jobs skip the
assessment stage and get right down to the business of removing the images you
specified. The behavior of an on-demand job is quite different from that of
timed jobs.
### Fault Tolerance
Because an _ImageJob_ runs on every node in your cluster, and the conditions on
each node may vary widely, some of the sub-jobs may fail. If you cannot
tolerate any failure, set the `manager.imageJob.successRatio` property to
`1.0`. If 75% success sounds good to you, set it to `0.75`. In that case, if
fewer than 75% of the pods spawned by the _ImageJob_ report success, the job as
a whole will be marked as a failure.
This is mainly to help diagnose error conditions. As such, you can set
`manager.imageJob.cleanup.delayOnFailure` to a long value so that logs can be
captured before the spawned pods are cleaned up.
### Excluding Nodes
For various reasons, you may want to prevent Eraser from scheduling pods on
certain nodes. To do so, the nodes can be given a special label. By default,
this label is `eraser.sh/cleanup.filter`, but you can configure the behavior with
the options under `manager.nodeFilter`. The [table](#detailed-options) provides more detail.
### Configuring Components
An _ImageJob_ is made up of various sub-jobs, with one sub-job for each node.
These sub-jobs can be broken down further into three stages.
1. Collection (What is on the node?)
1. Scanning (What images conform to the policy I've provided?)
1. Removal (Remove images based on the results of the above)
Of the above stages, only Removal is mandatory. The others can be disabled.
Furthermore, manually triggered _ImageJobs_ will skip right to removal, even if
Eraser is configured to collect and scan. Collection and Scanning will only
take place when:
1. The collector and/or scanner `components` are enabled, AND
1. The job was *not* triggered manually by creating an _ImageList_.
Disabling scanner will remove all non-running images by default.
### Swapping out components
The collector, scanner, and remover components can all be swapped out. This
enables you to build and host the images yourself. In addition, the scanner's
behavior can be completely tailored to your needs by swapping out the default
image with one of your own. To specify the images, use the
`components.<component>.image.repo` and `components.<component>.image.tag`,
where `<component>` is one of `collector`, `scanner`, or `remover`.
## Universal Options
The following portions of the configmap apply no matter how you spawn your
_ImageJob_. The values provided below are the defaults. For more detail on
these options, see the [table](#detailed-options).
```yaml
manager:
runtime: containerd
otlpEndpoint: "" # empty string disables OpenTelemetry
logLevel: info
profile:
enabled: false
port: 6060
imageJob:
successRatio: 1.0
cleanup:
delayOnSuccess: 0s
delayOnFailure: 24h
pullSecrets: [] # image pull secrets for collector/scanner/remover
priorityClassName: "" # priority class name for collector/scanner/remover
nodeFilter:
type: exclude # must be either exclude|include
selectors:
- eraser.sh/cleanup.filter
- kubernetes.io/os=windows
components:
remover:
image:
repo: ghcr.io/eraser-dev/remover
tag: v1.0.0
request:
mem: 25Mi
cpu: 0
limit:
mem: 30Mi
cpu: 1000m
```
## Component Options
```yaml
components:
collector:
enabled: true
image:
repo: ghcr.io/eraser-dev/collector
tag: v1.0.0
request:
mem: 25Mi
cpu: 7m
limit:
mem: 500Mi
cpu: 0
scanner:
enabled: true
image:
repo: ghcr.io/eraser-dev/eraser-trivy-scanner
tag: v1.0.0
request:
mem: 500Mi
cpu: 1000m
limit:
mem: 2Gi
cpu: 0
config: |
# this is the schema for the provided 'trivy-scanner'. custom scanners
# will define their own configuration. see the below
remover:
image:
repo: ghcr.io/eraser-dev/remover
tag: v1.0.0
request:
mem: 25Mi
cpu: 0
limit:
mem: 30Mi
cpu: 1000m
```
## Scanner Options
These options can be provided to `components.scanner.config`. They will be
passed through as a string to the scanner container and parsed there. If you
want to configure your own scanner, you must provide some way to parse this.
Below are the values recognized by the provided `eraser-trivy-scanner` image.
Values provided below are the defaults.
```yaml
cacheDir: /var/lib/trivy # The file path inside the container to store the cache
dbRepo: ghcr.io/aquasecurity/trivy-db # The container registry from which to fetch the trivy database
deleteFailedImages: true # if true, remove images for which scanning fails, regardless of why it failed
deleteEOLImages: true # if true, remove images that have reached their end-of-life date
vulnerabilities:
ignoreUnfixed: true # consider the image compliant if there are no known fixes for the vulnerabilities found.
types: # a list of vulnerability types. for more info, see trivy's documentation.
- os
- library
securityChecks: # see trivy's documentation for more information
- vuln
severities: # in this case, only flag images with CRITICAL vulnerability for removal
- CRITICAL
timeout:
total: 23h # if scanning isn't completed before this much time elapses, abort the whole scan
perImage: 1h # if scanning a single image exceeds this time, scanning will be aborted
```
## Detailed Options
| Option | Description | Default |
| --- | --- | --- |
| manager.runtime | The runtime to use for the manager's containers. Must be one of containerd, crio, or dockershim. It is assumed that your nodes are all using the same runtime, and there is currently no way to configure multiple runtimes. | containerd |
| manager.otlpEndpoint | The endpoint to send OpenTelemetry data to. If empty, data will not be sent. | "" |
| manager.logLevel | The log level for the manager's containers. Must be one of debug, info, warn, error, dpanic, panic, or fatal. | info |
| manager.scheduling.repeatInterval | Use only when collector and/or scanner are enabled. This is like a cron job, and will spawn an _ImageJob_ at the interval provided. | 24h |
| manager.scheduling.beginImmediately | If set to true, the first _ImageJob_ will run immediately. If false, the job will not be spawned until after the interval (above) has elapsed. | true |
| manager.profile.enabled | Whether to enable profiling for the manager's containers. This is for debugging with `go tool pprof`. | false |
| manager.profile.port | The port on which to expose the profiling endpoint. | 6060 |
| manager.imageJob.successRatio | The ratio of successful image jobs required before a cleanup is performed. | 1.0 |
| manager.imageJob.cleanup.delayOnSuccess | The amount of time to wait after a successful image job before performing cleanup. | 0s |
| manager.imageJob.cleanup.delayOnFailure | The amount of time to wait after a failed image job before performing cleanup. | 24h |
| manager.pullSecrets | The image pull secrets to use for collector, scanner, and remover containers. | [] |
| manager.priorityClassName | The priority class to use for collector, scanner, and remover containers. | "" |
| manager.nodeFilter.type | The type of node filter to use. Must be either "exclude" or "include". | exclude |
| manager.nodeFilter.selectors | A list of selectors used to filter nodes. | [] |
| components.collector.enabled | Whether to enable the collector component. | true |
| components.collector.image.repo | The repository containing the collector image. | ghcr.io/eraser-dev/collector |
| components.collector.image.tag | The tag of the collector image. | v1.0.0 |
| components.collector.request.mem | The amount of memory to request for the collector container. | 25Mi |
| components.collector.request.cpu | The amount of CPU to request for the collector container. | 7m |
| components.collector.limit.mem | The maximum amount of memory the collector container is allowed to use. | 500Mi |
| components.collector.limit.cpu | The maximum amount of CPU the collector container is allowed to use. | 0 |
| components.scanner.enabled | Whether to enable the scanner component. | true |
| components.scanner.image.repo | The repository containing the scanner image. | ghcr.io/eraser-dev/eraser-trivy-scanner |
| components.scanner.image.tag | The tag of the scanner image. | v1.0.0 |
| components.scanner.request.mem | The amount of memory to request for the scanner container. | 500Mi |
| components.scanner.request.cpu | The amount of CPU to request for the scanner container. | 1000m |
| components.scanner.limit.mem | The maximum amount of memory the scanner container is allowed to use. | 2Gi |
| components.scanner.limit.cpu | The maximum amount of CPU the scanner container is allowed to use. | 0 |
| components.scanner.config | The configuration to pass to the scanner container, as a YAML string. | See YAML below |
| components.remover.image.repo | The repository containing the remover image. | ghcr.io/eraser-dev/remover |
| components.remover.image.tag | The tag of the remover image. | v1.0.0 |
| components.remover.request.mem | The amount of memory to request for the remover container. | 25Mi |
| components.remover.request.cpu | The amount of CPU to request for the remover container. | 0 |
================================================
FILE: docs/versioned_docs/version-v1.2.x/exclusion.md
================================================
---
title: Exclusion
---
## Excluding registries, repositories, and images
Eraser can exclude registries (example, `docker.io/library/*`) and also specific images with a tag (example, `docker.io/library/ubuntu:18.04`) or digest (example, `sha256:80f31da1ac7b312ba29d65080fd...`) from its removal process.
To exclude any images or registries from the removal, create configmap(s) with the label `eraser.sh/exclude.list=true` in the eraser-system namespace with a JSON file holding the excluded images.
```bash
$ cat > sample.json <<"EOF"
{
"excluded": [
"docker.io/library/*",
"ghcr.io/eraser-dev/test:latest"
]
}
EOF
$ kubectl create configmap excluded --from-file=sample.json --namespace=eraser-system
$ kubectl label configmap excluded eraser.sh/exclude.list=true -n eraser-system
```
## Exempting Nodes from the Eraser Pipeline
Exempting nodes from cleanup was added in v1.0.0. When deploying Eraser, you can specify whether there is a list of nodes you would like to `include` or `exclude` from the cleanup process using the configmap. For more information, see the section on [customization](https://eraser-dev.github.io/eraser/docs/customization).
================================================
FILE: docs/versioned_docs/version-v1.2.x/faq.md
================================================
---
title: FAQ
---
## Why am I still seeing vulnerable images?
Eraser currently targets **non-running** images, so any vulnerable images that are currently running will not be removed. In addition, the default vulnerability scanning with Trivy removes images with `CRITICAL` vulnerabilities. Any images with lower vulnerabilities will not be removed. This can be configured using the [configmap](https://eraser-dev.github.io/eraser/docs/customization#scanner-options).
## How is Eraser different from Kubernetes garbage collection?
The native garbage collection in Kubernetes works a bit differently than Eraser. By default, garbage collection begins when disk usage reaches 85%, and stops when it gets down to 80%. More details about Kubernetes garbage collection can be found in the [Kubernetes documentation](https://kubernetes.io/docs/concepts/architecture/garbage-collection/), and configuration options can be found in the [Kubelet documentation](https://kubernetes.io/docs/reference/config-api/kubelet-config.v1beta1/).
There are a couple core benefits to using Eraser for image cleanup:
* Eraser can be configured to use image vulnerability data when making determinations on image removal
* By interfacing directly with the container runtime, Eraser can clean up images that are not managed by Kubelet and Kubernetes
================================================
FILE: docs/versioned_docs/version-v1.2.x/installation.md
================================================
---
title: Installation
---
## Manifest
To install Eraser with the manifest file, run the following command:
```bash
kubectl apply -f https://raw.githubusercontent.com/eraser-dev/eraser/v1.2.0/deploy/eraser.yaml
```
## Helm
If you'd like to install and manage Eraser with Helm, follow the install instructions [here](https://github.com/eraser-dev/eraser/blob/main/charts/eraser/README.md)
================================================
FILE: docs/versioned_docs/version-v1.2.x/introduction.md
================================================
---
title: Introduction
slug: /
---
# Introduction
When deploying to Kubernetes, it's common for pipelines to build and push images to a cluster, but it's much less common for these images to be cleaned up. This can lead to accumulating bloat on the disk, and a host of non-compliant images lingering on the nodes.
The current garbage collection process deletes images based on a percentage of load, but this process does not consider the vulnerability state of the images. **Eraser** aims to provide a simple way to determine the state of an image, and delete it if it meets the specified criteria.
================================================
FILE: docs/versioned_docs/version-v1.2.x/manual-removal.md
================================================
---
title: Manual Removal
---
Create an `ImageList` and specify the images you would like to remove. In this case, the image `docker.io/library/alpine:3.7.3` will be removed.
```shell
cat <<EOF | kubectl apply -f -
apiVersion: eraser.sh/v1
kind: ImageList
metadata:
  name: imagelist
spec:
  images:
    - docker.io/library/alpine:3.7.3
EOF
```
> `ImageList` is a cluster-scoped resource and must be called imagelist. `"*"` can be specified to remove all non-running images instead of individual images.
Creating an `ImageList` should trigger an `ImageJob` that will deploy Eraser pods on every node to perform the removal given the list of images.
```shell
$ kubectl get pods -n eraser-system
eraser-system eraser-controller-manager-55d54c4fb6-dcglq 1/1 Running 0 9m8s
eraser-system eraser-kind-control-plane 1/1 Running 0 11s
eraser-system eraser-kind-worker 1/1 Running 0 11s
eraser-system eraser-kind-worker2 1/1 Running 0 11s
```
Pods will run to completion and the images will be removed.
```shell
$ kubectl get pods -n eraser-system
eraser-system eraser-controller-manager-6d6d5594d4-phl2q 1/1 Running 0 4m16s
eraser-system eraser-kind-control-plane 0/1 Completed 0 22s
eraser-system eraser-kind-worker 0/1 Completed 0 22s
eraser-system eraser-kind-worker2 0/1 Completed 0 22s
```
The `ImageList` custom resource status field will contain the status of the last job. The success and failure counts indicate the number of nodes the Eraser agent was run on.
```shell
$ kubectl describe ImageList imagelist
...
Status:
Failed: 0
Success: 3
Timestamp: 2022-02-25T23:41:55Z
...
```
Verify the unused images are removed.
```shell
$ docker exec kind-worker ctr -n k8s.io images list | grep alpine
```
If the image has been successfully removed, there will be no output.
================================================
FILE: docs/versioned_docs/version-v1.2.x/metrics.md
================================================
---
title: Metrics
---
To view Eraser metrics, you will need to deploy an Open Telemetry collector in the 'eraser-system' namespace, and an exporter. An example collector with a Prometheus exporter is [otelcollector.yaml](https://github.com/eraser-dev/eraser/blob/main/test/e2e/test-data/otelcollector.yaml), and the endpoint can be specified using the [configmap](https://eraser-dev.github.io/eraser/docs/customization#universal-options). In this example, we are logging the collected data to the otel-collector pod, and exporting metrics through Prometheus at 'http://localhost:8889/metrics', but a separate exporter can also be configured.
Below is the list of metrics provided by Eraser per run:
#### Eraser
```yaml
- count
- name: images_removed_run_total
- description: Total images removed by eraser
```
#### Scanner
```yaml
- count
- name: vulnerable_images_run_total
- description: Total vulnerable images detected
```
#### ImageJob
```yaml
- count
- name: imagejob_run_total
- description: Total ImageJobs scheduled
- name: pods_completed_run_total
- description: Total pods completed
- name: pods_failed_run_total
- description: Total pods failed
- summary
- name: imagejob_duration_run_seconds
- description: Total time for ImageJobs scheduled to complete
```
================================================
FILE: docs/versioned_docs/version-v1.2.x/quick-start.md
================================================
---
title: Quick Start
---
This tutorial demonstrates the functionality of Eraser and validates that non-running images are removed successfully.
## Deploy a DaemonSet
After following the [install instructions](installation.md), we'll apply a demo `DaemonSet`. For illustrative purposes, a DaemonSet is applied and deleted so the non-running images remain on all nodes. The alpine image with the `3.7.3` tag will be used in this example. This is an image with a known critical vulnerability.
First, apply the `DaemonSet`:
```shell
cat <<EOF | kubectl apply -f -
apiVersion: apps/v1
kind: DaemonSet
metadata:
  name: alpine
spec:
  selector:
    matchLabels:
      app: alpine
  template:
    metadata:
      labels:
        app: alpine
    spec:
      containers:
      - name: alpine
        image: docker.io/library/alpine:3.7.3
        args: ["sleep", "infinity"]
EOF
```
Verify the pods are running, then delete the `DaemonSet` so the images become non-running:
```shell
kubectl delete daemonset alpine
```
Verify the nodes in the cluster:
```shell
$ kubectl get nodes
NAME                 STATUS   ROLES           AGE   VERSION
kind-control-plane   Ready    control-plane   45m   v1.24.0
kind-worker          Ready    <none>          45m   v1.24.0
kind-worker2         Ready    <none>          44m   v1.24.0
```
List the images then filter for `alpine`:
```shell
$ docker exec kind-worker ctr -n k8s.io images list | grep alpine
docker.io/library/alpine:3.7.3 application/vnd.docker.distribution.manifest.list.v2+json sha256:8421d9a84432575381bfabd248f1eb56f3aa21d9d7cd2511583c68c9b7511d10 2.0 MiB linux/386,linux/amd64,linux/arm/v6,linux/arm64/v8,linux/ppc64le,linux/s390x io.cri-containerd.image=managed
docker.io/library/alpine@sha256:8421d9a84432575381bfabd248f1eb56f3aa21d9d7cd2511583c68c9b7511d10 application/vnd.docker.distribution.manifest.list.v2+json sha256:8421d9a84432575381bfabd248f1eb56f3aa21d9d7cd2511583c68c9b7511d10 2.0 MiB linux/386,linux/amd64,linux/arm/v6,linux/arm64/v8,linux/ppc64le,linux/s390x io.cri-containerd.image=managed
```
## Automatically Cleaning Images
After deploying Eraser, it will automatically clean images in a regular interval. This interval can be set using the `manager.scheduling.repeatInterval` setting in the [configmap](https://eraser-dev.github.io/eraser/docs/customization#detailed-options). The default interval is 24 hours (`24h`). Valid time units are "ns", "us" (or "µs"), "ms", "s", "m", "h".
Eraser will schedule eraser pods to each node in the cluster, and each pod will contain 3 containers: collector, scanner, and remover that will run to completion.
```shell
$ kubectl get pods -n eraser-system
NAMESPACE NAME READY STATUS RESTARTS AGE
eraser-system eraser-kind-control-plane-sb789 0/3 Completed 0 26m
eraser-system eraser-kind-worker-j84hm 0/3 Completed 0 26m
eraser-system eraser-kind-worker2-4lbdr 0/3 Completed 0 26m
eraser-system eraser-controller-manager-86cdb4cbf9-x8d7q 1/1 Running 0 26m
```
The collector container sends the list of all images to the scanner container, which scans and reports non-compliant images to the remover container for removal of images that are non-running. Once all pods are completed, they will be automatically cleaned up.
> If you want to remove all the images periodically, you can skip the scanner container by setting the `components.scanner.enabled` value to `false` using the [configmap](https://eraser-dev.github.io/eraser/docs/customization#detailed-options). In this case, each collector pod will hold 2 containers: collector and remover.
```shell
$ kubectl get pods -n eraser-system
NAMESPACE NAME READY STATUS RESTARTS AGE
eraser-system eraser-kind-control-plane-ksk2b 0/2 Completed 0 50s
eraser-system eraser-kind-worker-cpgqc 0/2 Completed 0 50s
eraser-system eraser-kind-worker2-k25df 0/2 Completed 0 50s
eraser-system eraser-controller-manager-86cdb4cbf9-x8d7q 1/1 Running 0 55s
```
================================================
FILE: docs/versioned_docs/version-v1.2.x/releasing.md
================================================
---
title: Releasing
---
## Create Release Pull Request
1. Go to `create_release_pull_request` workflow under actions.
2. Select run workflow, and use the workflow from your branch.
3. Input release version with the semantic version identifying the release.
4. Click run workflow and review the PR created by github-actions.
# Releasing
5. Once the PR is merged to `main`, tag that commit with release version and push tags to remote repository.
```
git checkout <release branch>
git pull origin <release branch>
git tag -a <release version> -m '<release version>'
git push origin <release version>
```
6. Pushing the release tag will trigger GitHub Actions to trigger `release` job.
This will build the `ghcr.io/eraser-dev/remover`, `ghcr.io/eraser-dev/eraser-manager`, `ghcr.io/eraser-dev/collector`, and `ghcr.io/eraser-dev/eraser-trivy-scanner` images automatically, then publish the new release tag.
## Publishing
1. GitHub Action will create a new release, review and edit it at https://github.com/eraser-dev/eraser/releases
================================================
FILE: docs/versioned_docs/version-v1.2.x/setup.md
================================================
---
title: Setup
---
# Development Setup
This document describes the steps to get started with development.
You can either utilize [Codespaces](https://docs.github.com/en/codespaces/overview) or setup a local environment.
## Local Setup
### Prerequisites:
- [go](https://go.dev/) with version 1.17 or later.
- [docker](https://docs.docker.com/get-docker/)
- [kind](https://kind.sigs.k8s.io/)
- `make`
### Get things running
- Get dependencies with `go get`
- This project uses `make`. You can utilize `make help` to see available targets. For local deployment make targets help to build, test and deploy.
### Making changes
Please refer to [Development Reference](#development-reference) for more details on the specific commands.
To test your changes on a cluster:
```bash
# generate necessary api files (optional - only needed if changes to api folder).
make generate
# build applicable images
make docker-build-manager MANAGER_IMG=eraser-manager:dev
make docker-build-remover REMOVER_IMG=remover:dev
make docker-build-collector COLLECTOR_IMG=collector:dev
make docker-build-trivy-scanner TRIVY_SCANNER_IMG=eraser-trivy-scanner:dev
# make sure updated image is present on cluster (e.g., see kind example below)
kind load docker-image \
eraser-manager:dev \
eraser-trivy-scanner:dev \
remover:dev \
collector:dev
make manifests
make deploy
# to remove the deployment
make undeploy
```
To test your changes to manager locally:
```bash
make run
```
Example Output:
```
you@local:~/eraser$ make run
docker build . \
-t eraser-tooling \
-f build/tooling/Dockerfile
[+] Building 7.8s (8/8) FINISHED
=> => naming to docker.io/library/eraser-tooling 0.0s
docker run -v /home/eraser/config:/config -w /config/manager \
registry.k8s.io/kustomize/kustomize:v3.8.9 edit set image controller=eraser-manager:dev
docker run -v /home/eraser:/eraser eraser-tooling controller-gen \
crd \
rbac:roleName=manager-role \
webhook \
paths="./..." \
output:crd:artifacts:config=config/crd/bases
rm -rf manifest_staging
mkdir -p manifest_staging/deploy
docker run --rm -v /home/eraser:/eraser \
registry.k8s.io/kustomize/kustomize:v3.8.9 build \
/eraser/config/default -o /eraser/manifest_staging/deploy/eraser.yaml
docker run -v /home/eraser:/eraser eraser-tooling controller-gen object:headerFile="hack/boilerplate.go.txt" paths="./..."
go fmt ./...
go vet ./...
go run ./main.go
{"level":"info","ts":1652985685.1663408,"logger":"controller-runtime.metrics","msg":"Metrics server is starting to listen","addr":":8080"}
...
```
## Development Reference
Eraser is using tooling from [kubebuilder](https://github.com/kubernetes-sigs/kubebuilder). For Eraser this tooling is containerized into the `eraser-tooling` image. The `make` targets can use this tooling and build the image when necessary.
You can override the default configuration using environment variables. Below you can find a reference of targets and configuration options.
### Common Configuration
| Environment Variable | Description |
| -------------------- | --------------------------------------------------------------------------------------------- |
| VERSION | Specifies the version (i.e., the image tag) of eraser to be used. |
| MANAGER_IMG | Defines the image url for the Eraser manager. Used for tagging, pulling and pushing the image |
| REMOVER_IMG | Defines the image url for the Eraser. Used for tagging, pulling and pushing the image |
| COLLECTOR_IMG | Defines the image url for the Collector. Used for tagging, pulling and pushing the image |
### Linting
- `make lint`
Lints the go code.
| Environment Variable | Description |
| -------------------- | ------------------------------------------------------- |
| GOLANGCI_LINT | Specifies the go linting binary to be used for linting. |
### Development
- `make generate`
Generates necessary files for the k8s api stored under `api/v1alpha1/zz_generated.deepcopy.go`. See the [kubebuilder docs](https://book.kubebuilder.io/cronjob-tutorial/other-api-files.html) for details.
- `make manifests`
Generates the eraser deployment yaml files under `manifest_staging/deploy`.
Configuration Options:
| Environment Variable | Description |
| -------------------- | -------------------------------------------------- |
| REMOVER_IMG | Defines the image url for the Eraser. |
| MANAGER_IMG | Defines the image url for the Eraser manager. |
| KUSTOMIZE_VERSION | Define Kustomize version for generating manifests. |
- `make test`
Runs the unit tests for the eraser project.
Configuration Options:
| Environment Variable | Description |
| -------------------- | ----------------------------------------------------------- |
| ENVTEST | Specifies the envtest setup binary. |
| ENVTEST_K8S_VERSION | Specifies the Kubernetes version for envtest setup command. |
- `make e2e-test`
Runs e2e tests on a cluster.
Configuration Options:
| Environment Variable | Description |
| -------------------- | ------------------------------------------------------------------------------------------------------------- |
| REMOVER_IMG | Eraser image to be used for e2e test. |
| MANAGER_IMG | Eraser manager image to be used for e2e test. |
| KUBERNETES_VERSION | Kubernetes version for e2e test. |
| TEST_COUNT | Sets repetition for test. Please refer to [go docs](https://pkg.go.dev/cmd/go#hdr-Testing_flags) for details. |
| TIMEOUT | Sets timeout for test. Please refer to [go docs](https://pkg.go.dev/cmd/go#hdr-Testing_flags) for details. |
| TESTFLAGS | Sets additional test flags |
### Build
- `make build`
Builds the eraser manager binaries.
- `make run`
Runs the eraser manager on your local machine.
- `make docker-build-manager`
Builds the docker image for the eraser manager.
Configuration Options:
| Environment Variable | Description |
| -------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------ |
| CACHE_FROM | Sets the target of the buildx --cache-from flag [see buildx reference](https://docs.docker.com/engine/reference/commandline/buildx_build/#cache-from). |
| CACHE_TO | Sets the target of the buildx --cache-to flag [see buildx reference](https://docs.docker.com/engine/reference/commandline/buildx_build/#cache-to). |
| PLATFORM | Sets the target platform for buildx [see buildx reference](https://docs.docker.com/engine/reference/commandline/buildx_build/#platform). |
| OUTPUT_TYPE | Sets the output for buildx [see buildx reference](https://docs.docker.com/engine/reference/commandline/buildx_build/#output). |
| MANAGER_IMG | Specifies the target repository, image name and tag for building image. |
- `make docker-push-manager`
Builds the docker image for the eraser manager.
Configuration Options:
| Environment Variable | Description |
| -------------------- | ----------------------------------------------------------------------- |
| MANAGER_IMG | Specifies the target repository, image name and tag for building image. |
- `make docker-build-remover`
Builds the docker image for eraser remover.
Configuration Options:
| Environment Variable | Description |
| -------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------ |
| CACHE_FROM | Sets the target of the buildx --cache-from flag [see buildx reference](https://docs.docker.com/engine/reference/commandline/buildx_build/#cache-from). |
| CACHE_TO | Sets the target of the buildx --cache-to flag [see buildx reference](https://docs.docker.com/engine/reference/commandline/buildx_build/#cache-to). |
| PLATFORM | Sets the target platform for buildx [see buildx reference](https://docs.docker.com/engine/reference/commandline/buildx_build/#platform). |
| OUTPUT_TYPE | Sets the output for buildx [see buildx reference](https://docs.docker.com/engine/reference/commandline/buildx_build/#output). |
| REMOVER_IMG | Specifies the target repository, image name and tag for building image. |
- `make docker-push-remover`
Builds the docker image for the eraser remover.
Configuration Options:
| Environment Variable | Description |
| -------------------- | ----------------------------------------------------------------------- |
| REMOVER_IMG | Specifies the target repository, image name and tag for building image. |
- `make docker-build-collector`
Builds the docker image for the eraser collector.
Configuration Options:
| Environment Variable | Description |
| -------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------ |
| CACHE_FROM | Sets the target of the buildx --cache-from flag [see buildx reference](https://docs.docker.com/engine/reference/commandline/buildx_build/#cache-from). |
| CACHE_TO | Sets the target of the buildx --cache-to flag [see buildx reference](https://docs.docker.com/engine/reference/commandline/buildx_build/#cache-to). |
| PLATFORM | Sets the target platform for buildx [see buildx reference](https://docs.docker.com/engine/reference/commandline/buildx_build/#platform). |
| OUTPUT_TYPE | Sets the output for buildx [see buildx reference](https://docs.docker.com/engine/reference/commandline/buildx_build/#output). |
| COLLECTOR_IMG | Specifies the target repository, image name and tag for building image. |
- `make docker-push-collector`
Builds the docker image for the eraser collector.
Configuration Options:
| Environment Variable | Description |
| -------------------- | ----------------------------------------------------------------------- |
| COLLECTOR_IMG | Specifies the target repository, image name and tag for building image. |
### Deployment
- `make install`
Install CRDs into the K8s cluster specified in ~/.kube/config.
Configuration Options:
| Environment Variable | Description |
| -------------------- | ---------------------------------------------------------------- |
| KUSTOMIZE_VERSION | Kustomize version used to generate k8s resources for deployment. |
- `make uninstall`
Uninstall CRDs from the K8s cluster specified in ~/.kube/config.
Configuration Options:
| Environment Variable | Description |
| -------------------- | ---------------------------------------------------------------- |
| KUSTOMIZE_VERSION | Kustomize version used to generate k8s resources for deployment. |
- `make deploy`
Deploys eraser to the cluster specified in ~/.kube/config.
Configuration Options:
| Environment Variable | Description |
| -------------------- | -------------------------------------------------------------------- |
| KUSTOMIZE_VERSION | Kustomize version used to generate k8s resources for deployment. |
| MANAGER_IMG | Specifies the eraser manager image version to be used for deployment |
- `make undeploy`
Undeploy controller from the K8s cluster specified in ~/.kube/config.
Configuration Options:
| Environment Variable | Description |
| -------------------- | ------------------------------------------------------------------------- |
| KUSTOMIZE_VERSION | Kustomize version used to generate k8s resources that need to be removed. |
### Release
- `make release-manifest`
Generates k8s manifests files for a release.
Configuration Options:
| Environment Variable | Description |
| -------------------- | ------------------------------------ |
| NEWVERSION | Sets the new version in the Makefile |
- `make promote-staging-manifest`
Promotes the k8s deployment yaml files to release.
================================================
FILE: docs/versioned_docs/version-v1.2.x/trivy.md
================================================
---
title: Trivy
---
## Trivy Provider Options
The trivy provider is used in Eraser for image scanning and detecting vulnerabilities. See [Customization](https://eraser-dev.github.io/eraser/docs/customization#scanner-options) for more details on configuring the scanner.
================================================
FILE: docs/versioned_docs/version-v1.3.x/architecture.md
================================================
---
title: Architecture
---
At a high level, Eraser has two main modes of operation: manual and automated.
Manual image removal involves supplying a list of images to remove; Eraser then
deploys pods to clean up the images you supplied.
Automated image removal runs on a timer. By default, the automated process
removes images based on the results of a vulnerability scan. The default
vulnerability scanner is Trivy, but others can be provided in its place. Or,
the scanner can be disabled altogether, in which case Eraser acts as a garbage
collector -- it will remove all non-running images in your cluster.
## Manual image cleanup
## Automated analysis, scanning, and cleanup
================================================
FILE: docs/versioned_docs/version-v1.3.x/code-of-conduct.md
================================================
---
title: Code of Conduct
---
This project has adopted the [CNCF Code of Conduct](https://github.com/cncf/foundation/blob/main/code-of-conduct.md).
Resources:
- [CNCF Code of Conduct](https://github.com/cncf/foundation/blob/main/code-of-conduct.md)
- [Code of Conduct Reporting](https://github.com/cncf/foundation/blob/main/code-of-conduct.md)
================================================
FILE: docs/versioned_docs/version-v1.3.x/contributing.md
================================================
---
title: Contributing
---
There are several ways to get involved with Eraser
- Join the [mailing list](https://groups.google.com/u/1/g/eraser-dev) to get notifications for releases, security announcements, etc.
- Participate in the [biweekly community meetings](https://docs.google.com/document/d/1Sj5u47K3WUGYNPmQHGFpb52auqZb1FxSlWAQnPADhWI/edit) to discuss development, issues, use cases, etc.
- Join the `#eraser` channel on the [Kubernetes Slack](https://slack.k8s.io/)
- View the [development setup instructions](https://eraser-dev.github.io/eraser/docs/development)
This project welcomes contributions and suggestions.
This project has adopted the [CNCF Code of Conduct](https://github.com/cncf/foundation/blob/main/code-of-conduct.md).
================================================
FILE: docs/versioned_docs/version-v1.3.x/custom-scanner.md
================================================
---
title: Custom Scanner
---
## Creating a Custom Scanner
To create a custom scanner for non-compliant images, use the following [template](https://github.com/eraser-dev/eraser-scanner-template/).
In order to customize your scanner, start by creating a `NewImageProvider()`. The ImageProvider interface can be found [here](../../../pkg/scanners/template/scanner_template.go).
The ImageProvider will allow you to retrieve the list of all non-running and non-excluded images from the collector container through the `ReceiveImages()` function. Process these images with your customized scanner and threshold, and use `SendImages()` to pass the images found non-compliant to the eraser container for removal. Finally, complete the scanning process by calling `Finish()`.
When complete, provide your custom scanner image to Eraser in deployment.
================================================
FILE: docs/versioned_docs/version-v1.3.x/customization.md
================================================
---
title: Customization
---
## Overview
Eraser uses a configmap to configure its behavior. The configmap is part of the
deployment and it is not necessary to deploy it manually. Once deployed, the configmap
can be edited at any time:
```bash
kubectl edit configmap --namespace eraser-system eraser-manager-config
```
If an eraser job is already running, the changes will not take effect until the job completes.
The configuration is in yaml.
## Key Concepts
### Basic architecture
The _manager_ runs as a pod in your cluster and manages _ImageJobs_. Think of
an _ImageJob_ as a unit of work, performed on every node in your cluster. Each
node runs a sub-job. The goal of the _ImageJob_ is to assess the images on your
cluster's nodes, and to remove the images you don't want. There are two stages:
1. Assessment
1. Removal.
### Scheduling
An _ImageJob_ can either be created on-demand (see [Manual Removal](https://eraser-dev.github.io/eraser/docs/manual-removal)),
or they can be spawned on a timer like a cron job. On-demand jobs skip the
assessment stage and get right down to the business of removing the images you
specified. The behavior of an on-demand job is quite different from that of
timed jobs.
### Fault Tolerance
Because an _ImageJob_ runs on every node in your cluster, and the conditions on
each node may vary widely, some of the sub-jobs may fail. If you cannot
tolerate any failure, set the `manager.imageJob.successRatio` property to
`1.0`. If 75% success sounds good to you, set it to `0.75`. In that case, if
fewer than 75% of the pods spawned by the _ImageJob_ report success, the job as
a whole will be marked as a failure.
This is mainly to help diagnose error conditions. As such, you can set
`manager.imageJob.cleanup.delayOnFailure` to a long value so that logs can be
captured before the spawned pods are cleaned up.
### Excluding Nodes
For various reasons, you may want to prevent Eraser from scheduling pods on
certain nodes. To do so, the nodes can be given a special label. By default,
this label is `eraser.sh/cleanup.filter`, but you can configure the behavior with
the options under `manager.nodeFilter`. The [table](#detailed-options) provides more detail.
### Configuring Components
An _ImageJob_ is made up of various sub-jobs, with one sub-job for each node.
These sub-jobs can be broken down further into three stages.
1. Collection (What is on the node?)
1. Scanning (What images conform to the policy I've provided?)
1. Removal (Remove images based on the results of the above)
Of the above stages, only Removal is mandatory. The others can be disabled.
Furthermore, manually triggered _ImageJobs_ will skip right to removal, even if
Eraser is configured to collect and scan. Collection and Scanning will only
take place when:
1. The collector and/or scanner `components` are enabled, AND
1. The job was *not* triggered manually by creating an _ImageList_.
Disabling scanner will remove all non-running images by default.
### Swapping out components
The collector, scanner, and remover components can all be swapped out. This
enables you to build and host the images yourself. In addition, the scanner's
behavior can be completely tailored to your needs by swapping out the default
image with one of your own. To specify the images, use the
`components.<component>.image.repo` and `components.<component>.image.tag` settings,
where `<component>` is one of `collector`, `scanner`, or `remover`.
## Universal Options
The following portions of the configmap apply no matter how you spawn your
_ImageJob_. The values provided below are the defaults. For more detail on
these options, see the [table](#detailed-options).
```yaml
manager:
runtime:
name: containerd
address: unix:///run/containerd/containerd.sock
otlpEndpoint: "" # empty string disables OpenTelemetry
logLevel: info
profile:
enabled: false
port: 6060
imageJob:
successRatio: 1.0
cleanup:
delayOnSuccess: 0s
delayOnFailure: 24h
pullSecrets: [] # image pull secrets for collector/scanner/remover
priorityClassName: "" # priority class name for collector/scanner/remover
nodeFilter:
type: exclude # must be either exclude|include
selectors:
- eraser.sh/cleanup.filter
- kubernetes.io/os=windows
components:
remover:
image:
repo: ghcr.io/eraser-dev/remover
tag: v1.0.0
request:
mem: 25Mi
cpu: 0
limit:
mem: 30Mi
cpu: 1000m
```
## Component Options
```yaml
components:
collector:
enabled: true
image:
repo: ghcr.io/eraser-dev/collector
tag: v1.0.0
request:
mem: 25Mi
cpu: 7m
limit:
mem: 500Mi
cpu: 0
scanner:
enabled: true
image:
repo: ghcr.io/eraser-dev/eraser-trivy-scanner
tag: v1.0.0
request:
mem: 500Mi
cpu: 1000m
limit:
mem: 2Gi
cpu: 0
config: |
# this is the schema for the provided 'trivy-scanner'. custom scanners
# will define their own configuration. See the Scanner Options section below.
remover:
image:
repo: ghcr.io/eraser-dev/remover
tag: v1.0.0
request:
mem: 25Mi
cpu: 0
limit:
mem: 30Mi
cpu: 1000m
```
## Scanner Options
These options can be provided to `components.scanner.config`. They will be
passed through as a string to the scanner container and parsed there. If you
want to configure your own scanner, you must provide some way to parse this.
Below are the values recognized by the provided `eraser-trivy-scanner` image.
Values provided below are the defaults.
```yaml
cacheDir: /var/lib/trivy # The file path inside the container to store the cache
dbRepo: ghcr.io/aquasecurity/trivy-db # The container registry from which to fetch the trivy database
deleteFailedImages: true # if true, remove images for which scanning fails, regardless of why it failed
deleteEOLImages: true # if true, remove images that have reached their end-of-life date
vulnerabilities:
ignoreUnfixed: true # consider the image compliant if there are no known fixes for the vulnerabilities found.
types: # a list of vulnerability types. for more info, see trivy's documentation.
- os
- library
securityChecks: # see trivy's documentation for more information
- vuln
severities: # in this case, only flag images with CRITICAL vulnerability for removal
- CRITICAL
ignoredStatuses: # a list of trivy statuses to ignore. See https://aquasecurity.github.io/trivy/v0.44/docs/configuration/filtering/#by-status.
timeout:
total: 23h # if scanning isn't completed before this much time elapses, abort the whole scan
perImage: 1h # if scanning a single image exceeds this time, scanning will be aborted
```
## Detailed Options
| Option | Description | Default |
| --- | --- | --- |
| manager.runtime.name | The runtime to use for the manager's containers. Must be one of containerd, crio, or dockershim. It is assumed that your nodes are all using the same runtime, and there is currently no way to configure multiple runtimes. | containerd |
| manager.runtime.address | The runtime socket address to use for the containers. Can provide a custom address for containerd and dockershim runtimes, but not for crio due to Trivy restrictions. | unix:///run/containerd/containerd.sock |
| manager.otlpEndpoint | The endpoint to send OpenTelemetry data to. If empty, data will not be sent. | "" |
| manager.logLevel | The log level for the manager's containers. Must be one of debug, info, warn, error, dpanic, panic, or fatal. | info |
| manager.scheduling.repeatInterval | Use only when collector and/or scanner are enabled. This is like a cron job, and will spawn an _ImageJob_ at the interval provided. | 24h |
| manager.scheduling.beginImmediately | If set to true, the first _ImageJob_ will run immediately. If false, the job will not be spawned until after the interval (above) has elapsed. | true |
| manager.profile.enabled | Whether to enable profiling for the manager's containers. This is for debugging with `go tool pprof`. | false |
| manager.profile.port | The port on which to expose the profiling endpoint. | 6060 |
| manager.imageJob.successRatio | The ratio of successful image jobs required before a cleanup is performed. | 1.0 |
| manager.imageJob.cleanup.delayOnSuccess | The amount of time to wait after a successful image job before performing cleanup. | 0s |
| manager.imageJob.cleanup.delayOnFailure | The amount of time to wait after a failed image job before performing cleanup. | 24h |
| manager.pullSecrets | The image pull secrets to use for collector, scanner, and remover containers. | [] |
| manager.priorityClassName | The priority class to use for collector, scanner, and remover containers. | "" |
| manager.nodeFilter.type | The type of node filter to use. Must be either "exclude" or "include". | exclude |
| manager.nodeFilter.selectors | A list of selectors used to filter nodes. | [] |
| components.collector.enabled | Whether to enable the collector component. | true |
| components.collector.image.repo | The repository containing the collector image. | ghcr.io/eraser-dev/collector |
| components.collector.image.tag | The tag of the collector image. | v1.0.0 |
| components.collector.request.mem | The amount of memory to request for the collector container. | 25Mi |
| components.collector.request.cpu | The amount of CPU to request for the collector container. | 7m |
| components.collector.limit.mem | The maximum amount of memory the collector container is allowed to use. | 500Mi |
| components.collector.limit.cpu | The maximum amount of CPU the collector container is allowed to use. | 0 |
| components.scanner.enabled | Whether to enable the scanner component. | true |
| components.scanner.image.repo | The repository containing the scanner image. | ghcr.io/eraser-dev/eraser-trivy-scanner |
| components.scanner.image.tag | The tag of the scanner image. | v1.0.0 |
| components.scanner.request.mem | The amount of memory to request for the scanner container. | 500Mi |
| components.scanner.request.cpu | The amount of CPU to request for the scanner container. | 1000m |
| components.scanner.limit.mem | The maximum amount of memory the scanner container is allowed to use. | 2Gi |
| components.scanner.limit.cpu | The maximum amount of CPU the scanner container is allowed to use. | 0 |
| components.scanner.config | The configuration to pass to the scanner container, as a YAML string. | See YAML below |
| components.remover.image.repo | The repository containing the remover image. | ghcr.io/eraser-dev/remover |
| components.remover.image.tag | The tag of the remover image. | v1.0.0 |
| components.remover.request.mem | The amount of memory to request for the remover container. | 25Mi |
| components.remover.request.cpu | The amount of CPU to request for the remover container. | 0 |
================================================
FILE: docs/versioned_docs/version-v1.3.x/exclusion.md
================================================
---
title: Exclusion
---
## Excluding registries, repositories, and images
Eraser can exclude registries (example, `docker.io/library/*`) and also specific images with a tag (example, `docker.io/library/ubuntu:18.04`) or digest (example, `sha256:80f31da1ac7b312ba29d65080fd...`) from its removal process.
To exclude any images or registries from the removal, create configmap(s) with the label `eraser.sh/exclude.list=true` in the eraser-system namespace with a JSON file holding the excluded images.
```bash
$ cat > sample.json <<"EOF"
{
"excluded": [
"docker.io/library/*",
"ghcr.io/eraser-dev/test:latest"
]
}
EOF
$ kubectl create configmap excluded --from-file=sample.json --namespace=eraser-system
$ kubectl label configmap excluded eraser.sh/exclude.list=true -n eraser-system
```
## Exempting Nodes from the Eraser Pipeline
Exempting nodes from cleanup was added in v1.0.0. When deploying Eraser, you can specify whether there is a list of nodes you would like to `include` or `exclude` from the cleanup process using the configmap. For more information, see the section on [customization](https://eraser-dev.github.io/eraser/docs/customization).
================================================
FILE: docs/versioned_docs/version-v1.3.x/faq.md
================================================
---
title: FAQ
---
## Why am I still seeing vulnerable images?
Eraser currently targets **non-running** images, so any vulnerable images that are currently running will not be removed. In addition, the default vulnerability scanning with Trivy removes images with `CRITICAL` vulnerabilities. Any images with lower vulnerabilities will not be removed. This can be configured using the [configmap](https://eraser-dev.github.io/eraser/docs/customization#scanner-options).
## How is Eraser different from Kubernetes garbage collection?
The native garbage collection in Kubernetes works a bit differently than Eraser. By default, garbage collection begins when disk usage reaches 85%, and stops when it gets down to 80%. More details about Kubernetes garbage collection can be found in the [Kubernetes documentation](https://kubernetes.io/docs/concepts/architecture/garbage-collection/), and configuration options can be found in the [Kubelet documentation](https://kubernetes.io/docs/reference/config-api/kubelet-config.v1beta1/).
There are a couple core benefits to using Eraser for image cleanup:
* Eraser can be configured to use image vulnerability data when making determinations on image removal
* By interfacing directly with the container runtime, Eraser can clean up images that are not managed by Kubelet and Kubernetes
================================================
FILE: docs/versioned_docs/version-v1.3.x/installation.md
================================================
---
title: Installation
---
## Manifest
To install Eraser with the manifest file, run the following command:
```bash
kubectl apply -f https://raw.githubusercontent.com/eraser-dev/eraser/v1.3.0/deploy/eraser.yaml
```
## Helm
If you'd like to install and manage Eraser with Helm, follow the install instructions [here](https://github.com/eraser-dev/eraser/blob/main/charts/eraser/README.md)
================================================
FILE: docs/versioned_docs/version-v1.3.x/introduction.md
================================================
---
title: Introduction
slug: /
---
# Introduction
When deploying to Kubernetes, it's common for pipelines to build and push images to a cluster, but it's much less common for these images to be cleaned up. This can lead to accumulating bloat on the disk, and a host of non-compliant images lingering on the nodes.
The current garbage collection process deletes images based on a percentage of load, but this process does not consider the vulnerability state of the images. **Eraser** aims to provide a simple way to determine the state of an image, and delete it if it meets the specified criteria.
================================================
FILE: docs/versioned_docs/version-v1.3.x/manual-removal.md
================================================
---
title: Manual Removal
---
Create an `ImageList` and specify the images you would like to remove. In this case, the image `docker.io/library/alpine:3.7.3` will be removed.
```shell
cat <<EOF | kubectl apply -f -
apiVersion: eraser.sh/v1
kind: ImageList
metadata:
  name: imagelist
spec:
  images:
    - docker.io/library/alpine:3.7.3
EOF
```
> `ImageList` is a cluster-scoped resource and must be called imagelist. `"*"` can be specified to remove all non-running images instead of individual images.
Creating an `ImageList` should trigger an `ImageJob` that will deploy Eraser pods on every node to perform the removal given the list of images.
```shell
$ kubectl get pods -n eraser-system
eraser-system eraser-controller-manager-55d54c4fb6-dcglq 1/1 Running 0 9m8s
eraser-system eraser-kind-control-plane 1/1 Running 0 11s
eraser-system eraser-kind-worker 1/1 Running 0 11s
eraser-system eraser-kind-worker2 1/1 Running 0 11s
```
Pods will run to completion and the images will be removed.
```shell
$ kubectl get pods -n eraser-system
eraser-system eraser-controller-manager-6d6d5594d4-phl2q 1/1 Running 0 4m16s
eraser-system eraser-kind-control-plane 0/1 Completed 0 22s
eraser-system eraser-kind-worker 0/1 Completed 0 22s
eraser-system eraser-kind-worker2 0/1 Completed 0 22s
```
The `ImageList` custom resource status field will contain the status of the last job. The success and failure counts indicate the number of nodes the Eraser agent was run on.
```shell
$ kubectl describe ImageList imagelist
...
Status:
Failed: 0
Success: 3
Timestamp: 2022-02-25T23:41:55Z
...
```
Verify the unused images are removed.
```shell
$ docker exec kind-worker ctr -n k8s.io images list | grep alpine
```
If the image has been successfully removed, there will be no output.
================================================
FILE: docs/versioned_docs/version-v1.3.x/metrics.md
================================================
---
title: Metrics
---
To view Eraser metrics, you will need to deploy an Open Telemetry collector in the 'eraser-system' namespace, and an exporter. An example collector with a Prometheus exporter is [otelcollector.yaml](https://github.com/eraser-dev/eraser/blob/main/test/e2e/test-data/otelcollector.yaml), and the endpoint can be specified using the [configmap](https://eraser-dev.github.io/eraser/docs/customization#universal-options). In this example, we are logging the collected data to the otel-collector pod, and exporting metrics through Prometheus at 'http://localhost:8889/metrics', but a separate exporter can also be configured.
Below is the list of metrics provided by Eraser per run:
#### Eraser
```yaml
- count
- name: images_removed_run_total
- description: Total images removed by eraser
```
#### Scanner
```yaml
- count
- name: vulnerable_images_run_total
- description: Total vulnerable images detected
```
#### ImageJob
```yaml
- count
- name: imagejob_run_total
- description: Total ImageJobs scheduled
- name: pods_completed_run_total
- description: Total pods completed
- name: pods_failed_run_total
- description: Total pods failed
- summary
- name: imagejob_duration_run_seconds
- description: Total time for ImageJobs scheduled to complete
```
================================================
FILE: docs/versioned_docs/version-v1.3.x/quick-start.md
================================================
---
title: Quick Start
---
This tutorial demonstrates the functionality of Eraser and validates that non-running images are removed successfully.
## Deploy a DaemonSet
After following the [install instructions](installation.md), we'll apply a demo `DaemonSet`. For illustrative purposes, a DaemonSet is applied and deleted so the non-running images remain on all nodes. The alpine image with the `3.7.3` tag will be used in this example. This is an image with a known critical vulnerability.
First, apply the `DaemonSet`:
```shell
cat <<EOF | kubectl apply -f -
apiVersion: apps/v1
kind: DaemonSet
metadata:
  name: alpine
spec:
  selector:
    matchLabels:
      app: alpine
  template:
    metadata:
      labels:
        app: alpine
    spec:
      containers:
      - name: alpine
        image: docker.io/library/alpine:3.7.3
EOF
```
Verify the pods are running, then delete the `DaemonSet` so that the alpine image remains on the nodes in a non-running state:
```shell
kubectl delete daemonset alpine
```
List the nodes in the cluster:
```shell
$ kubectl get nodes
NAME                 STATUS   ROLES           AGE   VERSION
kind-control-plane   Ready    control-plane   45m   v1.24.0
kind-worker          Ready    <none>          45m   v1.24.0
kind-worker2         Ready    <none>          44m   v1.24.0
```
List the images then filter for `alpine`:
```shell
$ docker exec kind-worker ctr -n k8s.io images list | grep alpine
docker.io/library/alpine:3.7.3 application/vnd.docker.distribution.manifest.list.v2+json sha256:8421d9a84432575381bfabd248f1eb56f3aa21d9d7cd2511583c68c9b7511d10 2.0 MiB linux/386,linux/amd64,linux/arm/v6,linux/arm64/v8,linux/ppc64le,linux/s390x io.cri-containerd.image=managed
docker.io/library/alpine@sha256:8421d9a84432575381bfabd248f1eb56f3aa21d9d7cd2511583c68c9b7511d10 application/vnd.docker.distribution.manifest.list.v2+json sha256:8421d9a84432575381bfabd248f1eb56f3aa21d9d7cd2511583c68c9b7511d10 2.0 MiB linux/386,linux/amd64,linux/arm/v6,linux/arm64/v8,linux/ppc64le,linux/s390x io.cri-containerd.image=managed
```
## Automatically Cleaning Images
After deploying Eraser, it will automatically clean images in a regular interval. This interval can be set using the `manager.scheduling.repeatInterval` setting in the [configmap](https://eraser-dev.github.io/eraser/docs/customization#detailed-options). The default interval is 24 hours (`24h`). Valid time units are "ns", "us" (or "µs"), "ms", "s", "m", "h".
Eraser will schedule eraser pods to each node in the cluster, and each pod will contain 3 containers: collector, scanner, and remover that will run to completion.
```shell
$ kubectl get pods -n eraser-system
NAMESPACE NAME READY STATUS RESTARTS AGE
eraser-system eraser-kind-control-plane-sb789 0/3 Completed 0 26m
eraser-system eraser-kind-worker-j84hm 0/3 Completed 0 26m
eraser-system eraser-kind-worker2-4lbdr 0/3 Completed 0 26m
eraser-system eraser-controller-manager-86cdb4cbf9-x8d7q 1/1 Running 0 26m
```
The collector container sends the list of all images to the scanner container, which scans and reports non-compliant images to the remover container for removal of images that are non-running. Once all pods are completed, they will be automatically cleaned up.
> If you want to remove all the images periodically, you can skip the scanner container by setting the `components.scanner.enabled` value to `false` using the [configmap](https://eraser-dev.github.io/eraser/docs/customization#detailed-options). In this case, each collector pod will hold 2 containers: collector and remover.
```shell
$ kubectl get pods -n eraser-system
NAMESPACE NAME READY STATUS RESTARTS AGE
eraser-system eraser-kind-control-plane-ksk2b 0/2 Completed 0 50s
eraser-system eraser-kind-worker-cpgqc 0/2 Completed 0 50s
eraser-system eraser-kind-worker2-k25df 0/2 Completed 0 50s
eraser-system eraser-controller-manager-86cdb4cbf9-x8d7q 1/1 Running 0 55s
```
================================================
FILE: docs/versioned_docs/version-v1.3.x/release-management.md
================================================
# Release Management
## Overview
This document describes Eraser project release management, which includes release versioning, supported releases, and supported upgrades.
## Legend
- **X.Y.Z** refers to the version (git tag) of Eraser that is released. This is the version of the Eraser images and the Chart version.
- **Breaking changes** refer to schema changes, flag changes, and behavior changes of Eraser that may require a clean installation during upgrade, and it may introduce changes that could break backward compatibility.
- **Milestone** should be designed to include feature sets to accommodate 2 months release cycles including test gates. GitHub's milestones are used by maintainers to manage each release. PRs and Issues for each release should be created as part of a corresponding milestone.
- **Patch releases** refer to applicable fixes, including security fixes, that may be backported to supported releases, depending on severity and feasibility.
- **Test gates** should include soak tests and upgrade tests from the last minor version.
## Release Versioning
All releases will be of the form _vX.Y.Z_ where X is the major version, Y is the minor version and Z is the patch version. This project strictly follows semantic versioning.
The rest of the doc will cover the release process for the following kinds of releases:
**Major Releases**
No plan to move to 2.0.0 unless there is a major design change like an incompatible API change in the project
**Minor Releases**
- X.Y.0-alpha.W, W >= 0 (Branch : main)
- Released as needed before we cut a beta X.Y release
- Alpha release, cut from master branch
- X.Y.0-beta.W, W >= 0 (Branch : main)
- Released as needed before we cut a stable X.Y release
- More stable than the alpha release to signal users to test things out
- Beta release, cut from master branch
- X.Y.0-rc.W, W >= 0 (Branch : main)
- Released as needed before we cut a stable X.Y release
- soak for ~ 2 weeks before cutting a stable release
- Release candidate release, cut from master branch
- X.Y.0 (Branch: main)
- Released as needed
- Stable release, cut from master when X.Y milestone is complete
**Patch Releases**
- Patch Releases X.Y.Z, Z > 0 (Branch: release-X.Y, only cut when a patch is needed)
- No breaking changes
- Applicable fixes, including security fixes, may be cherry-picked from master into the latest supported minor release-X.Y branches.
- Patch release, cut from a release-X.Y branch
## Supported Releases
Applicable fixes, including security fixes, may be cherry-picked into the release branch, depending on severity and feasibility. Patch releases are cut from that branch as needed.
We expect users to stay reasonably up-to-date with the versions of Eraser they use in production, but understand that it may take time to upgrade. We expect users to be running approximately the latest patch release of a given minor release and encourage users to upgrade as soon as possible.
We expect to "support" n (current) and n-1 major.minor releases. "Support" means we expect users to be running that version in production. For example, when v1.2.0 comes out, v1.0.x will no longer be supported for patches, and we encourage users to upgrade to a supported version as soon as possible.
## Supported Kubernetes Versions
Eraser is assumed to be compatible with the [current Kubernetes Supported Versions](https://kubernetes.io/releases/patch-releases/#detailed-release-history-for-active-branches) per [Kubernetes Supported Versions policy](https://kubernetes.io/releases/version-skew-policy/).
For example, if Eraser _supported_ versions are v1.2 and v1.1, and Kubernetes _supported_ versions are v1.22, v1.23, v1.24, then all supported Eraser versions (v1.2, v1.1) are assumed to be compatible with all supported Kubernetes versions (v1.22, v1.23, v1.24). If Kubernetes v1.25 is released later, then Eraser v1.2 and v1.1 will be assumed to be compatible with v1.25 if those Eraser versions are still supported at that time.
If you choose to use Eraser with a version of Kubernetes that it does not support, you are using it at your own risk.
## Acknowledgement
This document builds on the ideas and implementations of release processes from projects like Kubernetes and Helm.
================================================
FILE: docs/versioned_docs/version-v1.3.x/releasing.md
================================================
---
title: Releasing
---
## Create Release Pull Request
1. Go to `create_release_pull_request` workflow under actions.
2. Select run workflow, and use the workflow from your branch.
3. Input release version with the semantic version identifying the release.
4. Click run workflow and review the PR created by github-actions.
# Releasing
5. Once the PR is merged to `main`, tag that commit with release version and push tags to remote repository.
```
git checkout <release-branch>
git pull origin <release-branch>
git tag -a <release-version> -m '<release-version>'
git push origin <release-version>
```
6. Pushing the release tag will trigger the GitHub Actions `release` job.
This will build the `ghcr.io/eraser-dev/remover`, `ghcr.io/eraser-dev/eraser-manager`, `ghcr.io/eraser-dev/collector`, and `ghcr.io/eraser-dev/eraser-trivy-scanner` images automatically, then publish the new release tag.
## Publishing
1. GitHub Action will create a new release, review and edit it at https://github.com/eraser-dev/eraser/releases
## Notifying
1. Send an email to the [Eraser mailing list](https://groups.google.com/g/eraser-dev) announcing the release, with links to GitHub.
2. Post a message on the [Eraser Slack channel](https://kubernetes.slack.com/archives/C03Q8KV8YQ4) with the same information.
================================================
FILE: docs/versioned_docs/version-v1.3.x/setup.md
================================================
---
title: Setup
---
# Development Setup
This document describes the steps to get started with development.
You can either utilize [Codespaces](https://docs.github.com/en/codespaces/overview) or set up a local environment.
## Local Setup
### Prerequisites:
- [go](https://go.dev/) with version 1.17 or later.
- [docker](https://docs.docker.com/get-docker/)
- [kind](https://kind.sigs.k8s.io/)
- `make`
### Get things running
- Get dependencies with `go get`
- This project uses `make`. You can utilize `make help` to see available targets. For local deployment make targets help to build, test and deploy.
### Making changes
Please refer to [Development Reference](#development-reference) for more details on the specific commands.
To test your changes on a cluster:
```bash
# generate necessary api files (optional - only needed if changes to api folder).
make generate
# build applicable images
make docker-build-manager MANAGER_IMG=eraser-manager:dev
make docker-build-remover REMOVER_IMG=remover:dev
make docker-build-collector COLLECTOR_IMG=collector:dev
make docker-build-trivy-scanner TRIVY_SCANNER_IMG=eraser-trivy-scanner:dev
# make sure updated image is present on cluster (e.g., see kind example below)
kind load docker-image \
eraser-manager:dev \
eraser-trivy-scanner:dev \
remover:dev \
collector:dev
make manifests
make deploy
# to remove the deployment
make undeploy
```
To test your changes to manager locally:
```bash
make run
```
Example Output:
```
you@local:~/eraser$ make run
docker build . \
-t eraser-tooling \
-f build/tooling/Dockerfile
[+] Building 7.8s (8/8) FINISHED
=> => naming to docker.io/library/eraser-tooling 0.0s
docker run -v /home/eraser/config:/config -w /config/manager \
registry.k8s.io/kustomize/kustomize:v3.8.9 edit set image controller=eraser-manager:dev
docker run -v /home/eraser:/eraser eraser-tooling controller-gen \
crd \
rbac:roleName=manager-role \
webhook \
paths="./..." \
output:crd:artifacts:config=config/crd/bases
rm -rf manifest_staging
mkdir -p manifest_staging/deploy
docker run --rm -v /home/eraser:/eraser \
registry.k8s.io/kustomize/kustomize:v3.8.9 build \
/eraser/config/default -o /eraser/manifest_staging/deploy/eraser.yaml
docker run -v /home/eraser:/eraser eraser-tooling controller-gen object:headerFile="hack/boilerplate.go.txt" paths="./..."
go fmt ./...
go vet ./...
go run ./main.go
{"level":"info","ts":1652985685.1663408,"logger":"controller-runtime.metrics","msg":"Metrics server is starting to listen","addr":":8080"}
...
```
## Development Reference
Eraser is using tooling from [kubebuilder](https://github.com/kubernetes-sigs/kubebuilder). For Eraser this tooling is containerized into the `eraser-tooling` image. The `make` targets can use this tooling and build the image when necessary.
You can override the default configuration using environment variables. Below you can find a reference of targets and configuration options.
### Common Configuration
| Environment Variable | Description |
| -------------------- | --------------------------------------------------------------------------------------------- |
| VERSION | Specifies the version (i.e., the image tag) of eraser to be used. |
| MANAGER_IMG | Defines the image url for the Eraser manager. Used for tagging, pulling and pushing the image |
| REMOVER_IMG | Defines the image url for the Eraser remover. Used for tagging, pulling and pushing the image |
| COLLECTOR_IMG | Defines the image url for the Collector. Used for tagging, pulling and pushing the image |
### Linting
- `make lint`
Lints the go code.
| Environment Variable | Description |
| -------------------- | ------------------------------------------------------- |
| GOLANGCI_LINT | Specifies the go linting binary to be used for linting. |
### Development
- `make generate`
Generates necessary files for the k8s api stored under `api/v1alpha1/zz_generated.deepcopy.go`. See the [kubebuilder docs](https://book.kubebuilder.io/cronjob-tutorial/other-api-files.html) for details.
- `make manifests`
Generates the eraser deployment yaml files under `manifest_staging/deploy`.
Configuration Options:
| Environment Variable | Description |
| -------------------- | -------------------------------------------------- |
| REMOVER_IMG | Defines the image url for the Eraser remover. |
| MANAGER_IMG | Defines the image url for the Eraser manager. |
| KUSTOMIZE_VERSION | Define Kustomize version for generating manifests. |
- `make test`
Runs the unit tests for the eraser project.
Configuration Options:
| Environment Variable | Description |
| -------------------- | ----------------------------------------------------------- |
| ENVTEST | Specifies the envtest setup binary. |
| ENVTEST_K8S_VERSION | Specifies the Kubernetes version for envtest setup command. |
- `make e2e-test`
Runs e2e tests on a cluster.
Configuration Options:
| Environment Variable | Description |
| -------------------- | ------------------------------------------------------------------------------------------------------------- |
| REMOVER_IMG | Eraser image to be used for e2e test. |
| MANAGER_IMG | Eraser manager image to be used for e2e test. |
| KUBERNETES_VERSION | Kubernetes version for e2e test. |
| TEST_COUNT | Sets repetition for test. Please refer to [go docs](https://pkg.go.dev/cmd/go#hdr-Testing_flags) for details. |
| TIMEOUT | Sets timeout for test. Please refer to [go docs](https://pkg.go.dev/cmd/go#hdr-Testing_flags) for details. |
| TESTFLAGS | Sets additional test flags |
### Build
- `make build`
Builds the eraser manager binaries.
- `make run`
Runs the eraser manager on your local machine.
- `make docker-build-manager`
Builds the docker image for the eraser manager.
Configuration Options:
| Environment Variable | Description |
| -------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------ |
| CACHE_FROM | Sets the target of the buildx --cache-from flag [see buildx reference](https://docs.docker.com/engine/reference/commandline/buildx_build/#cache-from). |
| CACHE_TO | Sets the target of the buildx --cache-to flag [see buildx reference](https://docs.docker.com/engine/reference/commandline/buildx_build/#cache-to). |
| PLATFORM | Sets the target platform for buildx [see buildx reference](https://docs.docker.com/engine/reference/commandline/buildx_build/#platform). |
| OUTPUT_TYPE | Sets the output for buildx [see buildx reference](https://docs.docker.com/engine/reference/commandline/buildx_build/#output). |
| MANAGER_IMG | Specifies the target repository, image name and tag for building image. |
- `make docker-push-manager`
Pushes the docker image for the eraser manager.
Configuration Options:
| Environment Variable | Description |
| -------------------- | ----------------------------------------------------------------------- |
| MANAGER_IMG | Specifies the target repository, image name and tag for building image. |
- `make docker-build-remover`
Builds the docker image for eraser remover.
Configuration Options:
| Environment Variable | Description |
| -------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------ |
| CACHE_FROM | Sets the target of the buildx --cache-from flag [see buildx reference](https://docs.docker.com/engine/reference/commandline/buildx_build/#cache-from). |
| CACHE_TO | Sets the target of the buildx --cache-to flag [see buildx reference](https://docs.docker.com/engine/reference/commandline/buildx_build/#cache-to). |
| PLATFORM | Sets the target platform for buildx [see buildx reference](https://docs.docker.com/engine/reference/commandline/buildx_build/#platform). |
| OUTPUT_TYPE | Sets the output for buildx [see buildx reference](https://docs.docker.com/engine/reference/commandline/buildx_build/#output). |
| REMOVER_IMG | Specifies the target repository, image name and tag for building image. |
- `make docker-push-remover`
Pushes the docker image for the eraser remover.
Configuration Options:
| Environment Variable | Description |
| -------------------- | ----------------------------------------------------------------------- |
| REMOVER_IMG | Specifies the target repository, image name and tag for building image. |
- `make docker-build-collector`
Builds the docker image for the eraser collector.
Configuration Options:
| Environment Variable | Description |
| -------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------ |
| CACHE_FROM | Sets the target of the buildx --cache-from flag [see buildx reference](https://docs.docker.com/engine/reference/commandline/buildx_build/#cache-from). |
| CACHE_TO | Sets the target of the buildx --cache-to flag [see buildx reference](https://docs.docker.com/engine/reference/commandline/buildx_build/#cache-to). |
| PLATFORM | Sets the target platform for buildx [see buildx reference](https://docs.docker.com/engine/reference/commandline/buildx_build/#platform). |
| OUTPUT_TYPE | Sets the output for buildx [see buildx reference](https://docs.docker.com/engine/reference/commandline/buildx_build/#output). |
| COLLECTOR_IMG | Specifies the target repository, image name and tag for building image. |
- `make docker-push-collector`
Pushes the docker image for the eraser collector.
Configuration Options:
| Environment Variable | Description |
| -------------------- | ----------------------------------------------------------------------- |
| COLLECTOR_IMG | Specifies the target repository, image name and tag for building image. |
### Deployment
- `make install`
Install CRDs into the K8s cluster specified in ~/.kube/config.
Configuration Options:
| Environment Variable | Description |
| -------------------- | ---------------------------------------------------------------- |
| KUSTOMIZE_VERSION | Kustomize version used to generate k8s resources for deployment. |
- `make uninstall`
Uninstall CRDs from the K8s cluster specified in ~/.kube/config.
Configuration Options:
| Environment Variable | Description |
| -------------------- | ---------------------------------------------------------------- |
| KUSTOMIZE_VERSION | Kustomize version used to generate k8s resources for deployment. |
- `make deploy`
Deploys eraser to the cluster specified in ~/.kube/config.
Configuration Options:
| Environment Variable | Description |
| -------------------- | -------------------------------------------------------------------- |
| KUSTOMIZE_VERSION | Kustomize version used to generate k8s resources for deployment. |
| MANAGER_IMG | Specifies the eraser manager image version to be used for deployment |
- `make undeploy`
Undeploy controller from the K8s cluster specified in ~/.kube/config.
Configuration Options:
| Environment Variable | Description |
| -------------------- | ------------------------------------------------------------------------- |
| KUSTOMIZE_VERSION | Kustomize version used to generate k8s resources that need to be removed. |
### Release
- `make release-manifest`
Generates k8s manifests files for a release.
Configuration Options:
| Environment Variable | Description |
| -------------------- | ------------------------------------ |
| NEWVERSION | Sets the new version in the Makefile |
- `make promote-staging-manifest`
Promotes the k8s deployment yaml files to release.
================================================
FILE: docs/versioned_docs/version-v1.3.x/trivy.md
================================================
---
title: Trivy
---
## Trivy Provider Options
The Trivy provider is used in Eraser for image scanning and detecting vulnerabilities. See [Customization](https://eraser-dev.github.io/eraser/docs/customization#scanner-options) for more details on configuring the scanner.
================================================
FILE: docs/versioned_docs/version-v1.4.x/architecture.md
================================================
---
title: Architecture
---
At a high level, Eraser has two main modes of operation: manual and automated.
Manual image removal involves supplying a list of images to remove; Eraser then
deploys pods to clean up the images you supplied.
Automated image removal runs on a timer. By default, the automated process
removes images based on the results of a vulnerability scan. The default
vulnerability scanner is Trivy, but others can be provided in its place. Or,
the scanner can be disabled altogether, in which case Eraser acts as a garbage
collector -- it will remove all non-running images in your cluster.
## Manual image cleanup
## Automated analysis, scanning, and cleanup
================================================
FILE: docs/versioned_docs/version-v1.4.x/code-of-conduct.md
================================================
---
title: Code of Conduct
---
This project has adopted the [CNCF Code of Conduct](https://github.com/cncf/foundation/blob/main/code-of-conduct.md).
Resources:
- [CNCF Code of Conduct](https://github.com/cncf/foundation/blob/main/code-of-conduct.md)
- [Code of Conduct Reporting](https://github.com/cncf/foundation/blob/main/code-of-conduct.md)
================================================
FILE: docs/versioned_docs/version-v1.4.x/contributing.md
================================================
---
title: Contributing
---
There are several ways to get involved with Eraser
- Join the [mailing list](https://groups.google.com/u/1/g/eraser-dev) to get notifications for releases, security announcements, etc.
- Participate in the [biweekly community meetings](https://docs.google.com/document/d/1Sj5u47K3WUGYNPmQHGFpb52auqZb1FxSlWAQnPADhWI/edit) to discuss development, issues, use cases, etc.
- Join the `#eraser` channel on the [Kubernetes Slack](https://slack.k8s.io/)
- View the [development setup instructions](https://eraser-dev.github.io/eraser/docs/development)
This project welcomes contributions and suggestions.
This project has adopted the [CNCF Code of Conduct](https://github.com/cncf/foundation/blob/main/code-of-conduct.md).
================================================
FILE: docs/versioned_docs/version-v1.4.x/custom-scanner.md
================================================
---
title: Custom Scanner
---
## Creating a Custom Scanner
To create a custom scanner for non-compliant images, use the following [template](https://github.com/eraser-dev/eraser-scanner-template/).
In order to customize your scanner, start by creating a `NewImageProvider()`. The ImageProvider interface can be found [here](../../pkg/scanners/template/scanner_template.go).
The ImageProvider will allow you to retrieve the list of all non-running and non-excluded images from the collector container through the `ReceiveImages()` function. Process these images with your customized scanner and threshold, and use `SendImages()` to pass the images found non-compliant to the eraser container for removal. Finally, complete the scanning process by calling `Finish()`.
When complete, provide your custom scanner image to Eraser in deployment.
================================================
FILE: docs/versioned_docs/version-v1.4.x/customization.md
================================================
---
title: Customization
---
## Overview
Eraser uses a configmap to configure its behavior. The configmap is part of the
deployment and it is not necessary to deploy it manually. Once deployed, the configmap
can be edited at any time:
```bash
kubectl edit configmap --namespace eraser-system eraser-manager-config
```
If an eraser job is already running, the changes will not take effect until the job completes.
The configuration is in yaml.
## Key Concepts
### Basic architecture
The _manager_ runs as a pod in your cluster and manages _ImageJobs_. Think of
an _ImageJob_ as a unit of work, performed on every node in your cluster. Each
node runs a sub-job. The goal of the _ImageJob_ is to assess the images on your
cluster's nodes, and to remove the images you don't want. There are two stages:
1. Assessment
1. Removal.
### Scheduling
An _ImageJob_ can either be created on-demand (see [Manual Removal](https://eraser-dev.github.io/eraser/docs/manual-removal)),
or they can be spawned on a timer like a cron job. On-demand jobs skip the
assessment stage and get right down to the business of removing the images you
specified. The behavior of an on-demand job is quite different from that of
timed jobs.
### Fault Tolerance
Because an _ImageJob_ runs on every node in your cluster, and the conditions on
each node may vary widely, some of the sub-jobs may fail. If you cannot
tolerate any failure, set the `manager.imageJob.successRatio` property to
`1.0`. If 75% success sounds good to you, set it to `0.75`. In that case, if
fewer than 75% of the pods spawned by the _ImageJob_ report success, the job as
a whole will be marked as a failure.
This is mainly to help diagnose error conditions. As such, you can set
`manager.imageJob.cleanup.delayOnFailure` to a long value so that logs can be
captured before the spawned pods are cleaned up.
### Excluding Nodes
For various reasons, you may want to prevent Eraser from scheduling pods on
certain nodes. To do so, the nodes can be given a special label. By default,
this label is `eraser.sh/cleanup.filter`, but you can configure the behavior with
the options under `manager.nodeFilter`. The [table](#detailed-options) provides more detail.
### Configuring Components
An _ImageJob_ is made up of various sub-jobs, with one sub-job for each node.
These sub-jobs can be broken down further into three stages.
1. Collection (What is on the node?)
1. Scanning (What images conform to the policy I've provided?)
1. Removal (Remove images based on the results of the above)
Of the above stages, only Removal is mandatory. The others can be disabled.
Furthermore, manually triggered _ImageJobs_ will skip right to removal, even if
Eraser is configured to collect and scan. Collection and Scanning will only
take place when:
1. The collector and/or scanner `components` are enabled, AND
1. The job was *not* triggered manually by creating an _ImageList_.
Disabling scanner will remove all non-running images by default.
### Swapping out components
The collector, scanner, and remover components can all be swapped out. This
enables you to build and host the images yourself. In addition, the scanner's
behavior can be completely tailored to your needs by swapping out the default
image with one of your own. To specify the images, use the
`components.<component>.image.repo` and `components.<component>.image.tag` options,
where `<component>` is one of `collector`, `scanner`, or `remover`.
## Universal Options
The following portions of the configmap apply no matter how you spawn your
_ImageJob_. The values provided below are the defaults. For more detail on
these options, see the [table](#detailed-options).
```yaml
manager:
runtime:
name: containerd
address: unix:///run/containerd/containerd.sock
otlpEndpoint: "" # empty string disables OpenTelemetry
logLevel: info
profile:
enabled: false
port: 6060
imageJob:
successRatio: 1.0
cleanup:
delayOnSuccess: 0s
delayOnFailure: 24h
pullSecrets: [] # image pull secrets for collector/scanner/remover
priorityClassName: "" # priority class name for collector/scanner/remover
additionalPodLabels: {}
extraScannerVolumes: {}
extraScannerVolumeMounts: {}
nodeFilter:
type: exclude # must be either exclude|include
selectors:
- eraser.sh/cleanup.filter
- kubernetes.io/os=windows
components:
remover:
image:
repo: ghcr.io/eraser-dev/remover
tag: v1.0.0
request:
mem: 25Mi
cpu: 0
limit:
mem: 30Mi
cpu: 1000m
```
## Component Options
```yaml
components:
collector:
enabled: true
image:
repo: ghcr.io/eraser-dev/collector
tag: v1.0.0
request:
mem: 25Mi
cpu: 7m
limit:
mem: 500Mi
cpu: 0
scanner:
enabled: true
image:
repo: ghcr.io/eraser-dev/eraser-trivy-scanner
tag: v1.0.0
request:
mem: 500Mi
cpu: 1000m
limit:
mem: 2Gi
cpu: 0
config: |
# this is the schema for the provided 'trivy-scanner'. custom scanners
# will define their own configuration. see the below
remover:
image:
repo: ghcr.io/eraser-dev/remover
tag: v1.0.0
request:
mem: 25Mi
cpu: 0
limit:
mem: 30Mi
cpu: 1000m
```
## Scanner Options
These options can be provided to `components.scanner.config`. They will be
passed through as a string to the scanner container and parsed there. If you
want to configure your own scanner, you must provide some way to parse this.
Below are the values recognized by the provided `eraser-trivy-scanner` image.
Values provided below are the defaults.
```yaml
cacheDir: /var/lib/trivy # The file path inside the container to store the cache
dbRepo: ghcr.io/aquasecurity/trivy-db # The container registry from which to fetch the trivy database
deleteFailedImages: true # if true, remove images for which scanning fails, regardless of why it failed
deleteEOLImages: true # if true, remove images that have reached their end-of-life date
vulnerabilities:
ignoreUnfixed: true # consider the image compliant if there are no known fixes for the vulnerabilities found.
types: # a list of vulnerability types. for more info, see trivy's documentation.
- os
- library
securityChecks: # see trivy's documentation for more information
- vuln
severities: # in this case, only flag images with CRITICAL vulnerability for removal
- CRITICAL
ignoredStatuses: # a list of trivy statuses to ignore. See https://aquasecurity.github.io/trivy/v0.44/docs/configuration/filtering/#by-status.
timeout:
total: 23h # if scanning isn't completed before this much time elapses, abort the whole scan
perImage: 1h # if scanning a single image exceeds this time, scanning will be aborted
```
## Detailed Options
| Option | Description | Default |
| --- | --- | --- |
| manager.runtime.name | The runtime to use for the manager's containers. Must be one of containerd, crio, or dockershim. It is assumed that your nodes are all using the same runtime, and there is currently no way to configure multiple runtimes. | containerd |
| manager.runtime.address | The runtime socket address to use for the containers. Can provide a custom address for containerd and dockershim runtimes, but not for crio due to Trivy restrictions. | unix:///run/containerd/containerd.sock |
| manager.otlpEndpoint | The endpoint to send OpenTelemetry data to. If empty, data will not be sent. | "" |
| manager.logLevel | The log level for the manager's containers. Must be one of debug, info, warn, error, dpanic, panic, or fatal. | info |
| manager.scheduling.repeatInterval | Use only when collector and/or scanner are enabled. This is like a cron job, and will spawn an _ImageJob_ at the interval provided. | 24h |
| manager.scheduling.beginImmediately | If set to true, the first _ImageJob_ will run immediately. If false, the job will not be spawned until after the interval (above) has elapsed. | true |
| manager.profile.enabled | Whether to enable profiling for the manager's containers. This is for debugging with `go tool pprof`. | false |
| manager.profile.port | The port on which to expose the profiling endpoint. | 6060 |
| manager.imageJob.successRatio | The ratio of successful image jobs required before a cleanup is performed. | 1.0 |
| manager.imageJob.cleanup.delayOnSuccess | The amount of time to wait after a successful image job before performing cleanup. | 0s |
| manager.imageJob.cleanup.delayOnFailure | The amount of time to wait after a failed image job before performing cleanup. | 24h |
| manager.pullSecrets | The image pull secrets to use for collector, scanner, and remover containers. | [] |
| manager.priorityClassName | The priority class to use for collector, scanner, and remover containers. | "" |
| manager.additionalPodLabels | Additional labels for all pods that the controller creates at runtime. | `{}` |
| manager.nodeFilter.type | The type of node filter to use. Must be either "exclude" or "include". | exclude |
| manager.nodeFilter.selectors | A list of selectors used to filter nodes. | [] |
| components.collector.enabled | Whether to enable the collector component. | true |
| components.collector.image.repo | The repository containing the collector image. | ghcr.io/eraser-dev/collector |
| components.collector.image.tag | The tag of the collector image. | v1.0.0 |
| components.collector.request.mem | The amount of memory to request for the collector container. | 25Mi |
| components.collector.request.cpu | The amount of CPU to request for the collector container. | 7m |
| components.collector.limit.mem | The maximum amount of memory the collector container is allowed to use. | 500Mi |
| components.collector.limit.cpu | The maximum amount of CPU the collector container is allowed to use. | 0 |
| components.scanner.enabled | Whether to enable the scanner component. | true |
| components.scanner.image.repo | The repository containing the scanner image. | ghcr.io/eraser-dev/eraser-trivy-scanner |
| components.scanner.image.tag | The tag of the scanner image. | v1.0.0 |
| components.scanner.request.mem | The amount of memory to request for the scanner container. | 500Mi |
| components.scanner.request.cpu | The amount of CPU to request for the scanner container. | 1000m |
| components.scanner.limit.mem | The maximum amount of memory the scanner container is allowed to use. | 2Gi |
| components.scanner.limit.cpu | The maximum amount of CPU the scanner container is allowed to use. | 0 |
| components.scanner.config | The configuration to pass to the scanner container, as a YAML string. | See YAML below |
| components.scanner.volumes | Extra volumes for scanner. | `{}` |
| components.remover.image.repo | The repository containing the remover image. | ghcr.io/eraser-dev/remover |
| components.remover.image.tag | The tag of the remover image. | v1.0.0 |
| components.remover.request.mem | The amount of memory to request for the remover container. | 25Mi |
| components.remover.request.cpu | The amount of CPU to request for the remover container. | 0 |
================================================
FILE: docs/versioned_docs/version-v1.4.x/exclusion.md
================================================
---
title: Exclusion
---
## Excluding registries, repositories, and images
Eraser can exclude registries (example, `docker.io/library/*`) and also specific images with a tag (example, `docker.io/library/ubuntu:18.04`) or digest (example, `sha256:80f31da1ac7b312ba29d65080fd...`) from its removal process.
To exclude any images or registries from the removal, create configmap(s) with the label `eraser.sh/exclude.list=true` in the eraser-system namespace with a JSON file holding the excluded images.
```bash
$ cat > sample.json <<"EOF"
{
"excluded": [
"docker.io/library/*",
"ghcr.io/eraser-dev/test:latest"
]
}
EOF
$ kubectl create configmap excluded --from-file=sample.json --namespace=eraser-system
$ kubectl label configmap excluded eraser.sh/exclude.list=true -n eraser-system
```
## Exempting Nodes from the Eraser Pipeline
Exempting nodes from cleanup was added in v1.0.0. When deploying Eraser, you can specify whether there is a list of nodes you would like to `include` or `exclude` from the cleanup process using the configmap. For more information, see the section on [customization](https://eraser-dev.github.io/eraser/docs/customization).
================================================
FILE: docs/versioned_docs/version-v1.4.x/faq.md
================================================
---
title: FAQ
---
## Why am I still seeing vulnerable images?
Eraser currently targets **non-running** images, so any vulnerable images that are currently running will not be removed. In addition, the default vulnerability scanning with Trivy removes images with `CRITICAL` vulnerabilities. Any images with lower vulnerabilities will not be removed. This can be configured using the [configmap](https://eraser-dev.github.io/eraser/docs/customization#scanner-options).
## How is Eraser different from Kubernetes garbage collection?
The native garbage collection in Kubernetes works a bit differently than Eraser. By default, garbage collection begins when disk usage reaches 85%, and stops when it gets down to 80%. More details about Kubernetes garbage collection can be found in the [Kubernetes documentation](https://kubernetes.io/docs/concepts/architecture/garbage-collection/), and configuration options can be found in the [Kubelet documentation](https://kubernetes.io/docs/reference/config-api/kubelet-config.v1beta1/).
There are a couple core benefits to using Eraser for image cleanup:
* Eraser can be configured to use image vulnerability data when making determinations on image removal
* By interfacing directly with the container runtime, Eraser can clean up images that are not managed by Kubelet and Kubernetes
================================================
FILE: docs/versioned_docs/version-v1.4.x/installation.md
================================================
---
title: Installation
---
## Manifest
To install Eraser with the manifest file, run the following command:
```bash
kubectl apply -f https://raw.githubusercontent.com/eraser-dev/eraser/v1.4.1/deploy/eraser.yaml
```
## Helm
If you'd like to install and manage Eraser with Helm, follow the install instructions [here](https://github.com/eraser-dev/eraser/blob/main/charts/eraser/README.md)
================================================
FILE: docs/versioned_docs/version-v1.4.x/introduction.md
================================================
---
title: Introduction
slug: /
---
# Introduction
When deploying to Kubernetes, it's common for pipelines to build and push images to a cluster, but it's much less common for these images to be cleaned up. This can lead to accumulating bloat on the disk, and a host of non-compliant images lingering on the nodes.
The current garbage collection process deletes images based on a percentage of load, but this process does not consider the vulnerability state of the images. **Eraser** aims to provide a simple way to determine the state of an image, and delete it if it meets the specified criteria.
================================================
FILE: docs/versioned_docs/version-v1.4.x/manual-removal.md
================================================
---
title: Manual Removal
---
Create an `ImageList` and specify the images you would like to remove. In this case, the image `docker.io/library/alpine:3.7.3` will be removed.
```shell
cat <<EOF | kubectl apply -f -
apiVersion: eraser.sh/v1
kind: ImageList
metadata:
  name: imagelist
spec:
  images:
    - docker.io/library/alpine:3.7.3
EOF
```
> `ImageList` is a cluster-scoped resource and must be called imagelist. `"*"` can be specified to remove all non-running images instead of individual images.
Creating an `ImageList` should trigger an `ImageJob` that will deploy Eraser pods on every node to perform the removal given the list of images.
```shell
$ kubectl get pods -n eraser-system
eraser-system eraser-controller-manager-55d54c4fb6-dcglq 1/1 Running 0 9m8s
eraser-system eraser-kind-control-plane 1/1 Running 0 11s
eraser-system eraser-kind-worker 1/1 Running 0 11s
eraser-system eraser-kind-worker2 1/1 Running 0 11s
```
Pods will run to completion and the images will be removed.
```shell
$ kubectl get pods -n eraser-system
eraser-system eraser-controller-manager-6d6d5594d4-phl2q 1/1 Running 0 4m16s
eraser-system eraser-kind-control-plane 0/1 Completed 0 22s
eraser-system eraser-kind-worker 0/1 Completed 0 22s
eraser-system eraser-kind-worker2 0/1 Completed 0 22s
```
The `ImageList` custom resource status field will contain the status of the last job. The success and failure counts indicate the number of nodes the Eraser agent was run on.
```shell
$ kubectl describe ImageList imagelist
...
Status:
Failed: 0
Success: 3
Timestamp: 2022-02-25T23:41:55Z
...
```
Verify the unused images are removed.
```shell
$ docker exec kind-worker ctr -n k8s.io images list | grep alpine
```
If the image has been successfully removed, there will be no output.
================================================
FILE: docs/versioned_docs/version-v1.4.x/metrics.md
================================================
---
title: Metrics
---
To view Eraser metrics, you will need to deploy an Open Telemetry collector in the 'eraser-system' namespace, and an exporter. An example collector with a Prometheus exporter is [otelcollector.yaml](https://github.com/eraser-dev/eraser/blob/main/test/e2e/test-data/otelcollector.yaml), and the endpoint can be specified using the [configmap](https://eraser-dev.github.io/eraser/docs/customization#universal-options). In this example, we are logging the collected data to the otel-collector pod, and exporting metrics through Prometheus at 'http://localhost:8889/metrics', but a separate exporter can also be configured.
Below is the list of metrics provided by Eraser per run:
#### Eraser
```yaml
- count
- name: images_removed_run_total
- description: Total images removed by eraser
```
#### Scanner
```yaml
- count
- name: vulnerable_images_run_total
- description: Total vulnerable images detected
```
#### ImageJob
```yaml
- count
- name: imagejob_run_total
- description: Total ImageJobs scheduled
- name: pods_completed_run_total
- description: Total pods completed
- name: pods_failed_run_total
- description: Total pods failed
- summary
- name: imagejob_duration_run_seconds
- description: Total time for ImageJobs scheduled to complete
```
================================================
FILE: docs/versioned_docs/version-v1.4.x/quick-start.md
================================================
---
title: Quick Start
---
This tutorial demonstrates the functionality of Eraser and validates that non-running images are removed successfully.
## Deploy a DaemonSet
After following the [install instructions](installation.md), we'll apply a demo `DaemonSet`. For illustrative purposes, a DaemonSet is applied and deleted so the non-running images remain on all nodes. The alpine image with the `3.7.3` tag will be used in this example. This is an image with a known critical vulnerability.
First, apply the `DaemonSet`:
```shell
cat <<EOF | kubectl apply -f -
apiVersion: apps/v1
kind: DaemonSet
metadata:
  name: alpine
spec:
  selector:
    matchLabels:
      app: alpine
  template:
    metadata:
      labels:
        app: alpine
    spec:
      containers:
      - name: alpine
        image: docker.io/library/alpine:3.7.3
        # keep the container running
        command: ["tail", "-f", "/dev/null"]
EOF
```

Next, delete the `DaemonSet`:

```shell
kubectl delete daemonset alpine
```

Verify the nodes are ready:

```shell
$ kubectl get nodes
NAME                 STATUS   ROLES           AGE   VERSION
kind-control-plane   Ready    control-plane   45m   v1.24.0
kind-worker          Ready    <none>          45m   v1.24.0
kind-worker2         Ready    <none>          44m   v1.24.0
```
List the images then filter for `alpine`:
```shell
$ docker exec kind-worker ctr -n k8s.io images list | grep alpine
docker.io/library/alpine:3.7.3 application/vnd.docker.distribution.manifest.list.v2+json sha256:8421d9a84432575381bfabd248f1eb56f3aa21d9d7cd2511583c68c9b7511d10 2.0 MiB linux/386,linux/amd64,linux/arm/v6,linux/arm64/v8,linux/ppc64le,linux/s390x io.cri-containerd.image=managed
docker.io/library/alpine@sha256:8421d9a84432575381bfabd248f1eb56f3aa21d9d7cd2511583c68c9b7511d10 application/vnd.docker.distribution.manifest.list.v2+json sha256:8421d9a84432575381bfabd248f1eb56f3aa21d9d7cd2511583c68c9b7511d10 2.0 MiB linux/386,linux/amd64,linux/arm/v6,linux/arm64/v8,linux/ppc64le,linux/s390x io.cri-containerd.image=managed
```
## Automatically Cleaning Images
After deploying Eraser, it will automatically clean images in a regular interval. This interval can be set using the `manager.scheduling.repeatInterval` setting in the [configmap](https://eraser-dev.github.io/eraser/docs/customization#detailed-options). The default interval is 24 hours (`24h`). Valid time units are "ns", "us" (or "µs"), "ms", "s", "m", "h".
Eraser will schedule eraser pods to each node in the cluster, and each pod will contain 3 containers: collector, scanner, and remover that will run to completion.
```shell
$ kubectl get pods -n eraser-system
NAMESPACE NAME READY STATUS RESTARTS AGE
eraser-system eraser-kind-control-plane-sb789 0/3 Completed 0 26m
eraser-system eraser-kind-worker-j84hm 0/3 Completed 0 26m
eraser-system eraser-kind-worker2-4lbdr 0/3 Completed 0 26m
eraser-system eraser-controller-manager-86cdb4cbf9-x8d7q 1/1 Running 0 26m
```
The collector container sends the list of all images to the scanner container, which scans and reports non-compliant images to the remover container for removal of images that are non-running. Once all pods are completed, they will be automatically cleaned up.
> If you want to remove all the images periodically, you can skip the scanner container by setting the `components.scanner.enabled` value to `false` using the [configmap](https://eraser-dev.github.io/eraser/docs/customization#detailed-options). In this case, each collector pod will hold 2 containers: collector and remover.
```shell
$ kubectl get pods -n eraser-system
NAMESPACE NAME READY STATUS RESTARTS AGE
eraser-system eraser-kind-control-plane-ksk2b 0/2 Completed 0 50s
eraser-system eraser-kind-worker-cpgqc 0/2 Completed 0 50s
eraser-system eraser-kind-worker2-k25df 0/2 Completed 0 50s
eraser-system eraser-controller-manager-86cdb4cbf9-x8d7q 1/1 Running 0 55s
```
================================================
FILE: docs/versioned_docs/version-v1.4.x/release-management.md
================================================
# Release Management
## Overview
This document describes Eraser project release management, which includes release versioning, supported releases, and supported upgrades.
## Legend
- **X.Y.Z** refers to the version (git tag) of Eraser that is released. This is the version of the Eraser images and the Chart version.
- **Breaking changes** refer to schema changes, flag changes, and behavior changes of Eraser that may require a clean installation during upgrade, and it may introduce changes that could break backward compatibility.
- **Milestone** should be designed to include feature sets to accommodate 2 months release cycles including test gates. GitHub's milestones are used by maintainers to manage each release. PRs and Issues for each release should be created as part of a corresponding milestone.
- **Patch releases** refer to applicable fixes, including security fixes, may be backported to support releases, depending on severity and feasibility.
- **Test gates** should include soak tests and upgrade tests from the last minor version.
## Release Versioning
All releases will be of the form _vX.Y.Z_ where X is the major version, Y is the minor version and Z is the patch version. This project strictly follows semantic versioning.
The rest of the doc will cover the release process for the following kinds of releases:
**Major Releases**
No plan to move to 2.0.0 unless there is a major design change like an incompatible API change in the project
**Minor Releases**
- X.Y.0-alpha.W, W >= 0 (Branch : main)
- Released as needed before we cut a beta X.Y release
- Alpha release, cut from master branch
- X.Y.0-beta.W, W >= 0 (Branch : main)
- Released as needed before we cut a stable X.Y release
- More stable than the alpha release to signal users to test things out
- Beta release, cut from master branch
- X.Y.0-rc.W, W >= 0 (Branch : main)
- Released as needed before we cut a stable X.Y release
- soak for ~ 2 weeks before cutting a stable release
- Release candidate release, cut from master branch
- X.Y.0 (Branch: main)
- Released as needed
- Stable release, cut from master when X.Y milestone is complete
**Patch Releases**
- Patch Releases X.Y.Z, Z > 0 (Branch: release-X.Y, only cut when a patch is needed)
- No breaking changes
- Applicable fixes, including security fixes, may be cherry-picked from master into the latest supported minor release-X.Y branches.
- Patch release, cut from a release-X.Y branch
## Supported Releases
Applicable fixes, including security fixes, may be cherry-picked into the release branch, depending on severity and feasibility. Patch releases are cut from that branch as needed.
We expect users to stay reasonably up-to-date with the versions of Eraser they use in production, but understand that it may take time to upgrade. We expect users to be running approximately the latest patch release of a given minor release and encourage users to upgrade as soon as possible.
We expect to "support" n (current) and n-1 major.minor releases. "Support" means we expect users to be running that version in production. For example, when v1.2.0 comes out, v1.0.x will no longer be supported for patches, and we encourage users to upgrade to a supported version as soon as possible.
## Supported Kubernetes Versions
Eraser is assumed to be compatible with the [current Kubernetes Supported Versions](https://kubernetes.io/releases/patch-releases/#detailed-release-history-for-active-branches) per [Kubernetes Supported Versions policy](https://kubernetes.io/releases/version-skew-policy/).
For example, if Eraser _supported_ versions are v1.2 and v1.1, and Kubernetes _supported_ versions are v1.22, v1.23, v1.24, then all supported Eraser versions (v1.2, v1.1) are assumed to be compatible with all supported Kubernetes versions (v1.22, v1.23, v1.24). If Kubernetes v1.25 is released later, then Eraser v1.2 and v1.1 will be assumed to be compatible with v1.25 if those Eraser versions are still supported at that time.
If you choose to use Eraser with a version of Kubernetes that it does not support, you are using it at your own risk.
## Acknowledgement
This document builds on the ideas and implementations of release processes from projects like Kubernetes and Helm.
================================================
FILE: docs/versioned_docs/version-v1.4.x/releasing.md
================================================
---
title: Releasing
---
## Create Release Pull Request
1. Go to `create_release_pull_request` workflow under actions.
2. Select run workflow, and use the workflow from your branch.
3. Input release version with the semantic version identifying the release.
4. Click run workflow and review the PR created by github-actions.
# Releasing
5. Once the PR is merged to `main`, tag that commit with release version and push tags to remote repository.
```
git checkout <release branch>
git pull origin <release branch>
git tag -a <semver> -m '<semver>'
git push origin <semver>
```
6. Pushing the release tag will trigger the GitHub Actions `release` job.
This will build the `ghcr.io/eraser-dev/remover`, `ghcr.io/eraser-dev/eraser-manager`, `ghcr.io/eraser-dev/collector`, and `ghcr.io/eraser-dev/eraser-trivy-scanner` images automatically, then publish the new release tag.
## Publishing
1. GitHub Action will create a new release, review and edit it at https://github.com/eraser-dev/eraser/releases
## Notifying
1. Send an email to the [Eraser mailing list](https://groups.google.com/g/eraser-dev) announcing the release, with links to GitHub.
2. Post a message on the [Eraser Slack channel](https://kubernetes.slack.com/archives/C03Q8KV8YQ4) with the same information.
================================================
FILE: docs/versioned_docs/version-v1.4.x/setup.md
================================================
---
title: Setup
---
# Development Setup
This document describes the steps to get started with development.
You can either utilize [Codespaces](https://docs.github.com/en/codespaces/overview) or setup a local environment.
## Local Setup
### Prerequisites:
- [go](https://go.dev/) with version 1.17 or later.
- [docker](https://docs.docker.com/get-docker/)
- [kind](https://kind.sigs.k8s.io/)
- `make`
### Get things running
- Get dependencies with `go get`
- This project uses `make`. You can utilize `make help` to see available targets. For local deployment make targets help to build, test and deploy.
### Making changes
Please refer to [Development Reference](#development-reference) for more details on the specific commands.
To test your changes on a cluster:
```bash
# generate necessary api files (optional - only needed if changes to api folder).
make generate
# build applicable images
make docker-build-manager MANAGER_IMG=eraser-manager:dev
make docker-build-remover REMOVER_IMG=remover:dev
make docker-build-collector COLLECTOR_IMG=collector:dev
make docker-build-trivy-scanner TRIVY_SCANNER_IMG=eraser-trivy-scanner:dev
# make sure updated image is present on cluster (e.g., see kind example below)
kind load docker-image \
eraser-manager:dev \
eraser-trivy-scanner:dev \
remover:dev \
collector:dev
make manifests
make deploy
# to remove the deployment
make undeploy
```
To test your changes to manager locally:
```bash
make run
```
Example Output:
```
you@local:~/eraser$ make run
docker build . \
-t eraser-tooling \
-f build/tooling/Dockerfile
[+] Building 7.8s (8/8) FINISHED
=> => naming to docker.io/library/eraser-tooling 0.0s
docker run -v /home/eraser/config:/config -w /config/manager \
registry.k8s.io/kustomize/kustomize:v3.8.9 edit set image controller=eraser-manager:dev
docker run -v /home/eraser:/eraser eraser-tooling controller-gen \
crd \
rbac:roleName=manager-role \
webhook \
paths="./..." \
output:crd:artifacts:config=config/crd/bases
rm -rf manifest_staging
mkdir -p manifest_staging/deploy
docker run --rm -v /home/eraser:/eraser \
registry.k8s.io/kustomize/kustomize:v3.8.9 build \
/eraser/config/default -o /eraser/manifest_staging/deploy/eraser.yaml
docker run -v /home/eraser:/eraser eraser-tooling controller-gen object:headerFile="hack/boilerplate.go.txt" paths="./..."
go fmt ./...
go vet ./...
go run ./main.go
{"level":"info","ts":1652985685.1663408,"logger":"controller-runtime.metrics","msg":"Metrics server is starting to listen","addr":":8080"}
...
```
## Development Reference
Eraser is using tooling from [kubebuilder](https://github.com/kubernetes-sigs/kubebuilder). For Eraser this tooling is containerized into the `eraser-tooling` image. The `make` targets can use this tooling and build the image when necessary.
You can override the default configuration using environment variables. Below you can find a reference of targets and configuration options.
### Common Configuration
| Environment Variable | Description |
| -------------------- | --------------------------------------------------------------------------------------------- |
| VERSION | Specifies the version (i.e., the image tag) of eraser to be used. |
| MANAGER_IMG | Defines the image url for the Eraser manager. Used for tagging, pulling and pushing the image |
| REMOVER_IMG | Defines the image url for the Eraser. Used for tagging, pulling and pushing the image |
| COLLECTOR_IMG | Defines the image url for the Collector. Used for tagging, pulling and pushing the image |
### Linting
- `make lint`
Lints the go code.
| Environment Variable | Description |
| -------------------- | ------------------------------------------------------- |
| GOLANGCI_LINT | Specifies the go linting binary to be used for linting. |
### Development
- `make generate`
Generates necessary files for the k8s api stored under `api/v1alpha1/zz_generated.deepcopy.go`. See the [kubebuilder docs](https://book.kubebuilder.io/cronjob-tutorial/other-api-files.html) for details.
- `make manifests`
Generates the eraser deployment yaml files under `manifest_staging/deploy`.
Configuration Options:
| Environment Variable | Description |
| -------------------- | -------------------------------------------------- |
| REMOVER_IMG | Defines the image url for the Eraser. |
| MANAGER_IMG | Defines the image url for the Eraser manager. |
| KUSTOMIZE_VERSION | Defines the Kustomize version used for generating manifests. |
- `make test`
Runs the unit tests for the eraser project.
Configuration Options:
| Environment Variable | Description |
| -------------------- | ----------------------------------------------------------- |
| ENVTEST | Specifies the envtest setup binary. |
| ENVTEST_K8S_VERSION | Specifies the Kubernetes version for envtest setup command. |
- `make e2e-test`
Runs e2e tests on a cluster.
Configuration Options:
| Environment Variable | Description |
| -------------------- | ------------------------------------------------------------------------------------------------------------- |
| REMOVER_IMG | Eraser image to be used for e2e test. |
| MANAGER_IMG | Eraser manager image to be used for e2e test. |
| KUBERNETES_VERSION | Kubernetes version for e2e test. |
| TEST_COUNT | Sets repetition for test. Please refer to [go docs](https://pkg.go.dev/cmd/go#hdr-Testing_flags) for details. |
| TIMEOUT | Sets timeout for test. Please refer to [go docs](https://pkg.go.dev/cmd/go#hdr-Testing_flags) for details. |
| TESTFLAGS | Sets additional test flags |
### Build
- `make build`
Builds the eraser manager binaries.
- `make run`
Runs the eraser manager on your local machine.
- `make docker-build-manager`
Builds the docker image for the eraser manager.
Configuration Options:
| Environment Variable | Description |
| -------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------ |
| CACHE_FROM | Sets the target of the buildx --cache-from flag [see buildx reference](https://docs.docker.com/engine/reference/commandline/buildx_build/#cache-from). |
| CACHE_TO | Sets the target of the buildx --cache-to flag [see buildx reference](https://docs.docker.com/engine/reference/commandline/buildx_build/#cache-to). |
| PLATFORM | Sets the target platform for buildx [see buildx reference](https://docs.docker.com/engine/reference/commandline/buildx_build/#platform). |
| OUTPUT_TYPE | Sets the output for buildx [see buildx reference](https://docs.docker.com/engine/reference/commandline/buildx_build/#output). |
| MANAGER_IMG | Specifies the target repository, image name and tag for building image. |
- `make docker-push-manager`
Pushes the docker image for the eraser manager.
Configuration Options:
| Environment Variable | Description |
| -------------------- | ----------------------------------------------------------------------- |
| MANAGER_IMG | Specifies the target repository, image name and tag for building image. |
- `make docker-build-remover`
Builds the docker image for the eraser remover.
Configuration Options:
| Environment Variable | Description |
| -------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------ |
| CACHE_FROM | Sets the target of the buildx --cache-from flag [see buildx reference](https://docs.docker.com/engine/reference/commandline/buildx_build/#cache-from). |
| CACHE_TO | Sets the target of the buildx --cache-to flag [see buildx reference](https://docs.docker.com/engine/reference/commandline/buildx_build/#cache-to). |
| PLATFORM | Sets the target platform for buildx [see buildx reference](https://docs.docker.com/engine/reference/commandline/buildx_build/#platform). |
| OUTPUT_TYPE | Sets the output for buildx [see buildx reference](https://docs.docker.com/engine/reference/commandline/buildx_build/#output). |
| REMOVER_IMG | Specifies the target repository, image name and tag for building image. |
- `make docker-push-remover`
Pushes the docker image for the eraser remover.
Configuration Options:
| Environment Variable | Description |
| -------------------- | ----------------------------------------------------------------------- |
| REMOVER_IMG | Specifies the target repository, image name and tag for building image. |
- `make docker-build-collector`
Builds the docker image for the eraser collector.
Configuration Options:
| Environment Variable | Description |
| -------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------ |
| CACHE_FROM | Sets the target of the buildx --cache-from flag [see buildx reference](https://docs.docker.com/engine/reference/commandline/buildx_build/#cache-from). |
| CACHE_TO | Sets the target of the buildx --cache-to flag [see buildx reference](https://docs.docker.com/engine/reference/commandline/buildx_build/#cache-to). |
| PLATFORM | Sets the target platform for buildx [see buildx reference](https://docs.docker.com/engine/reference/commandline/buildx_build/#platform). |
| OUTPUT_TYPE | Sets the output for buildx [see buildx reference](https://docs.docker.com/engine/reference/commandline/buildx_build/#output). |
| COLLECTOR_IMG | Specifies the target repository, image name and tag for building image. |
- `make docker-push-collector`
Pushes the docker image for the eraser collector.
Configuration Options:
| Environment Variable | Description |
| -------------------- | ----------------------------------------------------------------------- |
| COLLECTOR_IMG | Specifies the target repository, image name and tag for building image. |
### Deployment
- `make install`
Install CRDs into the K8s cluster specified in ~/.kube/config.
Configuration Options:
| Environment Variable | Description |
| -------------------- | ---------------------------------------------------------------- |
| KUSTOMIZE_VERSION | Kustomize version used to generate k8s resources for deployment. |
- `make uninstall`
Uninstall CRDs from the K8s cluster specified in ~/.kube/config.
Configuration Options:
| Environment Variable | Description |
| -------------------- | ---------------------------------------------------------------- |
| KUSTOMIZE_VERSION | Kustomize version used to generate k8s resources for deployment. |
- `make deploy`
Deploys eraser to the cluster specified in ~/.kube/config.
Configuration Options:
| Environment Variable | Description |
| -------------------- | -------------------------------------------------------------------- |
| KUSTOMIZE_VERSION | Kustomize version used to generate k8s resources for deployment. |
| MANAGER_IMG | Specifies the eraser manager image version to be used for deployment |
- `make undeploy`
Undeploy controller from the K8s cluster specified in ~/.kube/config.
Configuration Options:
| Environment Variable | Description |
| -------------------- | ------------------------------------------------------------------------- |
| KUSTOMIZE_VERSION | Kustomize version used to generate k8s resources that need to be removed. |
### Release
- `make release-manifest`
Generates k8s manifests files for a release.
Configuration Options:
| Environment Variable | Description |
| -------------------- | ------------------------------------ |
| NEWVERSION | Sets the new version in the Makefile |
- `make promote-staging-manifest`
Promotes the k8s deployment yaml files to release.
================================================
FILE: docs/versioned_docs/version-v1.4.x/trivy.md
================================================
---
title: Trivy
---
## Trivy Provider Options
The Trivy provider is used in Eraser for image scanning and detecting vulnerabilities. See [Customization](https://eraser-dev.github.io/eraser/docs/customization#scanner-options) for more details on configuring the scanner.
================================================
FILE: docs/versioned_sidebars/version-v0.4.x-sidebars.json
================================================
{
"sidebar": [
"introduction",
"installation",
"quick-start",
"architecture",
{
"type": "category",
"label": "Topics",
"collapsible": true,
"collapsed": false,
"items": [
"manual-removal",
"exclusion",
"customization"
]
},
{
"type": "category",
"label": "Development",
"collapsible": true,
"collapsed": false,
"items": [
"setup",
"releasing"
]
},
{
"type": "category",
"label": "Scanning",
"collapsible": true,
"collapsed": false,
"items": [
"custom-scanner",
"trivy"
]
},
"faq",
"contributing",
"code-of-conduct"
]
}
================================================
FILE: docs/versioned_sidebars/version-v0.5.x-sidebars.json
================================================
{
"sidebar": [
"introduction",
"installation",
"quick-start",
"architecture",
{
"type": "category",
"label": "Topics",
"collapsible": true,
"collapsed": false,
"items": [
"manual-removal",
"exclusion",
"customization"
]
},
{
"type": "category",
"label": "Development",
"collapsible": true,
"collapsed": false,
"items": [
"setup",
"releasing"
]
},
{
"type": "category",
"label": "Scanning",
"collapsible": true,
"collapsed": false,
"items": [
"custom-scanner",
"trivy"
]
},
"faq",
"contributing",
"code-of-conduct"
]
}
================================================
FILE: docs/versioned_sidebars/version-v1.0.x-sidebars.json
================================================
{
"sidebar": [
"introduction",
"installation",
"quick-start",
"architecture",
{
"type": "category",
"label": "Topics",
"collapsible": true,
"collapsed": false,
"items": [
"manual-removal",
"exclusion",
"customization",
"metrics"
]
},
{
"type": "category",
"label": "Development",
"collapsible": true,
"collapsed": false,
"items": [
"setup",
"releasing"
]
},
{
"type": "category",
"label": "Scanning",
"collapsible": true,
"collapsed": false,
"items": [
"custom-scanner",
"trivy"
]
},
"faq",
"contributing",
"code-of-conduct"
]
}
================================================
FILE: docs/versioned_sidebars/version-v1.1.x-sidebars.json
================================================
{
"sidebar": [
"introduction",
"installation",
"quick-start",
"architecture",
{
"type": "category",
"label": "Topics",
"collapsible": true,
"collapsed": false,
"items": [
"manual-removal",
"exclusion",
"customization",
"metrics"
]
},
{
"type": "category",
"label": "Development",
"collapsible": true,
"collapsed": false,
"items": [
"setup",
"releasing"
]
},
{
"type": "category",
"label": "Scanning",
"collapsible": true,
"collapsed": false,
"items": [
"custom-scanner",
"trivy"
]
},
"faq",
"contributing",
"code-of-conduct"
]
}
================================================
FILE: docs/versioned_sidebars/version-v1.2.x-sidebars.json
================================================
{
"sidebar": [
"introduction",
"installation",
"quick-start",
"architecture",
{
"type": "category",
"label": "Topics",
"collapsible": true,
"collapsed": false,
"items": [
"manual-removal",
"exclusion",
"customization",
"metrics"
]
},
{
"type": "category",
"label": "Development",
"collapsible": true,
"collapsed": false,
"items": [
"setup",
"releasing"
]
},
{
"type": "category",
"label": "Scanning",
"collapsible": true,
"collapsed": false,
"items": [
"custom-scanner",
"trivy"
]
},
"faq",
"contributing",
"code-of-conduct"
]
}
================================================
FILE: docs/versioned_sidebars/version-v1.3.x-sidebars.json
================================================
{
"sidebar": [
"introduction",
"installation",
"quick-start",
"architecture",
{
"type": "category",
"label": "Topics",
"collapsible": true,
"collapsed": false,
"items": [
"manual-removal",
"exclusion",
"customization",
"metrics"
]
},
{
"type": "category",
"label": "Development",
"collapsible": true,
"collapsed": false,
"items": [
"setup",
"releasing"
]
},
{
"type": "category",
"label": "Scanning",
"collapsible": true,
"collapsed": false,
"items": [
"custom-scanner",
"trivy"
]
},
"faq",
"contributing",
"code-of-conduct"
]
}
================================================
FILE: docs/versioned_sidebars/version-v1.4.x-sidebars.json
================================================
{
"sidebar": [
"introduction",
"installation",
"quick-start",
"architecture",
{
"type": "category",
"label": "Topics",
"collapsible": true,
"collapsed": false,
"items": [
"manual-removal",
"exclusion",
"customization",
"metrics"
]
},
{
"type": "category",
"label": "Development",
"collapsible": true,
"collapsed": false,
"items": [
"setup",
"releasing"
]
},
{
"type": "category",
"label": "Scanning",
"collapsible": true,
"collapsed": false,
"items": [
"custom-scanner",
"trivy"
]
},
"faq",
"contributing",
"code-of-conduct"
]
}
================================================
FILE: docs/versions.json
================================================
[
"v1.4.x",
"v1.3.x",
"v1.2.x",
"v1.1.x",
"v1.0.x",
"v0.5.x",
"v0.4.x"
]
================================================
FILE: go.mod
================================================
module github.com/eraser-dev/eraser
go 1.24.0
toolchain go1.24.9
require (
github.com/aquasecurity/trivy v0.51.2
github.com/aquasecurity/trivy-db v0.0.0-20241209111357-8c398f13db0e // indirect
github.com/go-logr/logr v1.4.3
github.com/onsi/ginkgo/v2 v2.14.0
github.com/onsi/gomega v1.30.0
github.com/stretchr/testify v1.10.0
go.opentelemetry.io/otel v1.37.0
go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp v1.37.0
go.opentelemetry.io/otel/metric v1.37.0
go.opentelemetry.io/otel/sdk v1.37.0
go.opentelemetry.io/otel/sdk/metric v1.37.0
go.uber.org/zap v1.27.0
golang.org/x/exp v0.0.0-20241009180824-f66d83c29e7c
golang.org/x/sys v0.38.0
google.golang.org/grpc v1.73.1
k8s.io/api v0.31.2
k8s.io/apimachinery v0.31.2
k8s.io/client-go v0.31.2
// NOTE: effectively pinned to v0.25.0 via the replace directive below,
// since updating to 0.26+ removes the CRI v1alpha2 runtime API
k8s.io/cri-api v0.27.1
k8s.io/klog/v2 v2.130.1
k8s.io/utils v0.0.0-20240711033017-18e509b52bc8
oras.land/oras-go v1.2.5
sigs.k8s.io/controller-runtime v0.16.6
sigs.k8s.io/e2e-framework v0.0.8
sigs.k8s.io/kind v0.15.0
sigs.k8s.io/yaml v1.4.0
)
require (
github.com/AdaLogics/go-fuzz-headers v0.0.0-20230811130428-ced1acdcaa24 // indirect
github.com/BurntSushi/toml v1.4.0 // indirect
github.com/Microsoft/hcsshim v0.12.9 // indirect
github.com/alessio/shellescape v1.4.1 // indirect
github.com/beorn7/perks v1.0.1 // indirect
github.com/cenkalti/backoff/v5 v5.0.2 // indirect
github.com/cespare/xxhash/v2 v2.3.0 // indirect
github.com/containerd/containerd v1.7.29 // indirect
github.com/containerd/errdefs v0.3.0 // indirect
github.com/containerd/log v0.1.0 // indirect
github.com/containerd/platforms v0.2.1 // indirect
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect
github.com/distribution/reference v0.6.0 // indirect
github.com/docker/cli v27.3.1+incompatible // indirect
github.com/docker/distribution v2.8.3+incompatible // indirect
github.com/docker/docker v28.0.0+incompatible // indirect
github.com/docker/docker-credential-helpers v0.8.2 // indirect
github.com/docker/go-connections v0.5.0 // indirect
github.com/docker/go-metrics v0.0.1 // indirect
github.com/emicklei/go-restful/v3 v3.11.0 // indirect
github.com/evanphx/json-patch/v5 v5.8.0 // indirect
github.com/fatih/color v1.16.0 // indirect
github.com/felixge/httpsnoop v1.0.4 // indirect
github.com/fsnotify/fsnotify v1.7.0 // indirect
github.com/go-logr/stdr v1.2.2 // indirect
github.com/go-logr/zapr v1.3.0 // indirect
github.com/go-openapi/jsonpointer v0.21.0 // indirect
github.com/go-openapi/jsonreference v0.21.0 // indirect
github.com/go-openapi/swag v0.23.0 // indirect
github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572 // indirect
github.com/gogo/protobuf v1.3.2 // indirect
github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect
github.com/golang/protobuf v1.5.4 // indirect
github.com/google/gnostic-models v0.6.9-0.20230804172637-c7be7c783f49 // indirect
github.com/google/go-cmp v0.7.0 // indirect
github.com/google/go-containerregistry v0.20.2 // indirect
github.com/google/gofuzz v1.2.0 // indirect
github.com/google/pprof v0.0.0-20230406165453-00490a63f317 // indirect
github.com/google/uuid v1.6.0 // indirect
github.com/gorilla/mux v1.8.1 // indirect
github.com/grpc-ecosystem/grpc-gateway/v2 v2.27.1 // indirect
github.com/imdario/mergo v0.3.16 // indirect
github.com/inconshreveable/mousetrap v1.1.0 // indirect
github.com/josharian/intern v1.0.0 // indirect
github.com/json-iterator/go v1.1.12 // indirect
github.com/klauspost/compress v1.17.11 // indirect
github.com/mailru/easyjson v0.7.7 // indirect
github.com/mattn/go-colorable v0.1.13 // indirect
github.com/mattn/go-isatty v0.0.20 // indirect
github.com/moby/locker v1.0.1 // indirect
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
github.com/modern-go/reflect2 v1.0.2 // indirect
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect
github.com/opencontainers/go-digest v1.0.0 // indirect
github.com/opencontainers/image-spec v1.1.0 // indirect
github.com/package-url/packageurl-go v0.1.2 // indirect
github.com/pelletier/go-toml v1.9.5 // indirect
github.com/pkg/errors v0.9.1 // indirect
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect
github.com/prometheus/client_golang v1.20.5 // indirect
github.com/prometheus/client_model v0.6.1 // indirect
github.com/prometheus/common v0.55.0 // indirect
github.com/prometheus/procfs v0.15.1 // indirect
github.com/samber/lo v1.47.0 // indirect
github.com/sirupsen/logrus v1.9.3 // indirect
github.com/spf13/cobra v1.8.1 // indirect
github.com/spf13/pflag v1.0.5 // indirect
github.com/vladimirvivien/gexe v0.1.1 // indirect
go.opentelemetry.io/auto/sdk v1.1.0 // indirect
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.53.0 // indirect
go.opentelemetry.io/otel/trace v1.37.0 // indirect
go.opentelemetry.io/proto/otlp v1.7.0 // indirect
go.uber.org/multierr v1.11.0 // indirect
golang.org/x/crypto v0.45.0 // indirect
golang.org/x/net v0.47.0 // indirect
golang.org/x/oauth2 v0.30.0 // indirect
golang.org/x/sync v0.18.0 // indirect
golang.org/x/term v0.37.0 // indirect
golang.org/x/text v0.31.0 // indirect
golang.org/x/time v0.12.0 // indirect
golang.org/x/tools v0.38.0 // indirect
golang.org/x/xerrors v0.0.0-20231012003039-104605ab7028 // indirect
gomodules.xyz/jsonpatch/v2 v2.4.0 // indirect
google.golang.org/genproto/googleapis/api v0.0.0-20250603155806-513f23925822 // indirect
google.golang.org/genproto/googleapis/rpc v0.0.0-20250603155806-513f23925822 // indirect
google.golang.org/protobuf v1.36.6 // indirect
gopkg.in/inf.v0 v0.9.1 // indirect
gopkg.in/yaml.v2 v2.4.0 // indirect
gopkg.in/yaml.v3 v3.0.1 // indirect
k8s.io/apiextensions-apiserver v0.31.1 // indirect
k8s.io/component-base v0.31.2 // indirect
k8s.io/kube-openapi v0.0.0-20240228011516-70dd3763d340 // indirect
sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd // indirect
sigs.k8s.io/structured-merge-diff/v4 v4.4.1 // indirect
)
replace (
// v0.3.1-0.20230104082527-d6f58551be3f is taken from github.com/moby/buildkit v0.11.0
// spdx logic write on v0.3.0 and incompatible with v0.3.1-0.20230104082527-d6f58551be3f
github.com/spdx/tools-golang => github.com/spdx/tools-golang v0.3.0
k8s.io/api => k8s.io/api v0.28.12
k8s.io/apiextensions-apiserver => k8s.io/apiextensions-apiserver v0.28.12
k8s.io/apimachinery => k8s.io/apimachinery v0.28.12
k8s.io/apiserver => k8s.io/apiserver v0.28.12
k8s.io/cli-runtime => k8s.io/cli-runtime v0.28.12
k8s.io/client-go => k8s.io/client-go v0.28.12
k8s.io/cloud-provider => k8s.io/cloud-provider v0.28.12
k8s.io/cluster-bootstrap => k8s.io/cluster-bootstrap v0.28.12
k8s.io/code-generator => k8s.io/code-generator v0.28.12
k8s.io/component-base => k8s.io/component-base v0.28.12
k8s.io/component-helpers => k8s.io/component-helpers v0.28.12
k8s.io/controller-manager => k8s.io/controller-manager v0.28.12
// Pin CRI-API to version that still has v1alpha2 runtime API
k8s.io/cri-api => k8s.io/cri-api v0.25.0
k8s.io/csi-translation-lib => k8s.io/csi-translation-lib v0.28.12
k8s.io/kube-aggregator => k8s.io/kube-aggregator v0.28.12
k8s.io/kube-controller-manager => k8s.io/kube-controller-manager v0.28.12
k8s.io/kube-proxy => k8s.io/kube-proxy v0.28.12
k8s.io/kubectl => k8s.io/kubectl v0.27.16
k8s.io/kubelet => k8s.io/kubelet v0.27.16
k8s.io/legacy-cloud-providers => k8s.io/legacy-cloud-providers v0.27.16
k8s.io/metrics => k8s.io/metrics v0.27.16
k8s.io/mount-utils => k8s.io/mount-utils v0.27.16
k8s.io/pod-security-admission => k8s.io/pod-security-admission v0.27.16
k8s.io/sample-apiserver => k8s.io/sample-apiserver v0.27.16
k8s.io/sample-cli-plugin => k8s.io/sample-cli-plugin v0.27.16
k8s.io/sample-controller => k8s.io/sample-controller v0.27.16
// Updated to fix types.AuthConfig compatibility with Docker v27.x
oras.land/oras-go => oras.land/oras-go v1.2.5
)
================================================
FILE: go.sum
================================================
github.com/AdaLogics/go-fuzz-headers v0.0.0-20230811130428-ced1acdcaa24 h1:bvDV9vkmnHYOMsOr4WLk+Vo07yKIzd94sVoIqshQ4bU=
github.com/AdaLogics/go-fuzz-headers v0.0.0-20230811130428-ced1acdcaa24/go.mod h1:8o94RPi1/7XTJvwPpRSzSUedZrtlirdB3r9Z20bi2f8=
github.com/BurntSushi/toml v1.0.0/go.mod h1:CxXYINrC8qIiEnFrOxCa7Jy5BFHlXnUU2pbicEuybxQ=
github.com/BurntSushi/toml v1.4.0 h1:kuoIxZQy2WRRk1pttg9asf+WVv6tWQuBNVmK8+nqPr0=
github.com/BurntSushi/toml v1.4.0/go.mod h1:ukJfTF/6rtPPRCnwkur4qwRxa8vTRFBF0uk2lLoLwho=
github.com/Microsoft/go-winio v0.6.2 h1:F2VQgta7ecxGYO8k3ZZz3RS8fVIXVxONVUPlNERoyfY=
github.com/Microsoft/go-winio v0.6.2/go.mod h1:yd8OoFMLzJbo9gZq8j5qaps8bJ9aShtEA8Ipt1oGCvU=
github.com/Microsoft/hcsshim v0.12.9 h1:2zJy5KA+l0loz1HzEGqyNnjd3fyZA31ZBCGKacp6lLg=
github.com/Microsoft/hcsshim v0.12.9/go.mod h1:fJ0gkFAna6ukt0bLdKB8djt4XIJhF/vEPuoIWYVvZ8Y=
github.com/Shopify/logrus-bugsnag v0.0.0-20171204204709-577dee27f20d h1:UrqY+r/OJnIp5u0s1SbQ8dVfLCZJsnvazdBP5hS4iRs=
github.com/Shopify/logrus-bugsnag v0.0.0-20171204204709-577dee27f20d/go.mod h1:HI8ITrYtUY+O+ZhtlqUnD8+KwNPOyugEhfP9fdUIaEQ=
github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
github.com/alessio/shellescape v1.4.1 h1:V7yhSDDn8LP4lc4jS8pFkt0zCnzVJlG5JXy9BVKJUX0=
github.com/alessio/shellescape v1.4.1/go.mod h1:PZAiSCk0LJaZkiCSkPv8qIobYglO3FPpyFjDCtHLS30=
github.com/aquasecurity/trivy v0.51.2 h1:C5rb5TsEiwGEKQzKc4f2qsJVd5uG+C2aMx+zF+7KOWY=
github.com/aquasecurity/trivy v0.51.2/go.mod h1:/O2z/ySpHOiVOpiPGwZny3EFs/7Jis6et0nn6mlf6n4=
github.com/aquasecurity/trivy-db v0.0.0-20241209111357-8c398f13db0e h1:O5j5SeCNBrXApgBTOobO06q4LMxJxIhcSGE7H6Y154E=
github.com/aquasecurity/trivy-db v0.0.0-20241209111357-8c398f13db0e/go.mod h1:gS8VhlNxhraiq60BBnJw9kGtjeMspQ9E8pX24jCL4jg=
github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8=
github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=
github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw=
github.com/bshuster-repo/logrus-logstash-hook v1.0.0 h1:e+C0SB5R1pu//O4MQ3f9cFuPGoOVeF2fE4Og9otCc70=
github.com/bshuster-repo/logrus-logstash-hook v1.0.0/go.mod h1:zsTqEiSzDgAa/8GZR7E1qaXrhYNDKBYy5/dWPTIflbk=
github.com/bugsnag/bugsnag-go v0.0.0-20141110184014-b1d153021fcd h1:rFt+Y/IK1aEZkEHchZRSq9OQbsSzIT/OrI8YFFmRIng=
github.com/bugsnag/bugsnag-go v0.0.0-20141110184014-b1d153021fcd/go.mod h1:2oa8nejYd4cQ/b0hMIopN0lCRxU0bueqREvZLWFrtK8=
github.com/bugsnag/osext v0.0.0-20130617224835-0dd3f918b21b h1:otBG+dV+YK+Soembjv71DPz3uX/V/6MMlSyD9JBQ6kQ=
github.com/bugsnag/osext v0.0.0-20130617224835-0dd3f918b21b/go.mod h1:obH5gd0BsqsP2LwDJ9aOkm/6J86V6lyAXCoQWGw3K50=
github.com/bugsnag/panicwrap v0.0.0-20151223152923-e2c28503fcd0 h1:nvj0OLI3YqYXer/kZD8Ri1aaunCxIEsOst1BVJswV0o=
github.com/bugsnag/panicwrap v0.0.0-20151223152923-e2c28503fcd0/go.mod h1:D/8v3kj0zr8ZAKg1AQ6crr+5VwKN5eIywRkfhyM/+dE=
github.com/cenkalti/backoff/v5 v5.0.2 h1:rIfFVxEf1QsI7E1ZHfp/B4DF/6QBAUhmgkxc0H7Zss8=
github.com/cenkalti/backoff/v5 v5.0.2/go.mod h1:rkhZdG3JZukswDf7f0cwqPNk4K0sa+F97BxZthm/crw=
github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs=
github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
github.com/containerd/cgroups v1.1.0 h1:v8rEWFl6EoqHB+swVNjVoCJE8o3jX7e8nqBGPLaDFBM=
github.com/containerd/cgroups/v3 v3.0.3 h1:S5ByHZ/h9PMe5IOQoN7E+nMc2UcLEM/V48DGDJ9kip0=
github.com/containerd/cgroups/v3 v3.0.3/go.mod h1:8HBe7V3aWGLFPd/k03swSIsGjZhHI2WzJmticMgVuz0=
github.com/containerd/containerd v1.7.29 h1:90fWABQsaN9mJhGkoVnuzEY+o1XDPbg9BTC9QTAHnuE=
github.com/containerd/containerd v1.7.29/go.mod h1:azUkWcOvHrWvaiUjSQH0fjzuHIwSPg1WL5PshGP4Szs=
github.com/containerd/continuity v0.4.4 h1:/fNVfTJ7wIl/YPMHjf+5H32uFhl63JucB34PlCpMKII=
github.com/containerd/continuity v0.4.4/go.mod h1:/lNJvtJKUQStBzpVQ1+rasXO1LAWtUQssk28EZvJ3nE=
github.com/containerd/errdefs v0.3.0 h1:FSZgGOeK4yuT/+DnF07/Olde/q4KBoMsaamhXxIMDp4=
github.com/containerd/errdefs v0.3.0/go.mod h1:+YBYIdtsnF4Iw6nWZhJcqGSg/dwvV7tyJ/kCkyJ2k+M=
github.com/containerd/errdefs/pkg v0.3.0 h1:9IKJ06FvyNlexW690DXuQNx2KA2cUJXx151Xdx3ZPPE=
github.com/containerd/errdefs/pkg v0.3.0/go.mod h1:NJw6s9HwNuRhnjJhM7pylWwMyAkmCQvQ4GpJHEqRLVk=
github.com/containerd/log v0.1.0 h1:TCJt7ioM2cr/tfR8GPbGf9/VRAX8D2B4PjzCpfX540I=
github.com/containerd/log v0.1.0/go.mod h1:VRRf09a7mHDIRezVKTRCrOq78v577GXq3bSa3EhrzVo=
github.com/containerd/platforms v0.2.1 h1:zvwtM3rz2YHPQsF2CHYM8+KtB5dvhISiXh5ZpSBQv6A=
github.com/containerd/platforms v0.2.1/go.mod h1:XHCb+2/hzowdiut9rkudds9bE5yJ7npe7dG/wG+uFPw=
github.com/containerd/typeurl v1.0.2 h1:Chlt8zIieDbzQFzXzAeBEF92KhExuE4p9p92/QmY7aY=
github.com/containerd/typeurl/v2 v2.2.0 h1:6NBDbQzr7I5LHgp34xAXYF5DOTQDn05X58lsPEmzLso=
github.com/containerd/typeurl/v2 v2.2.0/go.mod h1:8XOOxnyatxSWuG8OfsZXVnAF4iZfedjS/8UHSPJnX4g=
github.com/cpuguy83/go-md2man/v2 v2.0.1/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o=
github.com/cpuguy83/go-md2man/v2 v2.0.4/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o=
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM=
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/distribution/distribution/v3 v3.0.0-20221208165359-362910506bc2 h1:aBfCb7iqHmDEIp6fBvC/hQUddQfg+3qdYjwzaiP9Hnc=
github.com/distribution/distribution/v3 v3.0.0-20221208165359-362910506bc2/go.mod h1:WHNsWjnIn2V1LYOrME7e8KxSeKunYHsxEm4am0BUtcI=
github.com/distribution/reference v0.6.0 h1:0IXCQ5g4/QMHHkarYzh5l+u8T3t73zM5QvfrDyIgxBk=
github.com/distribution/reference v0.6.0/go.mod h1:BbU0aIcezP1/5jX/8MP0YiH4SdvB5Y4f/wlDRiLyi3E=
github.com/docker/cli v27.3.1+incompatible h1:qEGdFBF3Xu6SCvCYhc7CzaQTlBmqDuzxPDpigSyeKQQ=
github.com/docker/cli v27.3.1+incompatible/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8=
github.com/docker/distribution v2.8.3+incompatible h1:AtKxIZ36LoNK51+Z6RpzLpddBirtxJnzDrHLEKxTAYk=
github.com/docker/distribution v2.8.3+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w=
github.com/docker/docker v28.0.0+incompatible h1:Olh0KS820sJ7nPsBKChVhk5pzqcwDR15fumfAd/p9hM=
github.com/docker/docker v28.0.0+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
github.com/docker/docker-credential-helpers v0.8.2 h1:bX3YxiGzFP5sOXWc3bTPEXdEaZSeVMrFgOr3T+zrFAo=
github.com/docker/docker-credential-helpers v0.8.2/go.mod h1:P3ci7E3lwkZg6XiHdRKft1KckHiO9a2rNtyFbZ/ry9M=
github.com/docker/go-connections v0.5.0 h1:USnMq7hx7gwdVZq1L49hLXaFtUdTADjXGp+uj1Br63c=
github.com/docker/go-connections v0.5.0/go.mod h1:ov60Kzw0kKElRwhNs9UlUHAE/F9Fe6GLaXnqyDdmEXc=
github.com/docker/go-events v0.0.0-20190806004212-e31b211e4f1c h1:+pKlWGMw7gf6bQ+oDZB4KHQFypsfjYlq/C4rfL7D3g8=
github.com/docker/go-events v0.0.0-20190806004212-e31b211e4f1c/go.mod h1:Uw6UezgYA44ePAFQYUehOuCzmy5zmg/+nl2ZfMWGkpA=
github.com/docker/go-metrics v0.0.1 h1:AgB/0SvBxihN0X8OR4SjsblXkbMvalQ8cjmtKQ2rQV8=
github.com/docker/go-metrics v0.0.1/go.mod h1:cG1hvH2utMXtqgqqYE9plW6lDxS3/5ayHzueweSI3Vw=
github.com/docker/libtrust v0.0.0-20160708172513-aabc10ec26b7 h1:UhxFibDNY/bfvqU5CAUmr9zpesgbU6SWc8/B4mflAE4=
github.com/docker/libtrust v0.0.0-20160708172513-aabc10ec26b7/go.mod h1:cyGadeNEkKy96OOhEzfZl+yxihPEzKnqJwvfuSUqbZE=
github.com/emicklei/go-restful/v3 v3.11.0 h1:rAQeMHw1c7zTmncogyy8VvRZwtkmkZ4FxERmMY4rD+g=
github.com/emicklei/go-restful/v3 v3.11.0/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc=
github.com/evanphx/json-patch v5.7.0+incompatible h1:vgGkfT/9f8zE6tvSCe74nfpAVDQ2tG6yudJd8LBksgI=
github.com/evanphx/json-patch v5.7.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk=
github.com/evanphx/json-patch/v5 v5.6.0/go.mod h1:G79N1coSVB93tBe7j6PhzjmR3/2VvlbKOFpnXhI9Bw4=
github.com/evanphx/json-patch/v5 v5.8.0 h1:lRj6N9Nci7MvzrXuX6HFzU8XjmhPiXPlsKEy1u0KQro=
github.com/evanphx/json-patch/v5 v5.8.0/go.mod h1:VNkHZ/282BpEyt/tObQO8s5CMPmYYq14uClGH4abBuQ=
github.com/fatih/color v1.16.0 h1:zmkK9Ngbjj+K0yRhTVONQh1p/HknKYSlNT+vZCzyokM=
github.com/fatih/color v1.16.0/go.mod h1:fL2Sau1YI5c0pdGEVCbKQbLXB6edEj1ZgiY4NijnWvE=
github.com/felixge/httpsnoop v1.0.4 h1:NFTV2Zj1bL4mc9sqWACXbQFVBBg2W3GPvqp8/ESS2Wg=
github.com/felixge/httpsnoop v1.0.4/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U=
github.com/fsnotify/fsnotify v1.7.0 h1:8JEhPFa5W2WU7YfeZzPNqzMP6Lwt7L2715Ggo0nosvA=
github.com/fsnotify/fsnotify v1.7.0/go.mod h1:40Bi/Hjc2AVfZrqy+aj+yEI+/bRxZnMJyTJwOpGvigM=
github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE=
github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk=
github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
github.com/go-logr/logr v1.4.3 h1:CjnDlHq8ikf6E492q6eKboGOC0T8CDaOvkHCIg8idEI=
github.com/go-logr/logr v1.4.3/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY=
github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag=
github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE=
github.com/go-logr/zapr v1.3.0 h1:XGdV8XW8zdwFiwOA2Dryh1gj2KRQyOOoNmBy4EplIcQ=
github.com/go-logr/zapr v1.3.0/go.mod h1:YKepepNBd1u/oyhd/yQmtjVXmm9uML4IXUgMOwR8/Gg=
github.com/go-openapi/jsonpointer v0.21.0 h1:YgdVicSA9vH5RiHs9TZW5oyafXZFc6+2Vc1rr/O9oNQ=
github.com/go-openapi/jsonpointer v0.21.0/go.mod h1:IUyH9l/+uyhIYQ/PXVA41Rexl+kOkAPDdXEYns6fzUY=
github.com/go-openapi/jsonreference v0.21.0 h1:Rs+Y7hSXT83Jacb7kFyjn4ijOuVGSvOdF2+tg1TRrwQ=
github.com/go-openapi/jsonreference v0.21.0/go.mod h1:LmZmgsrTkVg9LG4EaHeY8cBDslNPMo06cago5JNLkm4=
github.com/go-openapi/swag v0.23.0 h1:vsEVJDUo2hPJ2tu0/Xc+4noaxyEffXNIs3cOULZ+GrE=
github.com/go-openapi/swag v0.23.0/go.mod h1:esZ8ITTYEsH1V2trKHjAN8Ai7xHb8RV+YSZ577vPjgQ=
github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY=
github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572 h1:tfuBGBXKqDEevZMzYi5KSi8KkcZtzBcTgAUUtapy0OI=
github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572/go.mod h1:9Pwr4B2jHnOSGXyyzV8ROjYa2ojvAY6HCGYYfMoC3Ls=
github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q=
github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q=
github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da h1:oI5xCqsCo564l8iNU+DwB5epxmsaqB+rhGL0m5jtYqE=
github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek=
github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps=
github.com/gomodule/redigo v1.8.2 h1:H5XSIre1MB5NbPYFp+i1NBbb5qN1W8Y8YAQoAYbkm8k=
github.com/gomodule/redigo v1.8.2/go.mod h1:P9dn9mFrCBvWhGE1wpxx6fgq7BAeLBk+UUUzlpkBYO0=
github.com/google/gnostic-models v0.6.9-0.20230804172637-c7be7c783f49 h1:0VpGH+cDhbDtdcweoyCVsF3fhN8kejK6rFe/2FFX2nU=
github.com/google/gnostic-models v0.6.9-0.20230804172637-c7be7c783f49/go.mod h1:BkkQ4L1KS1xMt2aWSPStnn55ChGC0DPOn2FQYj+f25M=
github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8=
github.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU=
github.com/google/go-containerregistry v0.20.2 h1:B1wPJ1SN/S7pB+ZAimcciVD+r+yV/l/DSArMxlbwseo=
github.com/google/go-containerregistry v0.20.2/go.mod h1:z38EKdKh4h7IP2gSfUUqEvalZBqs6AoLeWfUy34nQC8=
github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0=
github.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
github.com/google/pprof v0.0.0-20230406165453-00490a63f317 h1:hFhpt7CTmR3DX+b4R19ydQFtofxT0Sv3QsKNMVQYTMQ=
github.com/google/pprof v0.0.0-20230406165453-00490a63f317/go.mod h1:79YE0hCXdHag9sBkw2o+N/YnZtTkXi0UT9Nnixa5eYk=
github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0=
github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/gorilla/handlers v1.5.1 h1:9lRY6j8DEeeBT10CvO9hGW0gmky0BprnvDI5vfhUHH4=
github.com/gorilla/handlers v1.5.1/go.mod h1:t8XrUpc4KVXb7HGyJ4/cEnwQiaxrX/hz1Zv/4g96P1Q=
github.com/gorilla/mux v1.8.1 h1:TuBL49tXwgrFYWhqrNgrUNEY92u81SPhu7sTdzQEiWY=
github.com/gorilla/mux v1.8.1/go.mod h1:AKf9I4AEqPTmMytcMc0KkNouC66V3BtZ4qD5fmWSiMQ=
github.com/grpc-ecosystem/grpc-gateway/v2 v2.27.1 h1:X5VWvz21y3gzm9Nw/kaUeku/1+uBhcekkmy4IkffJww=
github.com/grpc-ecosystem/grpc-gateway/v2 v2.27.1/go.mod h1:Zanoh4+gvIgluNqcfMVTJueD4wSS5hT7zTt4Mrutd90=
github.com/hashicorp/golang-lru v0.6.0 h1:uL2shRDx7RTrOrTCUZEGP/wJUFiUI8QT6E7z5o8jga4=
github.com/hashicorp/golang-lru v0.6.0/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4=
github.com/imdario/mergo v0.3.16 h1:wwQJbIsHYGMUyLSPrEq1CT16AhnhNJQ51+4fdHUnCl4=
github.com/imdario/mergo v0.3.16/go.mod h1:WBLT9ZmE3lPoWsEzCh9LPo3TiwVN+ZKEjmz+hD27ysY=
github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8=
github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8=
github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw=
github.com/jessevdk/go-flags v1.4.0/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI=
github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY=
github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y=
github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU=
github.com/json-iterator/go v1.1.7/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM=
github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo=
github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w=
github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8=
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
github.com/klauspost/compress v1.17.11 h1:In6xLpyWOi1+C7tXUUWv2ot1QvBjxevKAaI6IXrJmUc=
github.com/klauspost/compress v1.17.11/go.mod h1:pMDklpSncoRMuLFrf1W9Ss9KT+0rH90U12bZKk7uwG0=
github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc=
github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE=
github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk=
github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc=
github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw=
github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0=
github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc=
github.com/mattn/go-colorable v0.1.13 h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxecdEvA=
github.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovkB8vQcUbaXHg=
github.com/mattn/go-isatty v0.0.14/go.mod h1:7GGIvUiUoEMVVmxf/4nioHXj79iQHKdU27kJ6hsGG94=
github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM=
github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY=
github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y=
github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=
github.com/moby/locker v1.0.1 h1:fOXqR41zeveg4fFODix+1Ch4mj/gT0NE1XJbp/epuBg=
github.com/moby/locker v1.0.1/go.mod h1:S7SDdo5zpBK84bzzVlKr2V0hz+7x9hWbYC/kq7oQppc=
github.com/moby/sys/mountinfo v0.6.2 h1:BzJjoreD5BMFNmD9Rus6gdd1pLuecOFPt8wC+Vygl78=
github.com/moby/sys/mountinfo v0.6.2/go.mod h1:IJb6JQeOklcdMU9F5xQ8ZALD+CUr5VlGpwtX+VE0rpI=
github.com/moby/sys/userns v0.1.0 h1:tVLXkFOxVu9A64/yh59slHVv9ahO9UIev4JZusOLG/g=
github.com/moby/sys/userns v0.1.0/go.mod h1:IHUYgu/kao6N8YZlp9Cf444ySSvCmDlmzUcYfDHOl28=
github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg=
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M=
github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk=
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA=
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ=
github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=
github.com/onsi/ginkgo/v2 v2.14.0 h1:vSmGj2Z5YPb9JwCWT6z6ihcUvDhuXLc3sJiqd3jMKAY=
github.com/onsi/ginkgo/v2 v2.14.0/go.mod h1:JkUdW7JkN0V6rFvsHcJ478egV3XH9NxpD27Hal/PhZw=
github.com/onsi/gomega v1.30.0 h1:hvMK7xYz4D3HapigLTeGdId/NcfQx1VHMJc60ew99+8=
github.com/onsi/gomega v1.30.0/go.mod h1:9sxs+SwGrKI0+PWe4Fxa9tFQQBG5xSsSbMXOI8PPpoQ=
github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U=
github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM=
github.com/opencontainers/image-spec v1.1.0 h1:8SG7/vwALn54lVB/0yZ/MMwhFrPYtpEHQb2IpWsCzug=
github.com/opencontainers/image-spec v1.1.0/go.mod h1:W4s4sFTMaBeK1BQLXbG4AdM2szdn85PY75RI83NrTrM=
github.com/package-url/packageurl-go v0.1.2 h1:0H2DQt6DHd/NeRlVwW4EZ4oEI6Bn40XlNPRqegcxuo4=
github.com/package-url/packageurl-go v0.1.2/go.mod h1:uQd4a7Rh3ZsVg5j0lNyAfyxIeGde9yrlhjF78GzeW0c=
github.com/pelletier/go-toml v1.9.4/go.mod h1:u1nR/EPcESfeI/szUZKdtJ0xRNbUoANCkoOuaOx1Y+c=
github.com/pelletier/go-toml v1.9.5 h1:4yBQzkHv+7BHq2PQUZF3Mx0IYxG7LsP222s7Agd3ve8=
github.com/pelletier/go-toml v1.9.5/go.mod h1:u1nR/EPcESfeI/szUZKdtJ0xRNbUoANCkoOuaOx1Y+c=
github.com/phayes/freeport v0.0.0-20220201140144-74d24b5ae9f5 h1:Ii+DKncOVM8Cu1Hc+ETb5K+23HdAMvESYE3ZJ5b5cMI=
github.com/phayes/freeport v0.0.0-20220201140144-74d24b5ae9f5/go.mod h1:iIss55rKnNBTvrwdmkUpLnDpZoAHvWaiq5+iMmen4AE=
github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U=
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw=
github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo=
github.com/prometheus/client_golang v1.1.0/go.mod h1:I1FGZT9+L76gKKOs5djB6ezCbFQP1xR9D75/vuwEF3g=
github.com/prometheus/client_golang v1.20.5 h1:cxppBPuYhUnsO6yo/aoRol4L7q7UFfdm+bR9r+8l63Y=
github.com/prometheus/client_golang v1.20.5/go.mod h1:PIEt8X02hGcP8JWbeHyeZ53Y/jReSnHgO035n//V5WE=
github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
github.com/prometheus/client_model v0.6.1 h1:ZKSh/rekM+n3CeS952MLRAdFwIKqeY8b62p8ais2e9E=
github.com/prometheus/client_model v0.6.1/go.mod h1:OrxVMOVHjw3lKMa8+x6HeMGkHMQyHDk9E3jmP2AmGiY=
github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
github.com/prometheus/common v0.6.0/go.mod h1:eBmuwkDJBwy6iBfxCBob6t6dR6ENT/y+J+Zk0j9GMYc=
github.com/prometheus/common v0.55.0 h1:KEi6DK7lXW/m7Ig5i47x0vRzuBsHuvJdi5ee6Y3G1dc=
github.com/prometheus/common v0.55.0/go.mod h1:2SECS4xJG1kd8XF9IcM1gMX6510RAEL65zxzNImwdc8=
github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
github.com/prometheus/procfs v0.0.3/go.mod h1:4A/X28fw3Fc593LaREMrKMqOKvUAntwMDaekg4FpcdQ=
github.com/prometheus/procfs v0.15.1 h1:YagwOFzUgYfKKHX6Dr+sHT7km/hxC76UB0learggepc=
github.com/prometheus/procfs v0.15.1/go.mod h1:fB45yRUv8NstnjriLhBQLuOUt+WW4BsoGhij/e3PBqk=
github.com/rogpeppe/go-internal v1.13.1 h1:KvO1DLK/DRN07sQ1LQKScxyZJuNnedQ5/wKSR38lUII=
github.com/rogpeppe/go-internal v1.13.1/go.mod h1:uMEvuHeurkdAXX61udpOXGD/AzZDWNMNyH2VO9fmH0o=
github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
github.com/samber/lo v1.47.0 h1:z7RynLwP5nbyRscyvcD043DWYoOcYRv3mV8lBeqOCLc=
github.com/samber/lo v1.47.0/go.mod h1:RmDH9Ct32Qy3gduHQuKJ3gW1fMHAnE/fAzQuf6He5cU=
github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo=
github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ=
github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ=
github.com/spf13/cobra v1.4.0/go.mod h1:Wo4iy3BUC+X2Fybo0PDqwJIv3dNRiZLHQymsfxlB84g=
github.com/spf13/cobra v1.8.1 h1:e5/vxKd/rZsfSJMUX1agtjeTDf+qv1/JdBF8gg5k9ZM=
github.com/spf13/cobra v1.8.1/go.mod h1:wHxEcudfqmLYa8iTfL+OuZPbBZkmvliBWKIezN3kD9Y=
github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA=
github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOfJA=
github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
github.com/vladimirvivien/gexe v0.1.1 h1:2A0SBaOSKH+cwLVdt6H+KkHZotZWRNLlWygANGw5DxE=
github.com/vladimirvivien/gexe v0.1.1/go.mod h1:LHQL00w/7gDUKIak24n801ABp8C+ni6eBht9vGVst8w=
github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/yvasiyarov/go-metrics v0.0.0-20140926110328-57bccd1ccd43 h1:+lm10QQTNSBd8DVTNGHx7o/IKu9HYDvLMffDhbyLccI=
github.com/yvasiyarov/go-metrics v0.0.0-20140926110328-57bccd1ccd43/go.mod h1:aX5oPXxHm3bOH+xeAttToC8pqch2ScQN/JoXYupl6xs=
github.com/yvasiyarov/gorelic v0.0.0-20141212073537-a9bba5b9ab50 h1:hlE8//ciYMztlGpl/VA+Zm1AcTPHYkHJPbHqE6WJUXE=
github.com/yvasiyarov/gorelic v0.0.0-20141212073537-a9bba5b9ab50/go.mod h1:NUSPSUX/bi6SeDMUh6brw0nXpxHnc96TguQh0+r/ssA=
github.com/yvasiyarov/newrelic_platform_go v0.0.0-20140908184405-b21fdbd4370f h1:ERexzlUfuTvpE74urLSbIQW0Z/6hF9t8U4NsJLaioAY=
github.com/yvasiyarov/newrelic_platform_go v0.0.0-20140908184405-b21fdbd4370f/go.mod h1:GlGEuHIJweS1mbCqG+7vt2nvWLzLLnRHbXz5JKd/Qbg=
go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0=
go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo=
go.opentelemetry.io/auto/sdk v1.1.0 h1:cH53jehLUN6UFLY71z+NDOiNJqDdPRaXzTel0sJySYA=
go.opentelemetry.io/auto/sdk v1.1.0/go.mod h1:3wSPjt5PWp2RhlCcmmOial7AvC4DQqZb7a7wCow3W8A=
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.53.0 h1:4K4tsIXefpVJtvA/8srF4V4y0akAoPHkIslgAkjixJA=
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.53.0/go.mod h1:jjdQuTGVsXV4vSs+CJ2qYDeDPf9yIJV23qlIzBm73Vg=
go.opentelemetry.io/otel v1.37.0 h1:9zhNfelUvx0KBfu/gb+ZgeAfAgtWrfHJZcAqFC228wQ=
go.opentelemetry.io/otel v1.37.0/go.mod h1:ehE/umFRLnuLa/vSccNq9oS1ErUlkkK71gMcN34UG8I=
go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp v1.37.0 h1:9PgnL3QNlj10uGxExowIDIZu66aVBwWhXmbOp1pa6RA=
go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp v1.37.0/go.mod h1:0ineDcLELf6JmKfuo0wvvhAVMuxWFYvkTin2iV4ydPQ=
go.opentelemetry.io/otel/metric v1.37.0 h1:mvwbQS5m0tbmqML4NqK+e3aDiO02vsf/WgbsdpcPoZE=
go.opentelemetry.io/otel/metric v1.37.0/go.mod h1:04wGrZurHYKOc+RKeye86GwKiTb9FKm1WHtO+4EVr2E=
go.opentelemetry.io/otel/sdk v1.37.0 h1:ItB0QUqnjesGRvNcmAcU0LyvkVyGJ2xftD29bWdDvKI=
go.opentelemetry.io/otel/sdk v1.37.0/go.mod h1:VredYzxUvuo2q3WRcDnKDjbdvmO0sCzOvVAiY+yUkAg=
go.opentelemetry.io/otel/sdk/metric v1.37.0 h1:90lI228XrB9jCMuSdA0673aubgRobVZFhbjxHHspCPc=
go.opentelemetry.io/otel/sdk/metric v1.37.0/go.mod h1:cNen4ZWfiD37l5NhS+Keb5RXVWZWpRE+9WyVCpbo5ps=
go.opentelemetry.io/otel/trace v1.37.0 h1:HLdcFNbRQBE2imdSEgm/kwqmQj1Or1l/7bW6mxVK7z4=
go.opentelemetry.io/otel/trace v1.37.0/go.mod h1:TlgrlQ+PtQO5XFerSPUYG0JSgGyryXewPGyayAWSBS0=
go.opentelemetry.io/proto/otlp v1.7.0 h1:jX1VolD6nHuFzOYso2E73H85i92Mv8JQYk0K9vz09os=
go.opentelemetry.io/proto/otlp v1.7.0/go.mod h1:fSKjH6YJ7HDlwzltzyMj036AJ3ejJLCgCSHGj4efDDo=
go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto=
go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE=
go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0=
go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y=
go.uber.org/zap v1.27.0 h1:aJMhYGrd5QSmlpLMr2MftRKl7t8J8PTZPA732ud/XR8=
go.uber.org/zap v1.27.0/go.mod h1:GB2qFLM7cTU87MWRP2mPIjqfIDnGu+VIO4V/SdhGo2E=
golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.45.0 h1:jMBrvKuj23MTlT0bQEOBcAE0mjg8mK9RXFhRH6nyF3Q=
golang.org/x/crypto v0.45.0/go.mod h1:XTGrrkGJve7CYK7J8PEww4aY7gM3qMCElcJQ8n8JdX4=
golang.org/x/exp v0.0.0-20241009180824-f66d83c29e7c h1:7dEasQXItcW1xKJ2+gg5VOiBnqWrJc+rq0DPKyvvdbY=
golang.org/x/exp v0.0.0-20241009180824-f66d83c29e7c/go.mod h1:NQtJDoLvd6faHhE7m4T/1IY708gDefGGjR/iUW8yQQ8=
golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
golang.org/x/net v0.47.0 h1:Mx+4dIFzqraBXUugkia1OOvlD6LemFo1ALMHjrXDOhY=
golang.org/x/net v0.47.0/go.mod h1:/jNxtkgq5yWUGYkaZGqo27cfGZ1c5Nen03aYrrKpVRU=
golang.org/x/oauth2 v0.30.0 h1:dnDm7JmhM45NNpd8FDDeLhK6FwqbOf4MLCM9zb1BOHI=
golang.org/x/oauth2 v0.30.0/go.mod h1:B++QgG3ZKulg6sRPGD/mqlHQs5rB3Ml9erfeDY7xKlU=
golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.18.0 h1:kr88TuHDroi+UVf+0hZnirlk8o8T+4MrK6mr60WkH/I=
golang.org/x/sync v0.18.0/go.mod h1:9KTHXmSnoGruLpwFjVSX0lNNA75CykiMECbovNTZqGI=
golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190801041406-cbf593c0f2f3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.38.0 h1:3yZWxaJjBmCWXqhN1qh02AkOnCQ1poK6oF+a7xWL6Gc=
golang.org/x/sys v0.38.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks=
golang.org/x/term v0.37.0 h1:8EGAD0qCmHYZg6J17DvsMy9/wJ7/D/4pV/wfnld5lTU=
golang.org/x/term v0.37.0/go.mod h1:5pB4lxRNYYVZuTLmy8oR2BH8dflOR+IbTYFD8fi3254=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.31.0 h1:aC8ghyu4JhP8VojJ2lEHBnochRno1sgL6nEi9WGFGMM=
golang.org/x/text v0.31.0/go.mod h1:tKRAlv61yKIjGGHX/4tP1LTbc13YSec1pxVEWXzfoeM=
golang.org/x/time v0.12.0 h1:ScB/8o8olJvc+CQPWrK3fPZNfh7qgwCrY0zJmoEQLSE=
golang.org/x/time v0.12.0/go.mod h1:CDIdPxbZBQxdj6cxyCIdrNogrJKMJ7pr37NYpMcMDSg=
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
golang.org/x/tools v0.38.0 h1:Hx2Xv8hISq8Lm16jvBZ2VQf+RLmbd7wVUsALibYI/IQ=
golang.org/x/tools v0.38.0/go.mod h1:yEsQ/d/YK8cjh0L6rZlY8tgtlKiBNTL14pGDJPJpYQs=
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20231012003039-104605ab7028 h1:+cNy6SZtPcJQH3LJVLOSmiC7MMxXNOb3PU/VUEz+EhU=
golang.org/x/xerrors v0.0.0-20231012003039-104605ab7028/go.mod h1:NDW/Ps6MPRej6fsCIbMTohpP40sJ/P/vI1MoTEGwX90=
gomodules.xyz/jsonpatch/v2 v2.4.0 h1:Ci3iUJyx9UeRx7CeFN8ARgGbkESwJK+KB9lLcWxY/Zw=
gomodules.xyz/jsonpatch/v2 v2.4.0/go.mod h1:AH3dM2RI6uoBZxn3LVrfvJ3E0/9dG4cSrbuBJT4moAY=
google.golang.org/genproto/googleapis/api v0.0.0-20250603155806-513f23925822 h1:oWVWY3NzT7KJppx2UKhKmzPq4SRe0LdCijVRwvGeikY=
google.golang.org/genproto/googleapis/api v0.0.0-20250603155806-513f23925822/go.mod h1:h3c4v36UTKzUiuaOKQ6gr3S+0hovBtUrXzTG/i3+XEc=
google.golang.org/genproto/googleapis/rpc v0.0.0-20250603155806-513f23925822 h1:fc6jSaCT0vBduLYZHYrBBNY4dsWuvgyff9noRNDdBeE=
google.golang.org/genproto/googleapis/rpc v0.0.0-20250603155806-513f23925822/go.mod h1:qQ0YXyHHx3XkvlzUtpXDkS29lDSafHMZBAZDc03LQ3A=
google.golang.org/grpc v1.73.1 h1:4fUIxjPNPmuxBHa5OZH4nBgi6pXo1o9rKSqzJF/VrHs=
google.golang.org/grpc v1.73.1/go.mod h1:50sbHOUqWoCQGI8V2HQLJM0B+LMlIUjNSZmow7EVBQc=
google.golang.org/protobuf v1.36.6 h1:z1NpPI8ku2WgiWnf+t9wTPsn6eP1L7ksHUlkfLvd9xY=
google.golang.org/protobuf v1.36.6/go.mod h1:jduwjTPXsFjZGTmRluh+L6NjiWu7pchiJ2/5YcXBHnY=
gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q=
gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc=
gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw=
gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY=
gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ=
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gotest.tools/v3 v3.4.0 h1:ZazjZUfuVeZGLAmlKKuyv3IKP5orXcwtOwDQH6YVr6o=
gotest.tools/v3 v3.4.0/go.mod h1:CtbdzLSsqVhDgMtKsx03ird5YTGB3ar27v0u/yKBW5g=
k8s.io/api v0.28.12 h1:C2hpsaso18pqn0Dmkfnbv/YCctozTC3KGGuZ6bF7zhQ=
k8s.io/api v0.28.12/go.mod h1:qjswI+whxvf9LAKD4sEYHfy+WgHGWeH+H5sCRQMwZAQ=
k8s.io/apiextensions-apiserver v0.28.12 h1:6GA64rylk5q0mbXfHHFVgfL1jx/4p6RU+Y+ni2DUuZc=
k8s.io/apiextensions-apiserver v0.28.12/go.mod h1:Len29ySvb/fnrXvioTxg2l6iFi97B53Bm3/jBMBllCE=
k8s.io/apimachinery v0.28.12 h1:VepMEVOi9o7L/4wMAXJq+3BK9tqBIeerTB+HSOTKeo0=
k8s.io/apimachinery v0.28.12/go.mod h1:zUG757HaKs6Dc3iGtKjzIpBfqTM4yiRsEe3/E7NX15o=
k8s.io/client-go v0.28.12 h1:li7iRPRQF3vDki6gTxT/kXWJvw3BkJSdjVPVhDTZQec=
k8s.io/client-go v0.28.12/go.mod h1:yEzH2Z+nEGlrnKyHJWcJsbOr5tGdIj04dj1TVQOg0wE=
k8s.io/component-base v0.28.12 h1:ZNq6QFFGCPjaAzWqYHaQRoAY5seoK3vP0pZOjgxOzNc=
k8s.io/component-base v0.28.12/go.mod h1:8zI5TmGuHX6R5Lay61Ox7wb+dsEENl0NBmVSiHMQu1c=
k8s.io/cri-api v0.25.0 h1:INwdXsCDSA/0hGNdPxdE2dQD6ft/5K1EaKXZixvSQxg=
k8s.io/cri-api v0.25.0/go.mod h1:J1rAyQkSJ2Q6I+aBMOVgg2/cbbebso6FNa0UagiR0kc=
k8s.io/klog/v2 v2.130.1 h1:n9Xl7H1Xvksem4KFG4PYbdQCQxqc/tTUyrgXaOhHSzk=
k8s.io/klog/v2 v2.130.1/go.mod h1:3Jpz1GvMt720eyJH1ckRHK1EDfpxISzJ7I9OYgaDtPE=
k8s.io/kube-openapi v0.0.0-20240228011516-70dd3763d340 h1:BZqlfIlq5YbRMFko6/PM7FjZpUb45WallggurYhKGag=
k8s.io/kube-openapi v0.0.0-20240228011516-70dd3763d340/go.mod h1:yD4MZYeKMBwQKVht279WycxKyM84kkAx2DPrTXaeb98=
k8s.io/utils v0.0.0-20240711033017-18e509b52bc8 h1:pUdcCO1Lk/tbT5ztQWOBi5HBgbBP1J8+AsQnQCKsi8A=
k8s.io/utils v0.0.0-20240711033017-18e509b52bc8/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0=
oras.land/oras-go v1.2.5 h1:XpYuAwAb0DfQsunIyMfeET92emK8km3W4yEzZvUbsTo=
oras.land/oras-go v1.2.5/go.mod h1:PuAwRShRZCsZb7g8Ar3jKKQR/2A/qN+pkYxIOd/FAoo=
sigs.k8s.io/controller-runtime v0.16.6 h1:FiXwTuFF5ZJKmozfP2Z0j7dh6kmxP4Ou1KLfxgKKC3I=
sigs.k8s.io/controller-runtime v0.16.6/go.mod h1:+dQzkZxnylD0u49e0a+7AR+vlibEBaThmPca7lTyUsI=
sigs.k8s.io/e2e-framework v0.0.8 h1:5cKzNv8d7cAVKrgnYnQxLP0TS1pQbyGNSTk1CI3aV1c=
sigs.k8s.io/e2e-framework v0.0.8/go.mod h1:fIMqwZHUiENwPxMXlsvwPXW5br6T2paZYrWodd/ZoaA=
sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd h1:EDPBXCAspyGV4jQlpZSudPeMmr1bNJefnuqLsRAsHZo=
sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd/go.mod h1:B8JuhiUyNFVKdsE8h686QcCxMaH6HrOAZj4vswFpcB0=
sigs.k8s.io/kind v0.15.0 h1:Fskj234L4hjQlsScCgeYvCBIRt06cjLzc7+kbr1u8Tg=
sigs.k8s.io/kind v0.15.0/go.mod h1:cKTqagdRyUQmihhBOd+7p43DpOPRn9rHsUC08K1Jbsk=
sigs.k8s.io/structured-merge-diff/v4 v4.4.1 h1:150L+0vs/8DA78h1u02ooW1/fFq/Lwr+sGiqlzvrtq4=
sigs.k8s.io/structured-merge-diff/v4 v4.4.1/go.mod h1:N8hJocpFajUSSeSJ9bOZ77VzejKZaXsTtZo4/u7Io08=
sigs.k8s.io/yaml v1.3.0/go.mod h1:GeOyir5tyXNByN85N/dRIT9es5UQNerPYEKK56eTBm8=
sigs.k8s.io/yaml v1.4.0 h1:Mk1wCc2gy/F0THH0TAp1QYyJNzRm2KCLy3o5ASXVI5E=
sigs.k8s.io/yaml v1.4.0/go.mod h1:Ejl7/uTz7PSA4eKMyQCUTnhZYNmLIl+5c2lQPGR2BPY=
================================================
FILE: hack/boilerplate.go.txt
================================================
/*
Copyright 2021.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
================================================
FILE: hack/go-install.sh
================================================
#!/usr/bin/env bash
# Installs a versioned copy of a Go module's binary into GOBIN and points a
# stable symlink at it (e.g. GOBIN/controller-gen -> GOBIN/controller-gen-vX.Y.Z).
# Adapted from:
# https://github.com/kubernetes-sigs/cluster-api-provider-azure/blob/master/scripts/go_install.sh
set -o errexit
set -o nounset
set -o pipefail

# BUG FIX: under `set -o nounset`, expanding an unset positional parameter as
# "${1}" aborts the script with an opaque "unbound variable" error before the
# friendly message below can print. "${N:-}" expands to the empty string
# instead, which lets the -z checks do their job.
if [[ -z "${1:-}" ]]; then
  echo "must provide module as first parameter"
  exit 1
fi

if [[ -z "${2:-}" ]]; then
  echo "must provide binary name as second parameter"
  exit 1
fi

if [[ -z "${3:-}" ]]; then
  echo "must provide version as third parameter"
  exit 1
fi

if [[ -z "${GOBIN:-}" ]]; then
  echo "GOBIN is not set. Must set GOBIN to install the bin in a specified directory."
  exit 1
fi

# Work in a throwaway module so `go install` never touches the caller's go.mod.
tmp_dir=$(mktemp -d -t goinstall_XXXXXXXXXX)
function clean {
  rm -rf "${tmp_dir}"
}
trap clean EXIT

# Remove any previously installed copies/symlinks of this binary; it is fine
# if none exist yet.
rm "${GOBIN}/${2}"* || true

cd "${tmp_dir}"

# create a new module in the tmp directory
go mod init fake/mod

# install the golang module specified as the first argument
go install "${1}@${3}"

# Keep a version-suffixed copy and a stable symlink pointing at it.
mv "${GOBIN}/${2}" "${GOBIN}/${2}-${3}"
ln -sf "${GOBIN}/${2}-${3}" "${GOBIN}/${2}"
================================================
FILE: hack/rootless_docker.sh
================================================
#!/usr/bin/env bash
# Detects whether the local dockerd daemon is running rootless.
# Prints "true" and exits 0 when dockerd runs as a non-root user;
# prints "false" (or nothing) and exits 1 otherwise.

# Collect the UID of each running dockerd process; the trailing `=` in
# `-o uid=` suppresses the ps header line.
readarray -t uids < <(ps -C dockerd -o uid=)

# Require exactly one dockerd process — with zero or multiple daemons there
# is no unambiguous answer, so treat it as "not rootless".
[[ "${#uids[@]}" != "1" ]] && exit 1

# NOTE: ${uids[0]} is intentionally unquoted — word splitting strips the
# whitespace padding that ps puts around the numeric UID.
if [ ${uids[0]} -ne 0 ]; then
  echo true
  exit 0
fi

echo false
exit 1
================================================
FILE: main.go
================================================
/*
Copyright 2021.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package main
import (
"context"
"flag"
"fmt"
"net/http"
_ "net/http/pprof"
"os"
"time"
// Import all Kubernetes client auth plugins (e.g. Azure, GCP, OIDC, etc.)
// to ensure that exec-entrypoint and run can make use of them.
_ "k8s.io/client-go/plugin/pkg/client/auth"
"k8s.io/utils/inotify"
"sigs.k8s.io/yaml"
corev1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/conversion"
"k8s.io/apimachinery/pkg/fields"
"k8s.io/apimachinery/pkg/runtime"
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
clientgoscheme "k8s.io/client-go/kubernetes/scheme"
ctrl "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/cache"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/healthz"
"sigs.k8s.io/controller-runtime/pkg/metrics/server"
"sigs.k8s.io/controller-runtime/pkg/webhook"
"github.com/eraser-dev/eraser/api/unversioned"
"github.com/eraser-dev/eraser/api/unversioned/config"
eraserv1 "github.com/eraser-dev/eraser/api/v1"
eraserv1alpha1 "github.com/eraser-dev/eraser/api/v1alpha1"
v1alpha1Config "github.com/eraser-dev/eraser/api/v1alpha1/config"
eraserv1alpha2 "github.com/eraser-dev/eraser/api/v1alpha2"
v1alpha2Config "github.com/eraser-dev/eraser/api/v1alpha2/config"
eraserv1alpha3 "github.com/eraser-dev/eraser/api/v1alpha3"
v1alpha3Config "github.com/eraser-dev/eraser/api/v1alpha3/config"
"github.com/eraser-dev/eraser/controllers"
"github.com/eraser-dev/eraser/pkg/logger"
"github.com/eraser-dev/eraser/pkg/utils"
"github.com/eraser-dev/eraser/version"
//+kubebuilder:scaffold:imports
)
var (
	// scheme aggregates every API type the manager can serialize/deserialize.
	scheme = runtime.NewScheme()
	// setupLog is the logger used during process startup.
	setupLog = ctrl.Log.WithName("setup")

	// Conversion functions from each supported versioned configuration
	// schema to the internal (unversioned) EraserConfig representation.
	fromV1alpha1 = eraserv1alpha1.Convert_v1alpha1_EraserConfig_To_unversioned_EraserConfig
	fromV1alpha2 = eraserv1alpha2.Convert_v1alpha2_EraserConfig_To_unversioned_EraserConfig
	fromV1alpha3 = eraserv1alpha3.Convert_v1alpha3_EraserConfig_To_unversioned_EraserConfig
)
func init() {
utilruntime.Must(clientgoscheme.AddToScheme(scheme))
utilruntime.Must(eraserv1alpha1.AddToScheme(scheme))
utilruntime.Must(eraserv1.AddToScheme(scheme))
//+kubebuilder:scaffold:scheme
}
// apiVersion is a minimal probe struct used to read only the apiVersion
// field of a configuration document, so we know which versioned schema to
// use for the full unmarshal.
type apiVersion struct {
	APIVersion string `json:"apiVersion"`
}

// convertFunc converts a versioned configuration of type T into the internal
// (unversioned) EraserConfig.
type convertFunc[T any] func(*T, *unversioned.EraserConfig, conversion.Scope) error
// main is the entry point of the eraser controller manager. It parses flags,
// loads the versioned configuration file, builds a controller-runtime manager
// with a cache scoped to the objects the controllers need, starts a config
// file watcher for hot-reloading, optionally serves pprof, and finally runs
// the manager until a termination signal arrives.
func main() {
	// The config watcher cancels this context when a configuration change
	// cannot be applied at runtime; the goroutine below then terminates the
	// process so it restarts with the new configuration.
	ctx, cancel := context.WithCancel(context.Background())
	go func() {
		<-ctx.Done()
		os.Exit(1)
	}()

	var configFile string
	flag.StringVar(&configFile, "config", "",
		"The controller will load its initial configuration from this file. "+
			"Omit this flag to use the default configuration values. "+
			"Command-line flags override configuration from this file.")
	flag.Parse()

	if err := logger.Configure(); err != nil {
		setupLog.Error(err, "unable to configure logger")
		os.Exit(1)
	}

	// these can all be overwritten using EraserConfig.
	options := ctrl.Options{
		Scheme:                 scheme,
		Metrics:                server.Options{BindAddress: ":8889"},
		WebhookServer:          webhook.NewServer(webhook.Options{Port: 9443}),
		HealthProbeBindAddress: ":8081",
		LeaderElection:         false,
		Cache: cache.Options{
			// Restrict the cache to the objects the controllers actually use;
			// namespaced objects are limited to the eraser namespace.
			ByObject: map[client.Object]cache.ByObject{
				// to watch eraser pods
				&corev1.Pod{}: {
					Field: fields.OneTermEqualSelector("metadata.namespace", utils.GetNamespace()),
				},
				// to watch eraser podTemplates
				&corev1.PodTemplate{}: {
					Field: fields.OneTermEqualSelector("metadata.namespace", utils.GetNamespace()),
				},
				// to watch eraser-manager-configs
				&corev1.ConfigMap{}: {
					Field: fields.OneTermEqualSelector("metadata.namespace", utils.GetNamespace()),
				},
				// to watch ImageJobs
				&eraserv1.ImageJob{}: {},
				// to watch ImageLists
				&eraserv1.ImageList{}: {},
			},
		},
	}

	// The configuration file is mandatory; there is no flag-only mode.
	if configFile == "" {
		setupLog.Error(fmt.Errorf("config file was not supplied"), "aborting")
		os.Exit(1)
	}

	cfg, err := getConfig(configFile)
	if err != nil {
		setupLog.Error(err, "error getting configuration")
		os.Exit(1)
	}

	setupLog.V(1).Info("eraser config",
		"manager", cfg.Manager,
		"components", cfg.Components,
		"options", fmt.Sprintf("%#v\n", options),
		"typeMeta", fmt.Sprintf("%#v\n", cfg.TypeMeta),
	)

	eraserOpts := config.NewManager(cfg)
	managerOpts := cfg.Manager

	// Watch the config file and hot-reload (or restart) on changes.
	watcher, err := setupWatcher(configFile)
	if err != nil {
		setupLog.Error(err, "unable to set up configuration file watch")
		os.Exit(1)
	}
	go startConfigWatch(cancel, watcher, eraserOpts, configFile)

	// Optional pprof endpoint, bound to localhost only.
	if managerOpts.Profile.Enabled {
		go func() {
			server := &http.Server{
				Addr:              fmt.Sprintf("localhost:%d", managerOpts.Profile.Port),
				ReadHeaderTimeout: 3 * time.Second,
			}
			err := server.ListenAndServe()
			setupLog.Error(err, "pprof server failed")
		}()
	}

	config := ctrl.GetConfigOrDie()
	config.UserAgent = version.GetUserAgent("manager")
	setupLog.Info("setting up manager", "userAgent", config.UserAgent)

	mgr, err := ctrl.NewManager(config, options)
	if err != nil {
		setupLog.Error(err, "unable to start manager")
		os.Exit(1)
	}

	setupLog.Info("setup controllers")
	if err = controllers.SetupWithManager(mgr, eraserOpts); err != nil {
		setupLog.Error(err, "unable to setup controllers")
		os.Exit(1)
	}

	//+kubebuilder:scaffold:builder

	if err := mgr.AddHealthzCheck("healthz", healthz.Ping); err != nil {
		setupLog.Error(err, "unable to set up health check")
		os.Exit(1)
	}
	if err := mgr.AddReadyzCheck("readyz", healthz.Ping); err != nil {
		setupLog.Error(err, "unable to set up ready check")
		os.Exit(1)
	}

	setupLog.Info("starting manager")
	// Blocks until a termination signal is received or the manager fails.
	if err := mgr.Start(ctrl.SetupSignalHandler()); err != nil {
		setupLog.Error(err, "problem running manager")
		os.Exit(1)
	}
}
// getConfig reads the configuration file at configFile, detects its
// apiVersion, and converts it to the internal (unversioned) EraserConfig.
// Supported versions are eraser.sh/v1alpha1, v1alpha2 and v1alpha3; any
// other apiVersion is an error.
//
// Errors are returned rather than handled here: at startup main exits on
// failure, while the config hot-reload loop (startConfigWatch) logs and
// keeps running with the previous configuration. The original implementation
// called os.Exit(1) on read/unmarshal failures, which killed the process
// during a transient reload failure even though the watch loop was written
// to continue.
func getConfig(configFile string) (*unversioned.EraserConfig, error) {
	//nolint:gosec // G304: Reading config file is intended functionality
	fileBytes, err := os.ReadFile(configFile)
	if err != nil {
		return nil, fmt.Errorf("reading config file %q: %w", configFile, err)
	}

	// Peek at the apiVersion field only, so we know which versioned schema
	// (and set of defaults) to use for the full unmarshal.
	var av apiVersion
	if err := yaml.Unmarshal(fileBytes, &av); err != nil {
		return nil, fmt.Errorf("unmarshaling apiVersion from config: %w", err)
	}

	switch av.APIVersion {
	case "eraser.sh/v1alpha1":
		return getUnversioned(fileBytes, v1alpha1Config.Default(), fromV1alpha1)
	case "eraser.sh/v1alpha2":
		return getUnversioned(fileBytes, v1alpha2Config.Default(), fromV1alpha2)
	case "eraser.sh/v1alpha3":
		return getUnversioned(fileBytes, v1alpha3Config.Default(), fromV1alpha3)
	default:
		// BUG FIX: the original returned `nil, err` here, but err was always
		// nil at this point, so unknown apiVersions yielded (nil, nil) and
		// callers would later dereference a nil config.
		return nil, fmt.Errorf("unknown api version: %q", av.APIVersion)
	}
}
// getUnversioned unmarshals the raw configuration bytes on top of the
// provided versioned defaults, then converts the result into the internal
// (unversioned) EraserConfig using the supplied conversion function.
func getUnversioned[T any](b []byte, defaults *T, convert convertFunc[T]) (*unversioned.EraserConfig, error) {
	// Unmarshal over the defaults so unset fields keep their default values.
	versioned := defaults
	if err := yaml.Unmarshal(b, versioned); err != nil {
		setupLog.Error(err, "configuration is either missing or invalid")
		return nil, err
	}

	result := new(unversioned.EraserConfig)
	if err := convert(versioned, result, nil); err != nil {
		return nil, err
	}
	return result, nil
}
// setupWatcher creates an inotify watcher on the configuration file.
//
// Kubernetes updates configmap volumes by writing a fresh file, flipping a
// symlink, and deleting the old file, so the interesting signal is
// IN_DELETE_SELF. Because the kernel drops the watch after that event, we
// also subscribe to IN_IGNORED so the watch loop can reestablish itself.
// See https://ahmet.im/blog/kubernetes-inotify/ for more information.
func setupWatcher(configFile string) (*inotify.Watcher, error) {
	w, err := inotify.NewWatcher()
	if err != nil {
		return nil, err
	}
	if err := w.AddWatch(configFile, inotify.InDeleteSelf|inotify.InIgnored); err != nil {
		return nil, err
	}
	return w, nil
}
// startConfigWatch consumes inotify events for the configuration file and
// hot-reloads the manager's configuration on each change. If the old and new
// configurations differ in a way that cannot be applied at runtime (see
// needsRestart), the supplied cancel function is invoked, which causes the
// process to exit and restart with the new configuration.
//
// This function loops forever and is intended to run in its own goroutine.
func startConfigWatch(cancel context.CancelFunc, watcher *inotify.Watcher, eraserOpts *config.Manager, filename string) {
	for {
		select {
		case ev := <-watcher.Event:
			// by default inotify removes a watch on a file on an IN_DELETE_SELF
			// event, so we have to remove and reinstate the watch
			setupLog.V(1).Info("event", "event", ev)
			if ev.Mask&inotify.InIgnored != 0 {
				err := watcher.RemoveWatch(filename)
				if err != nil {
					setupLog.Error(err, "unable to remove watch on config")
				}
				err = watcher.AddWatch(filename, inotify.InDeleteSelf|inotify.InIgnored)
				if err != nil {
					setupLog.Error(err, "unable to set up new watch on configuration")
				}
				continue
			}

			// Snapshot the currently active configuration so we can decide
			// later whether the change requires a restart.
			var err error
			oldConfig := new(unversioned.EraserConfig)
			*oldConfig, err = eraserOpts.Read()
			if err != nil {
				setupLog.Error(err, "configuration could not be read", "event", ev, "filename", filename)
			}

			// Parse the updated file from disk; on failure keep running with
			// the previous configuration.
			newConfig, err := getConfig(filename)
			if err != nil {
				setupLog.Error(err, "configuration is missing or invalid", "event", ev, "filename", filename)
				continue
			}

			if err = eraserOpts.Update(newConfig); err != nil {
				setupLog.Error(err, "configuration update failed")
				continue
			}

			// read back the new configuration
			*newConfig, err = eraserOpts.Read()
			if err != nil {
				setupLog.Error(err, "unable to read back new configuration")
				continue
			}

			if needsRestart(oldConfig, newConfig) {
				setupLog.Info("configurations differ in an irreconcileable way, restarting", "old", oldConfig.Components, "new", newConfig.Components)
				// restarts the manager
				cancel()
			}

			setupLog.V(1).Info("new configuration", "manager", newConfig.Manager, "components", newConfig.Components)

		case err := <-watcher.Error:
			setupLog.Error(err, "file watcher error")
		}
	}
}
// needsRestart reports whether the difference between the old and new
// configurations can only be reconciled by restarting the manager process.
// This is the case when the enabled/disabled state of the collector or the
// scanner component changes.
func needsRestart(oldConfig, newConfig *unversioned.EraserConfig) bool {
	collectorChanged := oldConfig.Components.Collector.Enabled != newConfig.Components.Collector.Enabled
	scannerChanged := oldConfig.Components.Scanner.Enabled != newConfig.Components.Scanner.Enabled
	return collectorChanged || scannerChanged
}
================================================
FILE: manifest_staging/deploy/eraser.yaml
================================================
apiVersion: v1
kind: Namespace
metadata:
labels:
control-plane: controller-manager
name: eraser-system
---
apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
annotations:
controller-gen.kubebuilder.io/version: v0.14.0
name: imagejobs.eraser.sh
spec:
group: eraser.sh
names:
kind: ImageJob
listKind: ImageJobList
plural: imagejobs
singular: imagejob
scope: Cluster
versions:
- name: v1
schema:
openAPIV3Schema:
description: ImageJob is the Schema for the imagejobs API.
properties:
apiVersion:
description: |-
APIVersion defines the versioned schema of this representation of an object.
Servers should convert recognized schemas to the latest internal value, and
may reject unrecognized values.
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources
type: string
kind:
description: |-
Kind is a string value representing the REST resource this object represents.
Servers may infer this from the endpoint the client submits requests to.
Cannot be updated.
In CamelCase.
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
type: string
metadata:
type: object
status:
description: ImageJobStatus defines the observed state of ImageJob.
properties:
deleteAfter:
description: Time to delay deletion until
format: date-time
type: string
desired:
description: desired number of pods
type: integer
failed:
description: number of pods that failed
type: integer
phase:
description: job running, successfully completed, or failed
type: string
skipped:
description: number of nodes that were skipped e.g. because they are not a linux node
type: integer
succeeded:
description: number of pods that completed successfully
type: integer
required:
- desired
- failed
- phase
- skipped
- succeeded
type: object
type: object
served: true
storage: true
subresources:
status: {}
- deprecated: true
deprecationWarning: v1alpha1 of the eraser API has been deprecated. Please migrate to v1.
name: v1alpha1
schema:
openAPIV3Schema:
description: ImageJob is the Schema for the imagejobs API.
properties:
apiVersion:
description: |-
APIVersion defines the versioned schema of this representation of an object.
Servers should convert recognized schemas to the latest internal value, and
may reject unrecognized values.
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources
type: string
kind:
description: |-
Kind is a string value representing the REST resource this object represents.
Servers may infer this from the endpoint the client submits requests to.
Cannot be updated.
In CamelCase.
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
type: string
metadata:
type: object
status:
description: ImageJobStatus defines the observed state of ImageJob.
properties:
deleteAfter:
description: Time to delay deletion until
format: date-time
type: string
desired:
description: desired number of pods
type: integer
failed:
description: number of pods that failed
type: integer
phase:
description: job running, successfully completed, or failed
type: string
skipped:
description: number of nodes that were skipped e.g. because they are not a linux node
type: integer
succeeded:
description: number of pods that completed successfully
type: integer
required:
- desired
- failed
- phase
- skipped
- succeeded
type: object
type: object
served: true
storage: false
subresources:
status: {}
---
apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
annotations:
controller-gen.kubebuilder.io/version: v0.14.0
name: imagelists.eraser.sh
spec:
group: eraser.sh
names:
kind: ImageList
listKind: ImageListList
plural: imagelists
singular: imagelist
scope: Cluster
versions:
- name: v1
schema:
openAPIV3Schema:
description: ImageList is the Schema for the imagelists API.
properties:
apiVersion:
description: |-
APIVersion defines the versioned schema of this representation of an object.
Servers should convert recognized schemas to the latest internal value, and
may reject unrecognized values.
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources
type: string
kind:
description: |-
Kind is a string value representing the REST resource this object represents.
Servers may infer this from the endpoint the client submits requests to.
Cannot be updated.
In CamelCase.
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
type: string
metadata:
type: object
spec:
description: ImageListSpec defines the desired state of ImageList.
properties:
images:
description: The list of non-compliant images to delete if non-running.
items:
type: string
type: array
required:
- images
type: object
status:
description: ImageListStatus defines the observed state of ImageList.
properties:
failed:
description: Number of nodes that failed to run the job
format: int64
type: integer
skipped:
description: Number of nodes that were skipped due to a skip selector
format: int64
type: integer
success:
description: Number of nodes that successfully ran the job
format: int64
type: integer
timestamp:
description: Information when the job was completed.
format: date-time
type: string
required:
- failed
- skipped
- success
- timestamp
type: object
type: object
served: true
storage: true
subresources:
status: {}
- deprecated: true
deprecationWarning: v1alpha1 of the eraser API has been deprecated. Please migrate to v1.
name: v1alpha1
schema:
openAPIV3Schema:
description: ImageList is the Schema for the imagelists API.
properties:
apiVersion:
description: |-
APIVersion defines the versioned schema of this representation of an object.
Servers should convert recognized schemas to the latest internal value, and
may reject unrecognized values.
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources
type: string
kind:
description: |-
Kind is a string value representing the REST resource this object represents.
Servers may infer this from the endpoint the client submits requests to.
Cannot be updated.
In CamelCase.
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
type: string
metadata:
type: object
spec:
description: ImageListSpec defines the desired state of ImageList.
properties:
images:
description: The list of non-compliant images to delete if non-running.
items:
type: string
type: array
required:
- images
type: object
status:
description: ImageListStatus defines the observed state of ImageList.
properties:
failed:
description: Number of nodes that failed to run the job
format: int64
type: integer
skipped:
description: Number of nodes that were skipped due to a skip selector
format: int64
type: integer
success:
description: Number of nodes that successfully ran the job
format: int64
type: integer
timestamp:
description: Information when the job was completed.
format: date-time
type: string
required:
- failed
- skipped
- success
- timestamp
type: object
type: object
served: true
storage: false
subresources:
status: {}
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: eraser-controller-manager
namespace: eraser-system
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: eraser-imagejob-pods
namespace: eraser-system
---
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
name: eraser-manager-role
namespace: eraser-system
rules:
- apiGroups:
- ""
resources:
- configmaps
verbs:
- create
- delete
- get
- list
- patch
- update
- watch
- apiGroups:
- ""
resources:
- pods
verbs:
- create
- delete
- get
- list
- update
- watch
- apiGroups:
- ""
resources:
- podtemplates
verbs:
- create
- delete
- get
- list
- patch
- update
- watch
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: eraser-manager-role
rules:
- apiGroups:
- ""
resources:
- nodes
verbs:
- get
- list
- watch
- apiGroups:
- eraser.sh
resources:
- imagejobs
verbs:
- create
- delete
- get
- list
- watch
- apiGroups:
- eraser.sh
resources:
- imagejobs/status
verbs:
- get
- patch
- update
- apiGroups:
- eraser.sh
resources:
- imagelists
verbs:
- get
- list
- watch
- apiGroups:
- eraser.sh
resources:
- imagelists/status
verbs:
- get
- patch
- update
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
name: eraser-manager-rolebinding
namespace: eraser-system
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
name: eraser-manager-role
subjects:
- kind: ServiceAccount
name: eraser-controller-manager
namespace: eraser-system
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: eraser-manager-rolebinding
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: eraser-manager-role
subjects:
- kind: ServiceAccount
name: eraser-controller-manager
namespace: eraser-system
---
apiVersion: v1
data:
controller_manager_config.yaml: |
apiVersion: eraser.sh/v1alpha3
kind: EraserConfig
manager:
runtime:
name: containerd
address: unix:///run/containerd/containerd.sock
otlpEndpoint: ""
logLevel: info
scheduling:
repeatInterval: 24h
beginImmediately: true
profile:
enabled: false
port: 6060
imageJob:
successRatio: 1.0
cleanup:
delayOnSuccess: 0s
delayOnFailure: 24h
pullSecrets: [] # image pull secrets for collector/scanner/eraser
priorityClassName: "" # priority class name for collector/scanner/eraser
additionalPodLabels: {}
nodeFilter:
type: exclude # must be either exclude|include
selectors:
- eraser.sh/cleanup.filter
- kubernetes.io/os=windows
components:
collector:
enabled: true
image:
repo: ghcr.io/eraser-dev/collector
tag: v1.5.0-beta.0
request:
mem: 25Mi
cpu: 7m
limit:
mem: 500Mi
# https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/#how-pods-with-resource-limits-are-run
cpu: 0
scanner:
enabled: true
image:
repo: ghcr.io/eraser-dev/eraser-trivy-scanner # supply custom image for custom scanner
tag: v1.5.0-beta.0
request:
mem: 500Mi
cpu: 1000m
limit:
mem: 2Gi
# https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/#how-pods-with-resource-limits-are-run
cpu: 0
# The config needs to be passed through to the scanner as yaml, as a
# single string. Because we allow custom scanner images, the scanner is
# responsible for defining a schema, parsing, and validating.
config: |
# this is the schema for the provided 'trivy-scanner'. custom scanners
# will define their own configuration.
cacheDir: /var/lib/trivy
dbRepo: ghcr.io/aquasecurity/trivy-db
deleteFailedImages: true
deleteEOLImages: true
vulnerabilities:
ignoreUnfixed: false
types:
- os
- library
securityChecks:
- vuln
severities:
- CRITICAL
- HIGH
- MEDIUM
- LOW
ignoredStatuses:
timeout:
total: 23h
perImage: 1h
volumes: []
remover:
image:
repo: ghcr.io/eraser-dev/remover
tag: v1.5.0-beta.0
request:
mem: 25Mi
# https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/#how-pods-with-resource-limits-are-run
cpu: 0
limit:
mem: 30Mi
cpu: 0
kind: ConfigMap
metadata:
name: eraser-manager-config
namespace: eraser-system
---
apiVersion: apps/v1
kind: Deployment
metadata:
labels:
control-plane: controller-manager
name: eraser-controller-manager
namespace: eraser-system
spec:
replicas: 1
selector:
matchLabels:
control-plane: controller-manager
template:
metadata:
labels:
control-plane: controller-manager
spec:
containers:
- args:
- --config=/config/controller_manager_config.yaml
command:
- /manager
env:
- name: POD_NAMESPACE
valueFrom:
fieldRef:
apiVersion: v1
fieldPath: metadata.namespace
- name: OTEL_SERVICE_NAME
value: eraser-manager
image: ghcr.io/eraser-dev/eraser-manager:v1.5.0-beta.0
livenessProbe:
httpGet:
path: /healthz
port: 8081
initialDelaySeconds: 15
periodSeconds: 20
name: manager
readinessProbe:
httpGet:
path: /readyz
port: 8081
initialDelaySeconds: 5
periodSeconds: 10
resources:
limits:
memory: 30Mi
requests:
cpu: 100m
memory: 20Mi
securityContext:
allowPrivilegeEscalation: false
capabilities:
drop:
- ALL
readOnlyRootFilesystem: true
runAsGroup: 65532
runAsNonRoot: true
runAsUser: 65532
seccompProfile:
type: RuntimeDefault
volumeMounts:
- mountPath: /config
name: manager-config
nodeSelector:
kubernetes.io/os: linux
serviceAccountName: eraser-controller-manager
terminationGracePeriodSeconds: 10
volumes:
- configMap:
name: eraser-manager-config
name: manager-config
================================================
FILE: pkg/collector/collector.go
================================================
package main
import (
"encoding/json"
"flag"
"fmt"
"io"
"net/http"
_ "net/http/pprof"
"os"
"time"
"github.com/eraser-dev/eraser/pkg/cri"
"github.com/eraser-dev/eraser/pkg/logger"
"golang.org/x/sys/unix"
logf "sigs.k8s.io/controller-runtime/pkg/log"
util "github.com/eraser-dev/eraser/pkg/utils"
)
var (
	// enableProfile toggles the pprof HTTP endpoint.
	enableProfile = flag.Bool("enable-pprof", false, "enable pprof profiling")
	// profilePort is the localhost port the pprof server listens on.
	profilePort = flag.Int("pprof-port", 6060, "port for pprof profiling. defaulted to 6060 if unspecified")
	// scanDisabled indicates the scanner container is not running; when set,
	// the collector writes its results to the scan/erase pipe instead of the
	// collect/scan pipe.
	scanDisabled = flag.Bool("scan-disabled", false, "boolean for if scanner container is disabled")

	// Timeout of connecting to server (default: 5m).
	timeout = 5 * time.Minute

	log = logf.Log.WithName("collector")

	// excluded is the set of image references parsed from the exclusion
	// configmaps (see util.ParseExcluded); presumably consulted when listing
	// images — confirm against getImages.
	excluded map[string]struct{}
)
// main is the collector entrypoint. It lists all images on the node via the
// CRI socket, filters out running and excluded images (in getImages), and
// writes the JSON-encoded result to a named pipe read by the next stage:
// the scanner by default, or the remover when -scan-disabled is set. It then
// blocks on a second pipe until the remover confirms completion, and exits
// non-zero on any failure or on a garbled confirmation message.
func main() {
	flag.Parse()

	// Optionally expose pprof on localhost for debugging.
	if *enableProfile {
		go func() {
			server := &http.Server{
				Addr:              fmt.Sprintf("localhost:%d", *profilePort),
				ReadHeaderTimeout: 3 * time.Second,
			}
			err := server.ListenAndServe()
			log.Error(err, "pprof server failed")
		}()
	}

	if err := logger.Configure(); err != nil {
		fmt.Fprintln(os.Stderr, "Error setting up logger:", err)
		os.Exit(1)
	}

	client, err := cri.NewCollectorClient(util.CRIPath)
	if err != nil {
		log.Error(err, "failed to get image client")
		os.Exit(1)
	}

	// Load the exclusion list; a missing configmap is not fatal, it simply
	// means nothing is excluded.
	excluded, err = util.ParseExcluded()
	if os.IsNotExist(err) {
		log.Info("configmaps for exclusion do not exist")
	} else if err != nil {
		log.Error(err, "failed to parse exclusion list")
		os.Exit(1)
	}
	if len(excluded) == 0 {
		log.Info("no images to exclude")
	}

	// finalImages of type []Image
	finalImages, err := getImages(client)
	if err != nil {
		log.Error(err, "failed to list all images")
		os.Exit(1)
	}
	log.Info("images collected", "finalImages:", finalImages)

	data, err := json.Marshal(finalImages)
	if err != nil {
		log.Error(err, "failed to encode finalImages")
		os.Exit(1)
	}

	// Choose the downstream pipe: scanner by default, remover when the
	// scanner container is disabled.
	path := util.CollectScanPath
	if *scanDisabled {
		path = util.ScanErasePath
	}

	if err := unix.Mkfifo(path, util.PipeMode); err != nil {
		log.Error(err, "failed to create pipe", "pipeFile", path)
		os.Exit(1)
	}

	// NOTE(review): opening a FIFO write-only blocks until a reader attaches,
	// so this open also synchronizes with the downstream container.
	//nolint:gosec // G304: Opening pipe file is intended functionality
	file, err := os.OpenFile(path, os.O_WRONLY, 0)
	if err != nil {
		log.Error(err, "failed to open pipe", "pipeFile", path)
		os.Exit(1)
	}
	if _, err := file.Write(data); err != nil {
		log.Error(err, "failed to write to pipe", "pipeFile", path)
		os.Exit(1)
	}
	if err := file.Close(); err != nil {
		log.Error(err, "failed to close pipe", "pipeFile", path)
		os.Exit(1)
	}

	// Wait for the remover to signal completion on a second pipe; anything
	// other than the expected message is treated as failure.
	if err := unix.Mkfifo(util.EraseCompleteCollectPath, util.PipeMode); err != nil {
		log.Error(err, "failed to create pipe", "pipeFile", util.EraseCompleteCollectPath)
		os.Exit(1)
	}
	file, err = os.OpenFile(util.EraseCompleteCollectPath, os.O_RDONLY, 0)
	if err != nil {
		log.Error(err, "failed to open pipe", "pipeFile", util.EraseCompleteCollectPath)
		os.Exit(1)
	}
	data, err = io.ReadAll(file)
	if err != nil {
		log.Error(err, "failed to read pipe", "pipeFile", util.EraseCompleteCollectPath)
		os.Exit(1)
	}
	if err := file.Close(); err != nil {
		log.Error(err, "failed to close pipe", "pipeFile", util.EraseCompleteCollectPath)
		os.Exit(1)
	}
	if string(data) != util.EraseCompleteMessage {
		log.Info("garbage in pipe", "pipeFile", util.EraseCompleteCollectPath, "in_pipe", string(data))
		os.Exit(1)
	}
}
================================================
FILE: pkg/collector/helpers.go
================================================
package main
import (
"context"
"github.com/eraser-dev/eraser/api/unversioned"
"github.com/eraser-dev/eraser/pkg/cri"
util "github.com/eraser-dev/eraser/pkg/utils"
)
// getImages queries the CRI for all images and containers on the node and
// returns the images that are neither running nor on the exclusion list.
// Results are deduplicated by image ID.
func getImages(c cri.Collector) ([]unversioned.Image, error) {
	ctx, cancel := context.WithTimeout(context.Background(), timeout)
	defer cancel()

	images, err := c.ListImages(ctx)
	if err != nil {
		return nil, err
	}

	all := make([]unversioned.Image, 0, len(images))
	// byID maps imageID -> image record (names and digests).
	byID := make(map[string]unversioned.Image)
	for _, img := range images {
		names := make([]string, 0, len(img.RepoTags))
		names = append(names, img.RepoTags...)

		entry := unversioned.Image{
			ImageID: img.Id,
			Names:   names,
		}

		digests, digestErrs := util.ProcessRepoDigests(img.RepoDigests)
		for _, derr := range digestErrs {
			log.Error(derr, "error processing digest")
		}
		entry.Digests = append(entry.Digests, digests...)

		all = append(all, entry)
		byID[img.Id] = entry
	}

	containers, err := c.ListContainers(ctx)
	if err != nil {
		return nil, err
	}

	// Both maps are keyed by (digest | name) and hold image IDs.
	running := util.GetRunningImages(containers, byID)
	notRunning := util.GetNonRunningImages(running, all, byID)

	result := make([]unversioned.Image, 0, len(images))
	// seen guards against the same ID appearing under both a name key and a
	// digest key in notRunning.
	seen := make(map[string]struct{})
	for _, id := range notRunning {
		if _, done := seen[id]; done {
			continue
		}
		seen[id] = struct{}{}

		rec := byID[id]
		candidate := unversioned.Image{
			ImageID: id,
			Names:   rec.Names,
			Digests: rec.Digests,
		}
		if !util.IsExcluded(excluded, candidate.ImageID, byID) {
			result = append(result, candidate)
		}
	}
	return result, nil
}
================================================
FILE: pkg/cri/client.go
================================================
package cri
import (
"context"
"fmt"
"github.com/eraser-dev/eraser/pkg/utils"
"google.golang.org/grpc"
v1 "k8s.io/cri-api/pkg/apis/runtime/v1"
v1alpha2 "k8s.io/cri-api/pkg/apis/runtime/v1alpha2"
)
const (
	// CRI runtime API versions this package can speak; v1 is preferred and
	// tried first (see newClientWithFallback).
	RuntimeV1       runtimeVersion = "v1"
	RuntimeV1Alpha2 runtimeVersion = "v1alpha2"
)

type (
	// Collector is the read-only view of a CRI runtime: enough to enumerate
	// the images and containers present on a node.
	Collector interface {
		ListImages(context.Context) ([]*v1.Image, error)
		ListContainers(context.Context) ([]*v1.Container, error)
	}

	// Remover extends Collector with the ability to delete images.
	Remover interface {
		Collector
		DeleteImage(context.Context, string) error
	}

	// runtimeTryFunc probes a gRPC connection for one CRI API version and
	// returns the version string reported by the runtime.
	runtimeTryFunc func(context.Context, *grpc.ClientConn) (string, error)
)
// NewCollectorClient dials the CRI socket at socketPath and returns the
// read-only Collector view. It delegates to NewRemoverClient because a
// Remover is a superset of Collector.
func NewCollectorClient(socketPath string) (Collector, error) {
	return NewRemoverClient(socketPath)
}
// NewRemoverClient connects to the CRI socket at socketPath and returns a
// Remover backed by whichever CRI API version the runtime supports.
func NewRemoverClient(socketPath string) (Remover, error) {
	ctx := context.Background()
	conn, connErr := utils.GetConn(ctx, socketPath)
	if connErr != nil {
		return nil, connErr
	}
	return newClientWithFallback(ctx, conn)
}
// newClientWithFallback probes the runtime for the v1 API first and falls
// back to v1alpha2. If neither works, it returns every probe error joined
// into one.
func newClientWithFallback(ctx context.Context, conn *grpc.ClientConn) (Remover, error) {
	accumulated := new(errors)
	for _, probe := range []runtimeTryFunc{tryV1, tryV1Alpha2} {
		version, err := probe(ctx, conn)
		if err != nil {
			accumulated.Append(err)
			continue
		}
		client, err := getClientFromRuntimeVersion(conn, version)
		if err != nil {
			accumulated.Append(err)
			continue
		}
		return client, nil
	}
	return nil, accumulated
}
// tryV1Alpha2 asks the runtime for its version over the v1alpha2 API and
// returns the reported runtime API version string.
func tryV1Alpha2(ctx context.Context, conn *grpc.ClientConn) (string, error) {
	client := v1alpha2.NewRuntimeServiceClient(conn)
	resp, err := client.Version(ctx, &v1alpha2.VersionRequest{})
	if err != nil {
		return "", err
	}
	return resp.RuntimeApiVersion, nil
}
// tryV1 asks the runtime for its version over the v1 API and returns the
// reported runtime API version string.
func tryV1(ctx context.Context, conn *grpc.ClientConn) (string, error) {
	client := v1.NewRuntimeServiceClient(conn)
	resp, err := client.Version(ctx, &v1.VersionRequest{})
	if err != nil {
		return "", err
	}
	return resp.RuntimeApiVersion, nil
}
// getClientFromRuntimeVersion builds the concrete client matching the
// runtime's reported API version, or errors on an unknown version.
func getClientFromRuntimeVersion(conn *grpc.ClientConn, runtimeAPIVersion string) (Remover, error) {
	switch runtimeVersion(runtimeAPIVersion) {
	case RuntimeV1:
		return &v1Client{
			images:  v1.NewImageServiceClient(conn),
			runtime: v1.NewRuntimeServiceClient(conn),
		}, nil
	case RuntimeV1Alpha2:
		return &v1alpha2Client{
			images:  v1alpha2.NewImageServiceClient(conn),
			runtime: v1alpha2.NewRuntimeServiceClient(conn),
		}, nil
	}
	return nil, fmt.Errorf("unrecognized CRI version: '%s'", runtimeAPIVersion)
}
================================================
FILE: pkg/cri/client_v1.go
================================================
package cri
import (
"context"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/status"
v1 "k8s.io/cri-api/pkg/apis/runtime/v1"
)
type (
	// v1Client implements Collector and Remover on top of the CRI v1 API.
	v1Client struct {
		images  v1.ImageServiceClient
		runtime v1.RuntimeServiceClient
	}
)
// ListContainers returns every container known to the runtime.
func (c *v1Client) ListContainers(ctx context.Context) (list []*v1.Container, err error) {
	resp, rpcErr := c.runtime.ListContainers(ctx, &v1.ListContainersRequest{})
	if rpcErr != nil {
		return nil, rpcErr
	}
	return resp.Containers, nil
}
// ListImages returns every image known to the runtime (no filter applied).
func (c *v1Client) ListImages(ctx context.Context) (list []*v1.Image, err error) {
	resp, rpcErr := c.images.ListImages(ctx, &v1.ListImagesRequest{Filter: nil})
	if rpcErr != nil {
		return nil, rpcErr
	}
	return resp.Images, nil
}
// DeleteImage removes the named image from the node. An empty name is a
// no-op, and a NotFound response is treated as success (already gone).
func (c *v1Client) DeleteImage(ctx context.Context, image string) (err error) {
	if image == "" {
		return nil
	}
	req := &v1.RemoveImageRequest{Image: &v1.ImageSpec{Image: image}}
	if _, err = c.images.RemoveImage(ctx, req); err != nil && status.Code(err) != codes.NotFound {
		return err
	}
	return nil
}
================================================
FILE: pkg/cri/client_v1alpha2.go
================================================
package cri
import (
"context"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/status"
v1 "k8s.io/cri-api/pkg/apis/runtime/v1"
v1alpha2 "k8s.io/cri-api/pkg/apis/runtime/v1alpha2"
)
type (
	// v1alpha2Client implements Collector and Remover on top of the older
	// CRI v1alpha2 API, converting all results to v1 types so callers only
	// ever see the v1 shapes.
	v1alpha2Client struct {
		images  v1alpha2.ImageServiceClient
		runtime v1alpha2.RuntimeServiceClient
	}
)
// ListContainers returns every container known to the runtime, converted to
// CRI v1 types.
func (c *v1alpha2Client) ListContainers(ctx context.Context) (list []*v1.Container, err error) {
	resp, rpcErr := c.runtime.ListContainers(ctx, &v1alpha2.ListContainersRequest{})
	if rpcErr != nil {
		return nil, rpcErr
	}
	return convertContainers(resp.Containers), nil
}
// ListImages returns every image known to the runtime (no filter applied),
// converted to CRI v1 types.
func (c *v1alpha2Client) ListImages(ctx context.Context) (list []*v1.Image, err error) {
	resp, rpcErr := c.images.ListImages(ctx, &v1alpha2.ListImagesRequest{Filter: nil})
	if rpcErr != nil {
		return nil, rpcErr
	}
	return convertImages(resp.Images), nil
}
// DeleteImage removes the named image from the node. An empty name is a
// no-op, and a NotFound response is treated as success (already gone).
func (c *v1alpha2Client) DeleteImage(ctx context.Context, image string) (err error) {
	if image == "" {
		return nil
	}
	req := &v1alpha2.RemoveImageRequest{Image: &v1alpha2.ImageSpec{Image: image}}
	if _, err = c.images.RemoveImage(ctx, req); err != nil && status.Code(err) != codes.NotFound {
		return err
	}
	return nil
}
// convertContainers maps a slice of v1alpha2 containers to their v1
// equivalents. The result is never nil, matching the original contract.
func convertContainers(list []*v1alpha2.Container) []*v1.Container {
	// Pre-size the destination: the length is known, so avoid repeated
	// growth-and-copy during append.
	v1s := make([]*v1.Container, 0, len(list))
	for _, c := range list {
		v1s = append(v1s, convertContainer(c))
	}
	return v1s
}
// convertImages maps a slice of v1alpha2 images to their v1 equivalents.
// The result is never nil, matching the original contract.
func convertImages(list []*v1alpha2.Image) []*v1.Image {
	// Pre-size the destination: the length is known, so avoid repeated
	// growth-and-copy during append.
	v1s := make([]*v1.Image, 0, len(list))
	for _, img := range list {
		v1s = append(v1s, convertImage(img))
	}
	return v1s
}
// convertContainer translates a single v1alpha2 container to the v1 type,
// field by field. Nil input yields nil output; the nested Image and
// Metadata messages are only populated when present on the source.
func convertContainer(c *v1alpha2.Container) *v1.Container {
	if c == nil {
		return nil
	}

	var imageSpec *v1.ImageSpec
	if c.Image != nil {
		imageSpec = &v1.ImageSpec{
			Image:       c.Image.Image,
			Annotations: c.Image.Annotations,
		}
	}

	var meta *v1.ContainerMetadata
	if c.Metadata != nil {
		meta = &v1.ContainerMetadata{
			Name:    c.Metadata.Name,
			Attempt: c.Metadata.Attempt,
		}
	}

	return &v1.Container{
		Id:           c.Id,
		PodSandboxId: c.PodSandboxId,
		Metadata:     meta,
		Image:        imageSpec,
		ImageRef:     c.ImageRef,
		State:        v1.ContainerState(c.State),
		CreatedAt:    c.CreatedAt,
		Labels:       c.Labels,
		Annotations:  c.Annotations,
	}
}
// convertImage translates a single v1alpha2 image to the v1 type, field by
// field. Nil input yields nil output; the nested Spec and Uid messages are
// only populated when present on the source.
func convertImage(i *v1alpha2.Image) *v1.Image {
	if i == nil {
		return nil
	}

	var spec *v1.ImageSpec
	if i.Spec != nil {
		spec = &v1.ImageSpec{
			Image:       i.Spec.Image,
			Annotations: i.Spec.Annotations,
		}
	}

	var uid *v1.Int64Value
	if i.Uid != nil {
		uid = &v1.Int64Value{Value: i.Uid.Value}
	}

	return &v1.Image{
		Id:          i.Id,
		RepoTags:    i.RepoTags,
		RepoDigests: i.RepoDigests,
		Size_:       i.Size_,
		Uid:         uid,
		Username:    i.Username,
		Spec:        spec,
		Pinned:      i.Pinned,
	}
}
================================================
FILE: pkg/cri/util.go
================================================
package cri
import (
"strings"
)
type (
	// runtimeVersion names a CRI API version ("v1", "v1alpha2").
	runtimeVersion string
	// errors aggregates several probe failures into one error value.
	errors []error
)

// Error joins every collected error message with newlines so the aggregate
// satisfies the builtin error interface.
func (errs errors) Error() string {
	msgs := make([]string, 0, len(errs))
	for _, e := range errs {
		msgs = append(msgs, e.Error())
	}
	return strings.Join(msgs, "\n")
}

// Append records err in the collection; nil errors are silently ignored.
func (errs *errors) Append(err error) {
	if err == nil {
		return
	}
	*errs = append(*errs, err)
}
================================================
FILE: pkg/logger/zap.go
================================================
package logger
import (
"flag"
"fmt"
"github.com/go-logr/logr"
"go.uber.org/zap"
"go.uber.org/zap/zapcore"
klog "k8s.io/klog/v2"
ctrl "sigs.k8s.io/controller-runtime"
crzap "sigs.k8s.io/controller-runtime/pkg/log/zap"
)
// logLevel is the -log-level flag shared by binaries using this package;
// its value must parse as one of zap's textual levels (see Configure).
var logLevel = flag.String("log-level", zapcore.InfoLevel.String(),
	fmt.Sprintf("Log verbosity level. Supported values (in order of detail) are %q, %q, %q, and %q.",
		zapcore.DebugLevel.String(),
		zapcore.InfoLevel.String(),
		zapcore.WarnLevel.String(),
		zapcore.ErrorLevel.String()))
// GetLevel gets the configured log level as the raw flag string. Callers
// should ensure flag.Parse has run for user-supplied values to be visible.
func GetLevel() string {
	return *logLevel
}
// Configure configures a singleton logger for use from controller-runtime.
//
// The verbosity comes from the package-level -log-level flag. Debug level
// uses a human-readable console encoder in dev mode; every other level uses
// production JSON encoding. The resulting logger is installed for both
// controller-runtime and klog. Returns an error when the flag value is not
// a recognized zap level.
func Configure() error {
	var zapLevel zapcore.Level
	if err := zapLevel.UnmarshalText([]byte(*logLevel)); err != nil {
		return fmt.Errorf("unable to parse log level: %w: %s", err, *logLevel)
	}

	var logger logr.Logger
	switch zapLevel {
	case zap.DebugLevel:
		// Dev mode with a console encoder for readability while debugging.
		cfg := zap.NewDevelopmentEncoderConfig()
		logger = crzap.New(crzap.UseDevMode(true), crzap.Encoder(zapcore.NewConsoleEncoder(cfg)), crzap.Level(zapLevel))
	default:
		// Production mode with structured JSON output.
		cfg := zap.NewProductionEncoderConfig()
		logger = crzap.New(crzap.UseDevMode(false), crzap.Encoder(zapcore.NewJSONEncoder(cfg)), crzap.Level(zapLevel))
	}

	// Install the logger exactly once for both controller-runtime and klog.
	// (Previously the debug branch also called these, installing twice.)
	ctrl.SetLogger(logger)
	klog.SetLogger(logger)
	return nil
}
================================================
FILE: pkg/metrics/metrics.go
================================================
package metrics
import (
"context"
"os"
"github.com/go-logr/logr"
"go.opentelemetry.io/otel/attribute"
"go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp"
"go.opentelemetry.io/otel/metric"
"go.opentelemetry.io/otel/sdk/instrumentation"
sdkmetric "go.opentelemetry.io/otel/sdk/metric"
"go.opentelemetry.io/otel/sdk/metric/metricdata"
)
const (
	// ImagesRemovedCounter is the metric name for the number of images
	// removed during a run.
	ImagesRemovedCounter = "images_removed_run_total"
	// ImagesRemovedDescription is the human-readable description attached
	// to ImagesRemovedCounter.
	ImagesRemovedDescription = "total images removed"
)
// ConfigureMetrics builds an insecure OTLP/HTTP exporter against endpoint,
// a periodic reader, and a meter provider with a histogram view for the
// imagejob-duration metric. On exporter failure it logs the error and
// returns three nil values.
func ConfigureMetrics(ctx context.Context, log logr.Logger, endpoint string) (sdkmetric.Exporter, sdkmetric.Reader, *sdkmetric.MeterProvider) {
	exporter, err := otlpmetrichttp.New(ctx, otlpmetrichttp.WithInsecure(), otlpmetrichttp.WithEndpoint(endpoint))
	if err != nil {
		log.Error(err, "error initializing exporter")
		return nil, nil, nil
	}

	reader := sdkmetric.NewPeriodicReader(exporter)

	// Bucket imagejob durations into 10-second intervals up to one minute.
	histogramView := sdkmetric.NewView(
		sdkmetric.Instrument{
			Name:  "imagejob_duration_run_seconds",
			Scope: instrumentation.Scope{Name: "eraser"},
		},
		sdkmetric.Stream{
			Name: "imagejob_duration_run_seconds",
			Unit: "s",
			Aggregation: sdkmetric.AggregationExplicitBucketHistogram{
				Boundaries: []float64{0, 10, 20, 30, 40, 50, 60},
			},
		},
	)

	provider := sdkmetric.NewMeterProvider(sdkmetric.WithReader(reader), sdkmetric.WithView(histogramView))
	return exporter, reader, provider
}
// ExportMetrics collects all pending metrics from reader and pushes them
// through exporter. Failures are logged, not returned.
func ExportMetrics(log logr.Logger, exporter sdkmetric.Exporter, reader sdkmetric.Reader) {
	ctx := context.Background()

	var rm metricdata.ResourceMetrics
	if err := reader.Collect(ctx, &rm); err != nil {
		log.Error(err, "failed to collect metrics")
		return
	}
	if err := exporter.Export(ctx, &rm); err != nil {
		log.Error(err, "failed to export metrics")
	}
}
// RecordMetricsRemover adds totalRemoved to the images-removed counter,
// tagged with the node name from the NODE_NAME environment variable.
func RecordMetricsRemover(ctx context.Context, p metric.MeterProvider, totalRemoved int64) error {
	counter, err := p.Meter("eraser").Int64Counter(
		ImagesRemovedCounter,
		metric.WithDescription(ImagesRemovedDescription),
		metric.WithUnit("1"),
	)
	if err != nil {
		return err
	}
	nodeAttr := metric.WithAttributes(attribute.String("node name", os.Getenv("NODE_NAME")))
	counter.Add(ctx, totalRemoved, nodeAttr)
	return nil
}
// RecordMetricsScanner adds totalVulnerable to the vulnerable-images
// counter, tagged with the node name from the NODE_NAME environment
// variable.
func RecordMetricsScanner(ctx context.Context, p metric.MeterProvider, totalVulnerable int) error {
	counter, err := p.Meter("eraser").Int64Counter(
		"vulnerable_images_run_total",
		metric.WithDescription("total vulnerable images"),
		metric.WithUnit("1"),
	)
	if err != nil {
		return err
	}
	nodeAttr := metric.WithAttributes(attribute.String("node name", os.Getenv("NODE_NAME")))
	counter.Add(ctx, int64(totalVulnerable), nodeAttr)
	return nil
}
// RecordMetricsController records the controller-side metrics for one
// imagejob run: its duration histogram, the completed/failed pod counters,
// and an increment of the total-jobs counter. The first instrument-creation
// error aborts the whole recording.
func RecordMetricsController(ctx context.Context, p metric.MeterProvider, jobDuration float64, podsCompleted int64, podsFailed int64) error {
	// Create the meter once instead of once per instrument.
	meter := p.Meter("eraser")

	duration, err := meter.Float64Histogram("imagejob_duration_run_seconds", metric.WithDescription("duration of imagejob"), metric.WithUnit("s"))
	if err != nil {
		return err
	}
	duration.Record(ctx, jobDuration)

	completed, err := meter.Int64Counter("pods_completed_run_total", metric.WithDescription("total pods completed"), metric.WithUnit("1"))
	if err != nil {
		return err
	}
	completed.Add(ctx, podsCompleted)

	failed, err := meter.Int64Counter("pods_failed_run_total", metric.WithDescription("total pods failed"), metric.WithUnit("1"))
	if err != nil {
		return err
	}
	failed.Add(ctx, podsFailed)

	jobTotal, err := meter.Int64Counter("imagejob_run_total", metric.WithDescription("total number of imagejobs completed"), metric.WithUnit("1"))
	if err != nil {
		return err
	}
	jobTotal.Add(ctx, 1)

	return nil
}
================================================
FILE: pkg/metrics/metrics_test.go
================================================
package metrics
import (
"context"
"testing"
"github.com/go-logr/logr"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"go.opentelemetry.io/otel"
"go.opentelemetry.io/otel/metric"
sdkmetric "go.opentelemetry.io/otel/sdk/metric"
"go.opentelemetry.io/otel/sdk/metric/metricdata"
)
// TestConfigureMetrics verifies that ConfigureMetrics returns all three
// non-nil components, then installs the provider globally for the
// subsequent tests.
func TestConfigureMetrics(t *testing.T) {
	exporter, reader, provider := ConfigureMetrics(context.Background(), logr.Discard(), "otel-collector:4318")
	if exporter == nil {
		t.Fatal("unable to configure exporter")
	}
	// BUG FIX: the next two messages previously said "exporter" (copy-paste).
	if reader == nil {
		t.Fatal("unable to configure reader")
	}
	if provider == nil {
		t.Fatal("unable to configure provider")
	}
	otel.SetMeterProvider(provider)
}
// TestRecordMetrics exercises all three Record* helpers against the global
// meter provider installed by TestConfigureMetrics.
func TestRecordMetrics(t *testing.T) {
	if err := RecordMetricsRemover(context.Background(), otel.GetMeterProvider(), 1); err != nil {
		t.Fatal("could not record eraser metrics")
	}
	if err := RecordMetricsScanner(context.Background(), otel.GetMeterProvider(), 1); err != nil {
		t.Fatal("could not record scanner metrics")
	}
	// BUG FIX: this message previously said "scanner metrics" (copy-paste).
	if err := RecordMetricsController(context.Background(), otel.GetMeterProvider(), 1.0, 1, 1); err != nil {
		t.Fatal("could not record controller metrics")
	}
}
// TestMeterCreatesInstrument verifies that a counter created through a
// manual-reader provider is registered and appears in collected metrics
// under the expected name.
func TestMeterCreatesInstrument(t *testing.T) {
	testCases := []struct {
		name string
		fn   func(*testing.T, metric.Meter)
	}{
		{
			name: "AsyncInt64Count",
			fn: func(t *testing.T, m metric.Meter) {
				ctr, err := m.Int64Counter(ImagesRemovedCounter)
				assert.NoError(t, err)
				// ctr.Add returns nothing; the original re-asserted the same
				// err here a second time, which was redundant and removed.
				ctr.Add(context.Background(), 1)
			},
		},
	}

	for _, tt := range testCases {
		t.Run(tt.name, func(t *testing.T) {
			rdr := sdkmetric.NewManualReader()
			m := sdkmetric.NewMeterProvider(sdkmetric.WithReader(rdr)).Meter("eraser")
			tt.fn(t, m)

			var rm metricdata.ResourceMetrics
			err := rdr.Collect(context.Background(), &rm)
			assert.NoError(t, err)

			require.Len(t, rm.ScopeMetrics, 1)
			sm := rm.ScopeMetrics[0]
			require.Len(t, sm.Metrics, 1)

			got := sm.Metrics[0]
			if got.Name != ImagesRemovedCounter {
				t.Error("ImagesRemovedCounter not created")
			}
		})
	}
}
================================================
FILE: pkg/scanners/template/scanner_template.go
================================================
package template
import (
	"context"
	"fmt"
	"io"
	"os"
	"os/signal"
	"syscall"

	"github.com/eraser-dev/eraser/api/unversioned"
	"github.com/go-logr/logr"
	"go.opentelemetry.io/otel"
	"golang.org/x/sys/unix"
	logf "sigs.k8s.io/controller-runtime/pkg/log"

	"github.com/eraser-dev/eraser/pkg/metrics"
	util "github.com/eraser-dev/eraser/pkg/utils"
)
// ImageProvider is the interface custom scanners use to communicate with
// the rest of the Eraser pipeline (collector and remover containers).
type ImageProvider interface {
	// ReceiveImages receives the list of all non-running, non-excluded
	// images from the collector container to process.
	ReceiveImages() ([]unversioned.Image, error)
	// SendImages sends non-compliant images found to the remover container
	// for removal; failedImages may be folded in depending on configuration.
	SendImages(nonCompliantImages, failedImages []unversioned.Image) error
	// Finish completes the scanner communication process - required after
	// custom scanning finishes.
	Finish() error
}
// config carries the options of the template image provider and is itself
// the ImageProvider implementation returned by NewImageProvider.
type config struct {
	ctx                    context.Context // context used when reading the collector pipe
	log                    logr.Logger
	deleteScanFailedImages bool // fold failed images into the removal set in SendImages
	deleteEOLImages        bool // stored for scanner implementations; not read by this file's pipeline
	reportMetrics          bool // export scanner metrics via OTLP in SendImages
}

// ConfigFunc mutates a config; used as functional options for
// NewImageProvider.
type ConfigFunc func(*config)
// NewImageProvider constructs an ImageProvider with default settings
// (background context, "scanner" logger, delete-scan-failed enabled,
// metrics disabled), then applies any supplied options on top.
func NewImageProvider(funcs ...ConfigFunc) ImageProvider {
	provider := &config{
		ctx:                    context.Background(),
		log:                    logf.Log.WithName("scanner"),
		deleteScanFailedImages: true,
		reportMetrics:          false,
	}
	for _, apply := range funcs {
		apply(provider)
	}
	return provider
}
// ReceiveImages prepares the erase-complete pipe used later by Finish, then
// blocks reading the collector pipe and returns the delivered image list.
func (cfg *config) ReceiveImages() ([]unversioned.Image, error) {
	// Create the pipe over which the remover will later signal completion.
	if err := unix.Mkfifo(util.EraseCompleteScanPath, util.PipeMode); err != nil {
		cfg.log.Error(err, "failed to create pipe", "pipeName", util.EraseCompleteScanPath)
		return nil, err
	}
	if err := os.Chmod(util.EraseCompleteScanPath, 0o600); err != nil {
		cfg.log.Error(err, "unable to enable pipe for writing", "pipeName", util.EraseCompleteScanPath)
		return nil, err
	}

	images, err := util.ReadCollectScanPipe(cfg.ctx)
	if err != nil {
		cfg.log.Error(err, "unable to read images from collect scan pipe")
		return nil, err
	}
	return images, nil
}
// SendImages forwards nonCompliantImages to the remover over the scan-erase
// pipe. When deleteScanFailedImages is set, failedImages are folded into
// the non-compliant set first. When metrics reporting is enabled, the count
// of images sent is exported via OTLP.
func (cfg *config) SendImages(nonCompliantImages, failedImages []unversioned.Image) error {
	if cfg.deleteScanFailedImages {
		nonCompliantImages = append(nonCompliantImages, failedImages...)
	}

	if err := util.WriteScanErasePipe(nonCompliantImages); err != nil {
		cfg.log.Error(err, "unable to write non-compliant images to scan erase pipe")
		return err
	}

	if !cfg.reportMetrics {
		return nil
	}
	return cfg.exportScannerMetrics(len(nonCompliantImages))
}

// exportScannerMetrics records and exports the scanned-image count via OTLP
// using the endpoint from OTEL_EXPORTER_OTLP_ENDPOINT.
func (cfg *config) exportScannerMetrics(count int) error {
	ctx, cancel := signal.NotifyContext(context.Background(), os.Interrupt, syscall.SIGTERM)
	defer cancel()

	exporter, reader, provider := metrics.ConfigureMetrics(ctx, cfg.log, os.Getenv("OTEL_EXPORTER_OTLP_ENDPOINT"))
	otel.SetMeterProvider(provider)

	if err := metrics.RecordMetricsScanner(ctx, otel.GetMeterProvider(), count); err != nil {
		cfg.log.Error(err, "error recording metrics")
		return err
	}
	metrics.ExportMetrics(cfg.log, exporter, reader)
	return nil
}
// Finish completes the scanner side of the handshake: it blocks reading the
// erase-complete pipe until the remover reports completion, and validates
// the message received. It returns a non-nil error on any I/O failure or on
// an unexpected message.
func (cfg *config) Finish() error {
	file, err := os.OpenFile(util.EraseCompleteScanPath, os.O_RDONLY, 0)
	if err != nil {
		cfg.log.Error(err, "failed to open pipe", "pipeName", util.EraseCompleteScanPath)
		return err
	}

	data, err := io.ReadAll(file)
	if err != nil {
		cfg.log.Error(err, "failed to read pipe", "pipeName", util.EraseCompleteScanPath)
		return err
	}

	if err := file.Close(); err != nil {
		cfg.log.Error(err, "failed to close pipe", "pipeName", util.EraseCompleteScanPath)
		return err
	}

	if string(data) != util.EraseCompleteMessage {
		cfg.log.Info("garbage in pipe", "pipeName", util.EraseCompleteScanPath, "in_pipe", string(data))
		// BUG FIX: the original did `return err` here, but err is always nil
		// at this point, so a garbled handshake was reported as success.
		return fmt.Errorf("unexpected message in pipe %s: %q", util.EraseCompleteScanPath, string(data))
	}

	cfg.log.Info("scanning complete, exiting")
	return nil
}
// WithContext overrides the default background context used by the
// provider (e.g. for reading the collector pipe).
func WithContext(ctx context.Context) ConfigFunc {
	return func(cfg *config) {
		cfg.ctx = ctx
	}
}
// WithDeleteScanFailedImages sets the deleteScanFailedImages flag: when
// true, images that failed to scan are sent to the remover as well.
func WithDeleteScanFailedImages(deleteScanFailedImages bool) ConfigFunc {
	return func(cfg *config) {
		cfg.deleteScanFailedImages = deleteScanFailedImages
	}
}
// WithDeleteEOLImages sets the deleteEOLImages flag; the value is stored on
// the provider for scanner implementations to consult.
func WithDeleteEOLImages(deleteEOLImages bool) ConfigFunc {
	return func(cfg *config) {
		cfg.deleteEOLImages = deleteEOLImages
	}
}
// WithLogger replaces the provider's default "scanner" logger.
func WithLogger(log logr.Logger) ConfigFunc {
	return func(cfg *config) {
		cfg.log = log
	}
}
// WithMetrics toggles OTLP metrics export from SendImages.
func WithMetrics(reportMetrics bool) ConfigFunc {
	return func(cfg *config) {
		cfg.reportMetrics = reportMetrics
	}
}
================================================
FILE: pkg/scanners/trivy/helpers.go
================================================
package main
import (
"os"
unversioned "github.com/eraser-dev/eraser/api/unversioned"
"k8s.io/apimachinery/pkg/util/yaml"
)
// loadConfig reads the eraser configuration file at filename and unmarshals
// the scanner-specific section over the default trivy config. The returned
// Config is the best-effort value even on error; callers should treat a
// non-nil error as fatal.
func loadConfig(filename string) (Config, error) {
	cfg := *DefaultConfig()

	//nolint:gosec // G304: Reading config file is intended functionality
	b, err := os.ReadFile(filename)
	if err != nil {
		log.Error(err, "unable to read eraser config")
		return cfg, err
	}

	var eraserConfig unversioned.EraserConfig
	if err := yaml.Unmarshal(b, &eraserConfig); err != nil {
		log.Error(err, "unable to unmarshal eraser config")
		// BUG FIX: this error was previously logged but not returned,
		// letting a corrupt config silently fall through to the defaults.
		return cfg, err
	}

	// The scanner section is an opaque YAML string; absent means "all
	// defaults".
	scanCfgYaml := eraserConfig.Components.Scanner.Config
	scanCfgBytes := []byte("")
	if scanCfgYaml != nil {
		scanCfgBytes = []byte(*scanCfgYaml)
	}

	if err := yaml.Unmarshal(scanCfgBytes, &cfg); err != nil {
		log.Error(err, "unable to unmarshal scanner config")
		return cfg, err
	}

	return cfg, nil
}
================================================
FILE: pkg/scanners/trivy/trivy.go
================================================
package main
import (
"context"
"errors"
"flag"
"fmt"
"net/http"
"os"
"time"
"github.com/eraser-dev/eraser/api/unversioned"
_ "net/http/pprof"
trivylogger "github.com/aquasecurity/trivy/pkg/log"
"github.com/eraser-dev/eraser/pkg/logger"
"github.com/eraser-dev/eraser/pkg/scanners/template"
"github.com/eraser-dev/eraser/pkg/utils"
logf "sigs.k8s.io/controller-runtime/pkg/log"
)
const (
	// generalErr is the catch-all process exit code.
	generalErr = 1

	// Trivy severity levels.
	severityCritical = "CRITICAL"
	severityHigh     = "HIGH"
	severityMedium   = "MEDIUM"
	severityLow      = "LOW"
	severityUnknown  = "UNKNOWN"

	// Vulnerability classes trivy can scan for.
	vulnTypeOs      = "os"
	vulnTypeLibrary = "library"

	// Trivy security-check (scanner) names.
	securityCheckVuln   = "vuln"
	securityCheckConfig = "config"
	securityCheckSecret = "secret"

	// Vulnerability status values (usable with --ignore-status).
	statusUnknown            = "unknown"
	statusAffected           = "affected"
	statusFixed              = "fixed"
	statusUnderInvestigation = "under_investigation"
	statusWillNotFix         = "will_not_fix"
	statusFixDeferred        = "fix_deferred"
	statusEndOfLife          = "end_of_life"
)
var (
	// config is the path to the eraser configuration file; empty means use
	// DefaultConfig.
	config        = flag.String("config", "", "path to the configuration file")
	enableProfile = flag.Bool("enable-pprof", false, "enable pprof profiling")
	profilePort   = flag.Int("pprof-port", 6060, "port for pprof profiling. defaulted to 6060 if unspecified")
	log           = logf.Log.WithName("scanner").WithValues("provider", "trivy")
	// This can be overwritten by the linker.
	trivyVersion = "dev"
)
// main wires the trivy scanner into the eraser pipeline: load config,
// receive candidate images from the collector, scan them, forward the
// vulnerable (and optionally failed) images to the remover, and wait for
// the remover to finish.
func main() {
	flag.Parse()

	if err := logger.Configure(); err != nil {
		fmt.Fprintf(os.Stderr, "error setting up logger: %s", err)
		os.Exit(generalErr)
	}

	log.Info("trivy version", "trivy version", trivyVersion)
	log.Info("config", "config", *config)

	userConfig := *DefaultConfig()
	if *config != "" {
		var err error
		userConfig, err = loadConfig(*config)
		if err != nil {
			log.Error(err, "unable to read config")
			os.Exit(generalErr)
		}
	}

	log.V(1).Info("userConfig",
		"json", userConfig,
		"struct", fmt.Sprintf("%#v\n", userConfig),
	)

	if *enableProfile {
		go runProfileServer()
	}

	// Metrics are exported only when an OTLP endpoint is configured.
	recordMetrics := os.Getenv("OTEL_EXPORTER_OTLP_ENDPOINT") != ""

	ctx := context.Background()
	provider := template.NewImageProvider(
		template.WithContext(ctx),
		template.WithLogger(log),
		template.WithMetrics(recordMetrics),
		template.WithDeleteScanFailedImages(userConfig.DeleteFailedImages),
		template.WithDeleteEOLImages(userConfig.DeleteEOLImages),
	)

	allImages, err := provider.ReceiveImages()
	if err != nil {
		log.Error(err, "unable to read images from provider")
		os.Exit(generalErr)
	}

	s, err := initScanner(&userConfig)
	if err != nil {
		log.Error(err, "error initializing scanner")
		// BUG FIX: execution previously continued with a nil scanner, which
		// would panic inside scan(); fail fast instead.
		os.Exit(generalErr)
	}

	// A total-timeout error still yields partial results: failedImages then
	// includes everything not scanned before the deadline.
	vulnerableImages, failedImages, err := scan(s, allImages)
	if err != nil {
		log.Error(err, "total image scan timed out")
	}

	log.Info("Vulnerable", "Images", vulnerableImages, "Total count", len(vulnerableImages))
	if len(failedImages) > 0 {
		log.Info("Failed", "Images", failedImages)
	}

	if err := provider.SendImages(vulnerableImages, failedImages); err != nil {
		log.Error(err, "unable to write images")
	}

	log.Info("scanning complete, waiting for remover to finish...")
	if err := provider.Finish(); err != nil {
		log.Error(err, "unable to complete scanning process")
	}
	log.Info("remover job completed, shutting down...")
}
// runProfileServer serves pprof on localhost at the configured port. It
// blocks until the server fails, so callers run it in a goroutine.
func runProfileServer() {
	srv := &http.Server{
		Addr:              fmt.Sprintf("localhost:%d", *profilePort),
		ReadHeaderTimeout: 3 * time.Second,
	}
	log.Error(srv.ListenAndServe(), "pprof server failed")
}
// initScanner builds the trivy-backed Scanner from userConfig, overriding
// the runtime spec with the name from the environment and the CRI socket
// path, and starting the total-timeout timer.
func initScanner(userConfig *Config) (Scanner, error) {
	if userConfig == nil {
		return nil, fmt.Errorf("invalid trivy scanner config")
	}

	trivylogger.InitLogger(false, false)

	userConfig.Runtime = unversioned.RuntimeSpec{
		Name:    unversioned.Runtime(os.Getenv(utils.EnvEraserRuntimeName)),
		Address: utils.CRIPath,
	}

	scanner := &ImageScanner{
		config: *userConfig,
		timer:  time.NewTimer(time.Duration(userConfig.Timeout.Total)),
	}
	return scanner, nil
}
// scan runs s.Scan over every image, honoring the scanner's total-timeout
// timer. It returns the vulnerable images, the images that failed to scan,
// and a non-nil error only when the total timeout fires — in which case all
// not-yet-scanned images (including the current one) are marked failed.
func scan(s Scanner, allImages []unversioned.Image) ([]unversioned.Image, []unversioned.Image, error) {
	vulnerable := make([]unversioned.Image, 0, len(allImages))
	failed := make([]unversioned.Image, 0, len(allImages))

	for i, img := range allImages {
		select {
		case <-s.Timer().C:
			// Total scan budget exhausted.
			failed = append(failed, allImages[i:]...)
			return vulnerable, failed, errors.New("image scan total timeout exceeded")
		default:
			status, err := s.Scan(img)
			if err != nil {
				// Per-image failures are logged and collected, not fatal.
				failed = append(failed, img)
				log.Error(err, "scan failed")
				continue
			}
			switch status {
			case StatusNonCompliant:
				log.Info("vulnerable image found", "img", img)
				vulnerable = append(vulnerable, img)
			case StatusFailed:
				failed = append(failed, img)
			}
		}
	}
	return vulnerable, failed, nil
}
================================================
FILE: pkg/scanners/trivy/trivy_test.go
================================================
package main
================================================
FILE: pkg/scanners/trivy/types.go
================================================
package main
import (
"bytes"
"encoding/json"
"fmt"
"os"
"os/exec"
"strings"
"time"
trivyTypes "github.com/aquasecurity/trivy/pkg/types"
"github.com/eraser-dev/eraser/api/unversioned"
"github.com/eraser-dev/eraser/pkg/utils"
)
const (
	// ScanStatus values; StatusFailed is the zero value.
	StatusFailed ScanStatus = iota
	StatusNonCompliant
	StatusOK

	// Values accepted by trivy's --image-src flag.
	ImgSrcPodman     = "podman"
	ImgSrcDocker     = "docker"
	ImgSrcContainerd = "containerd"
)

const (
	// Pieces of the trivy CLI invocation; see cliArgs for how they are
	// assembled and ordered.
	trivyCommandName        = "/trivy"
	trivyImageArg           = "image"
	trivyJSONFormatFlag     = "--format=json"
	trivyCacheDirFlag       = "--cache-dir"
	trivyTimeoutFlag        = "--timeout"
	trivyDBRepoFlag         = "--db-repository"
	trivyIgnoreUnfixedFlag  = "--ignore-unfixed"
	trivyVulnTypesFlag      = "--vuln-type"
	trivySecurityChecksFlag = "--scanners"
	trivySeveritiesFlag     = "--severity"
	trivyRuntimeFlag        = "--image-src"
	trivyIgnoreStatusFlag   = "--ignore-status"
)
type (
	// Config is the trivy scanner configuration, typically unmarshaled from
	// the scanner section of the eraser config (see loadConfig); defaults
	// come from DefaultConfig.
	Config struct {
		Runtime            unversioned.RuntimeSpec `json:"runtime,omitempty"`
		CacheDir           string                  `json:"cacheDir,omitempty"`
		DBRepo             string                  `json:"dbRepo,omitempty"`
		DeleteFailedImages bool                    `json:"deleteFailedImages,omitempty"`
		DeleteEOLImages    bool                    `json:"deleteEOLImages,omitempty"`
		Vulnerabilities    VulnConfig              `json:"vulnerabilities,omitempty"`
		Timeout            TimeoutConfig           `json:"timeout,omitempty"`
	}

	// VulnConfig narrows which findings make an image non-compliant; each
	// field maps onto a trivy CLI flag (see cliArgs).
	VulnConfig struct {
		IgnoreUnfixed   bool     `json:"ignoreUnfixed,omitempty"`
		Types           []string `json:"types,omitempty"`
		SecurityChecks  []string `json:"securityChecks,omitempty"`
		Severities      []string `json:"severities,omitempty"`
		IgnoredStatuses []string `json:"ignoredStatuses,omitempty"`
	}

	// TimeoutConfig bounds scan time for the whole job and per image.
	TimeoutConfig struct {
		Total    unversioned.Duration `json:"total,omitempty"`
		PerImage unversioned.Duration `json:"perImage,omitempty"`
	}

	// ScanStatus is the per-image scan outcome.
	ScanStatus int

	// Scanner scans a single image and exposes the total-timeout timer
	// consumed by scan().
	Scanner interface {
		Scan(unversioned.Image) (ScanStatus, error)
		Timer() *time.Timer
	}
)
// DefaultConfig returns the baseline trivy scanner configuration used when
// no user config (or only a partial one) is supplied: containerd runtime,
// OS+library vuln scanning at all severities except UNKNOWN, 23h total and
// 1h per-image timeouts.
func DefaultConfig() *Config {
	vulns := VulnConfig{
		IgnoreUnfixed:   false,
		Types:           []string{vulnTypeOs, vulnTypeLibrary},
		SecurityChecks:  []string{securityCheckVuln},
		Severities:      []string{severityCritical, severityHigh, severityMedium, severityLow},
		IgnoredStatuses: []string{},
	}

	return &Config{
		Runtime: unversioned.RuntimeSpec{
			Name:    unversioned.RuntimeContainerd,
			Address: utils.CRIPath,
		},
		CacheDir:           "/var/lib/trivy",
		DBRepo:             "ghcr.io/aquasecurity/trivy-db",
		DeleteFailedImages: true,
		DeleteEOLImages:    true,
		Vulnerabilities:    vulns,
		Timeout: TimeoutConfig{
			Total:    unversioned.Duration(time.Hour * 23),
			PerImage: unversioned.Duration(time.Hour),
		},
	}
}
// cliArgs assembles the trivy command line for scanning ref. Argument order
// matters: global flags come first, then the "image" subcommand with its
// own flags, and finally the image reference.
func (c *Config) cliArgs(ref string) []string {
	var args []string

	// Global options.
	args = append(args, trivyJSONFormatFlag)
	if c.CacheDir != "" {
		args = append(args, trivyCacheDirFlag, c.CacheDir)
	}
	if c.Timeout.PerImage != 0 {
		args = append(args, trivyTimeoutFlag, time.Duration(c.Timeout.PerImage).String())
	}

	imgsrc, err := c.getRuntimeVar()
	if err != nil {
		log.Error(err, "invalid runtime provided")
	}

	// Subcommand and its options.
	args = append(args, trivyImageArg, trivyRuntimeFlag, imgsrc)
	if c.DBRepo != "" {
		args = append(args, trivyDBRepoFlag, c.DBRepo)
	}
	if c.Vulnerabilities.IgnoreUnfixed {
		args = append(args, trivyIgnoreUnfixedFlag)
	}
	if len(c.Vulnerabilities.Types) > 0 {
		args = append(args, trivyVulnTypesFlag, strings.Join(c.Vulnerabilities.Types, ","))
	}
	if len(c.Vulnerabilities.SecurityChecks) > 0 {
		args = append(args, trivySecurityChecksFlag, strings.Join(c.Vulnerabilities.SecurityChecks, ","))
	}
	if len(c.Vulnerabilities.Severities) > 0 {
		args = append(args, trivySeveritiesFlag, strings.Join(c.Vulnerabilities.Severities, ","))
	}
	if len(c.Vulnerabilities.IgnoredStatuses) > 0 {
		args = append(args, trivyIgnoreStatusFlag, strings.Join(c.Vulnerabilities.IgnoredStatuses, ","))
	}

	return append(args, ref)
}
// getRuntimeVar maps the configured container runtime to the trivy
// --image-src value. An empty runtime name defaults to containerd; any other
// unrecognized value is an error.
func (c *Config) getRuntimeVar() (string, error) {
	switch name := c.Runtime.Name; name {
	case unversioned.RuntimeCrio:
		return ImgSrcPodman, nil
	case unversioned.RuntimeDockerShim:
		return ImgSrcDocker, nil
	case unversioned.RuntimeContainerd, unversioned.Runtime(""):
		return ImgSrcContainerd, nil
	default:
		return "", fmt.Errorf("invalid runtime provided: %q", name)
	}
}
// ImageScanner scans images by shelling out to the trivy CLI using the
// options held in config.
type ImageScanner struct {
	config Config      // scanner configuration (runtime, flags, timeouts)
	timer  *time.Timer // exposed via Timer(); lifecycle managed elsewhere
}
// Scan runs trivy against the image, trying each known reference (digests
// first, then names) until one scan completes. It returns:
//   - StatusNonCompliant when the report contains any vulnerability, or when
//     the OS is end-of-life and DeleteEOLImages is enabled;
//   - StatusOK when a scan completed with no findings;
//   - StatusFailed when no reference could be scanned.
// Per-reference failures are logged and the next reference is tried; the
// returned error is always nil.
func (s *ImageScanner) Scan(img unversioned.Image) (ScanStatus, error) {
	refs := make([]string, 0, len(img.Names)+len(img.Digests))
	// Digests are tried before names since they identify content exactly.
	refs = append(refs, img.Digests...)
	refs = append(refs, img.Names...)
	scanSucceeded := false
	log.Info("scanning image with id", "imageID", img.ImageID, "refs", refs)
	for i := 0; i < len(refs) && !scanSucceeded; i++ {
		log.Info("scanning image with ref", "ref", refs[i])
		stdout := new(bytes.Buffer)
		stderr := new(bytes.Buffer)
		cliArgs := s.config.cliArgs(refs[i])
		//nolint:gosec // G204: Trivy subprocess execution is intended functionality
		cmd := exec.Command(trivyCommandName, cliArgs...)
		cmd.Stdout = stdout
		cmd.Stderr = stderr
		// Inherit the current environment, then add runtime socket variables.
		cmd.Env = append(cmd.Env, os.Environ()...)
		cmd.Env = setRuntimeSocketEnvVars(cmd, s.config.Runtime)
		log.V(1).Info("scanning image ref", "ref", refs[i], "cli_invocation", fmt.Sprintf("%s %s", trivyCommandName, strings.Join(cliArgs, " ")), "env", cmd.Env)
		if err := cmd.Run(); err != nil {
			// This reference failed to scan; fall through to the next one.
			log.Error(err, "error scanning image", "imageID", img.ImageID, "reference", refs[i], "stderr", stderr.String())
			continue
		}
		var report trivyTypes.Report
		if err := json.Unmarshal(stdout.Bytes(), &report); err != nil {
			log.Error(err, "error unmarshaling report", "imageID", img.ImageID, "reference", refs[i], "report", stdout.String(), "stderr", stderr.String())
			continue
		}
		if s.config.DeleteEOLImages {
			// Eosl marks an end-of-service-life OS release in the report.
			if report.Metadata.OS != nil && report.Metadata.OS.Eosl {
				log.Info("image is end of life", "imageID", img.ImageID, "reference", refs[i])
				return StatusNonCompliant, nil
			}
		}
		// Any vulnerability in any result makes the image non-compliant.
		for j := range report.Results {
			if len(report.Results[j].Vulnerabilities) > 0 {
				return StatusNonCompliant, nil
			}
		}
		// Clean scan; causes a break from the loop.
		scanSucceeded = true
	}
	status := StatusOK
	if !scanSucceeded {
		status = StatusFailed
	}
	return status, nil
}
// setRuntimeSocketEnvVars returns cmd.Env extended with the environment
// variable trivy needs to reach the container runtime socket:
//   - containerd (default): CONTAINERD_ADDRESS pointing at the CRI socket;
//   - dockershim: DOCKER_HOST pointing at the CRI socket;
//   - cri-o: the CRI socket is mirrored to /run/podman/podman.sock and
//     XDG_RUNTIME_DIR=/run is set so trivy's podman image source finds it.
//
// Fix over previous version: os.Stat failures used to be logged but the nil
// FileInfo was still dereferenced (infoParent.Mode() / infoSocket.Mode()),
// panicking the scanner. Failures now fall back to sane default permissions.
func setRuntimeSocketEnvVars(cmd *exec.Cmd, runtime unversioned.RuntimeSpec) []string {
	envKey := "CONTAINERD_ADDRESS"
	envVal := utils.CRIPath
	switch runtime.Name {
	case unversioned.RuntimeDockerShim:
		envKey = "DOCKER_HOST"
	case unversioned.RuntimeCrio:
		// Mirror the CRI directory's permissions; fall back to 0755 if the
		// directory cannot be inspected.
		dirPerm := os.FileMode(0o755)
		if infoParent, err := os.Stat("/run/cri"); err != nil {
			log.Error(err, "unable to get permissions for cri directory")
		} else {
			dirPerm = infoParent.Mode().Perm()
		}
		// Mirror the CRI socket's permissions; fall back to 0660 if the
		// socket cannot be inspected.
		sockPerm := os.FileMode(0o660)
		if infoSocket, err := os.Stat(utils.CRIPath); err != nil {
			log.Error(err, "unable to get permissions for cri socket")
		} else {
			sockPerm = infoSocket.Mode().Perm()
		}
		// Each of the following is best-effort: failures are logged and the
		// scan still proceeds, matching prior behavior.
		if err := os.Mkdir("/run/podman", dirPerm); err != nil {
			log.Error(err, "unable to create /run/podman dir")
		}
		if err := os.Symlink(utils.CRIPath, "/run/podman/podman.sock"); err != nil {
			log.Error(err, "unable to create symlink between CRI path and /run/podman/podman.sock")
		}
		if err := os.Chmod("/run/podman/podman.sock", sockPerm); err != nil {
			log.Error(err, "unable to change /run/podman/podman.sock permissions")
		}
		envKey = "XDG_RUNTIME_DIR"
		envVal = "/run"
	}
	return append(cmd.Env, fmt.Sprintf("%s=%s", envKey, envVal))
}
// Timer returns the timer associated with this scanner (nil if none was set).
func (s *ImageScanner) Timer() *time.Timer {
	return s.timer
}
var _ Scanner = &ImageScanner{}
================================================
FILE: pkg/scanners/trivy/types_test.go
================================================
package main
import (
"strings"
"testing"
"github.com/eraser-dev/eraser/api/unversioned"
)
// ref is the image reference passed to cliArgs in every test case below.
const ref = "image:tag"

// testDuration is 100s (1e11 ns); trivy renders it as "1m40s".
var testDuration = unversioned.Duration(100000000000)

// init is intentionally empty; no package-level test setup is required.
func init() {
}
// TestCLIArgs verifies that Config.cliArgs emits the expected trivy command
// line for each configuration permutation, in a stable order.
//
// Fixes over previous version: the length-mismatch log message had its
// expected/actual arguments swapped, and the element comparison loop could
// index past the end of tt.expected when the actual slice was longer.
func TestCLIArgs(t *testing.T) {
	type testCell struct {
		desc     string
		config   Config
		expected []string
	}
	tests := []testCell{
		{
			desc:   "empty config",
			config: Config{},
			// default container runtime is containerd
			expected: []string{"--format=json", "image", "--image-src", ImgSrcContainerd, ref},
		},
		{
			desc:     "DeleteFailedImages has no effect",
			config:   Config{DeleteFailedImages: true},
			expected: []string{"--format=json", "image", "--image-src", ImgSrcContainerd, ref},
		},
		{
			desc:     "DeleteEOLImages has no effect",
			config:   Config{DeleteEOLImages: true},
			expected: []string{"--format=json", "image", "--image-src", ImgSrcContainerd, ref},
		},
		{
			desc:     "alternative runtime crio",
			config:   Config{Runtime: unversioned.RuntimeSpec{Name: unversioned.RuntimeCrio, Address: unversioned.CrioPath}},
			expected: []string{"--format=json", "image", "--image-src", ImgSrcPodman, ref},
		},
		{
			desc:     "alternative runtime dockershim",
			config:   Config{Runtime: unversioned.RuntimeSpec{Name: unversioned.RuntimeDockerShim, Address: unversioned.DockerPath}},
			expected: []string{"--format=json", "image", "--image-src", ImgSrcDocker, ref},
		},
		{
			desc:     "with cachedir",
			config:   Config{CacheDir: "/var/lib/trivy"},
			expected: []string{"--format=json", "--cache-dir", "/var/lib/trivy", "image", "--image-src", ImgSrcContainerd, ref},
		},
		{
			desc:     "with custom db repo",
			config:   Config{DBRepo: "example.test/db/repo"},
			expected: []string{"--format=json", "image", "--image-src", ImgSrcContainerd, "--db-repository", "example.test/db/repo", ref},
		},
		{
			desc:     "ignore unfixed",
			config:   Config{Vulnerabilities: VulnConfig{IgnoreUnfixed: true}},
			expected: []string{"--format=json", "image", "--image-src", ImgSrcContainerd, "--ignore-unfixed", ref},
		},
		{
			desc:     "specify vulnerability types",
			config:   Config{Vulnerabilities: VulnConfig{Types: []string{"library", "os"}}},
			expected: []string{"--format=json", "image", "--image-src", ImgSrcContainerd, "--vuln-type", "library,os", ref},
		},
		{
			desc:     "specify security checks / scanners",
			config:   Config{Vulnerabilities: VulnConfig{SecurityChecks: []string{"license", "vuln"}}},
			expected: []string{"--format=json", "image", "--image-src", ImgSrcContainerd, "--scanners", "license,vuln", ref},
		},
		{
			desc:     "specify severities",
			config:   Config{Vulnerabilities: VulnConfig{Severities: []string{"LOW", "MEDIUM"}}},
			expected: []string{"--format=json", "image", "--image-src", ImgSrcContainerd, "--severity", "LOW,MEDIUM", ref},
		},
		{
			desc:     "specify statuses to ignore",
			config:   Config{Vulnerabilities: VulnConfig{IgnoredStatuses: []string{statusUnknown, statusFixed, statusWillNotFix}}},
			expected: []string{"--format=json", "image", "--image-src", ImgSrcContainerd, "--ignore-status", "unknown,fixed,will_not_fix", ref},
		},
		{
			desc:     "total timeout has no effect",
			config:   Config{Timeout: TimeoutConfig{Total: testDuration}},
			expected: []string{"--format=json", "image", "--image-src", ImgSrcContainerd, ref},
		},
		{
			desc:     "per-image timeout",
			config:   Config{Timeout: TimeoutConfig{PerImage: testDuration}},
			expected: []string{"--format=json", "--timeout", "1m40s", "image", "--image-src", ImgSrcContainerd, ref},
		},
		{
			desc:   "all global options",
			config: Config{CacheDir: "/var/lib/trivy", Timeout: TimeoutConfig{PerImage: testDuration}},
			// these are output in a consistent order
			expected: []string{"--format=json", "--cache-dir", "/var/lib/trivy", "--timeout", "1m40s", "image", "--image-src", "containerd", ref},
		},
		{
			desc: "all `image` options",
			config: Config{
				Runtime: unversioned.RuntimeSpec{
					Name:    unversioned.RuntimeCrio,
					Address: unversioned.CrioPath,
				},
				DBRepo: "example.test/db/repo",
				Vulnerabilities: VulnConfig{
					IgnoreUnfixed:   true,
					Types:           []string{"library", "os"},
					SecurityChecks:  []string{"license", "vuln"},
					Severities:      []string{"LOW", "MEDIUM"},
					IgnoredStatuses: []string{statusUnknown, statusFixed},
				},
			},
			expected: []string{
				"--format=json", "image", "--image-src", ImgSrcPodman, "--db-repository", "example.test/db/repo", "--ignore-unfixed",
				"--vuln-type", "library,os", "--scanners", "license,vuln", "--severity", "LOW,MEDIUM", "--ignore-status", "unknown,fixed", ref,
			},
		},
		{
			desc: "all options",
			config: Config{
				CacheDir: "/var/lib/trivy",
				Timeout:  TimeoutConfig{PerImage: testDuration},
				Runtime: unversioned.RuntimeSpec{
					Name:    unversioned.RuntimeCrio,
					Address: unversioned.CrioPath,
				},
				DBRepo: "example.test/db/repo",
				Vulnerabilities: VulnConfig{
					IgnoreUnfixed:   true,
					Types:           []string{"os"},
					SecurityChecks:  []string{"license", "vuln"},
					Severities:      []string{"CRITICAL"},
					IgnoredStatuses: []string{statusUnknown, statusFixed},
				},
			},
			expected: []string{
				"--format=json", "--cache-dir", "/var/lib/trivy", "--timeout", "1m40s", "image", "--image-src", ImgSrcPodman,
				"--db-repository", "example.test/db/repo", "--ignore-unfixed", "--vuln-type", "os", "--scanners",
				"license,vuln", "--severity", "CRITICAL", "--ignore-status", "unknown,fixed", ref,
			},
		},
	}
	for _, tt := range tests {
		t.Run(tt.desc, func(t *testing.T) {
			actual := tt.config.cliArgs(ref)
			if len(actual) != len(tt.expected) {
				// Fixed: report expected length first, actual second.
				t.Logf("expected resulting length to be %d, was actually %d", len(tt.expected), len(actual))
				t.Fail()
			}
			// Compare only up to the shorter length so a length mismatch
			// (already flagged above) cannot cause an index-out-of-range.
			limit := len(actual)
			if len(tt.expected) < limit {
				limit = len(tt.expected)
			}
			for i := 0; i < limit; i++ {
				if actual[i] != tt.expected[i] {
					t.Logf("expected argument %s in position %d, was actually %s", tt.expected[i], i, actual[i])
					t.Fail()
				}
			}
			if t.Failed() {
				t.Logf("expected result `%s`, but got `%s`", strings.Join(tt.expected, " "), strings.Join(actual, " "))
			}
		})
	}
}
================================================
FILE: pkg/utils/flag.go
================================================
package utils
import (
"fmt"
)
// MultiFlag accumulates repeated occurrences of a command-line flag into a
// string slice; it implements the flag.Value interface.
type MultiFlag []string

// String returns a Go-syntax representation of the collected values; the
// flag package calls it when printing defaults.
func (nss *MultiFlag) String() string {
	return fmt.Sprintf("%#v", nss)
}

// Set appends one flag occurrence to the list; it never returns an error.
func (nss *MultiFlag) Set(s string) error {
	*nss = append(*nss, s)
	return nil
}
================================================
FILE: pkg/utils/pod_info.go
================================================
package utils
import "os"
// GetNamespace returns the pod's namespace from the POD_NAMESPACE environment
// variable, defaulting to "eraser-system" when the variable is unset.
func GetNamespace() string {
	if ns, ok := os.LookupEnv("POD_NAMESPACE"); ok {
		return ns
	}
	return "eraser-system"
}
================================================
FILE: pkg/utils/security_context.go
================================================
package utils
import (
corev1 "k8s.io/api/core/v1"
)
var trueval = true
var SharedSecurityContext = &corev1.SecurityContext{
Capabilities: &corev1.Capabilities{
Drop: []corev1.Capability{"ALL"},
},
ReadOnlyRootFilesystem: &trueval,
SeccompProfile: &corev1.SeccompProfile{
Type: corev1.SeccompProfileTypeRuntimeDefault,
},
}
================================================
FILE: pkg/utils/utils.go
================================================
package utils
import (
"context"
"encoding/json"
"errors"
"fmt"
"io"
"net"
"net/url"
"os"
"strings"
"time"
"golang.org/x/sys/unix"
"google.golang.org/grpc"
"google.golang.org/grpc/credentials/insecure"
v1 "k8s.io/cri-api/pkg/apis/runtime/v1"
"github.com/eraser-dev/eraser/api/unversioned"
)
const (
	// unixProtocol is the network protocol of unix socket.
	unixProtocol = "unix"
	// PipeMode is the permission mode used when creating the named pipes below.
	PipeMode = 0o644
	// Named-pipe paths on the shared-data volume used to hand data between
	// eraser components (see ReadCollectScanPipe / WriteScanErasePipe).
	ScanErasePath            = "/run/eraser.sh/shared-data/scanErase"
	CollectScanPath          = "/run/eraser.sh/shared-data/collectScan"
	EraseCompleteCollectPath = "/run/eraser.sh/shared-data/eraseCompleteCollect"
	// EraseCompleteMessage is the payload written to signal completion.
	EraseCompleteMessage  = "complete"
	EraseCompleteScanPath = "/run/eraser.sh/shared-data/eraseCompleteScan"
	// CRIPath is the default container-runtime socket path mounted into pods.
	CRIPath = "/run/cri/cri.sock"
	// EnvEraserRuntimeName names the env var holding the runtime name.
	EnvEraserRuntimeName = "ERASER_RUNTIME_NAME"
)

// ExclusionList is the JSON schema of an exclusion configmap: image
// references that must never be removed.
type ExclusionList struct {
	Excluded []string `json:"excluded"`
}

// Sentinel errors returned while parsing CRI endpoint URLs.
var (
	ErrProtocolNotSupported  = errors.New("protocol not supported")
	ErrEndpointDeprecated    = errors.New("endpoint is deprecated, please consider using full url format")
	ErrOnlySupportUnixSocket = errors.New("only support unix socket endpoint")
)
// GetConn dials the CRI socket at socketPath and returns a connected gRPC
// client. Only unix endpoints are supported; the dial blocks until the
// connection is established or ctx is done.
func GetConn(ctx context.Context, socketPath string) (conn *grpc.ClientConn, err error) {
	addr, dialer, err := getAddressAndDialer(socketPath)
	if err != nil {
		return nil, err
	}
	//nolint:staticcheck // SA1019: grpc.DialContext is deprecated but maintains required blocking behavior
	return grpc.DialContext(
		ctx,
		addr,
		//nolint:staticcheck // SA1019: grpc.WithBlock is deprecated but ensures synchronous CRI connection
		grpc.WithBlock(),
		grpc.WithTransportCredentials(insecure.NewCredentials()),
		grpc.WithContextDialer(dialer),
	)
}
// getAddressAndDialer parses the endpoint (falling back to unix:// when no
// scheme is given) and returns the socket address plus the unix dialer.
// Non-unix endpoints are rejected with ErrOnlySupportUnixSocket.
func getAddressAndDialer(endpoint string) (string, func(ctx context.Context, addr string) (net.Conn, error), error) {
	protocol, addr, err := ParseEndpointWithFallbackProtocol(endpoint, unixProtocol)
	switch {
	case err != nil:
		return "", nil, err
	case protocol != unixProtocol:
		return "", nil, ErrOnlySupportUnixSocket
	}
	return addr, dial, nil
}
// dial opens a unix-domain connection to addr, honoring ctx cancellation.
func dial(ctx context.Context, addr string) (net.Conn, error) {
	var d net.Dialer
	return d.DialContext(ctx, unixProtocol, addr)
}
// ParseEndpointWithFallbackProtocol parses endpoint; when parsing fails and
// no protocol could be determined, it retries with fallbackProtocol
// prepended (e.g. "unix://").
func ParseEndpointWithFallbackProtocol(endpoint string, fallbackProtocol string) (protocol string, addr string, err error) {
	protocol, addr, err = ParseEndpoint(endpoint)
	if err == nil || protocol != "" {
		// Success, or a failure that still identified a protocol: return as-is.
		return protocol, addr, err
	}
	protocol, addr, err = ParseEndpoint(fallbackProtocol + "://" + endpoint)
	if err != nil {
		return "", "", err
	}
	return protocol, addr, nil
}
// ParseEndpoint splits endpoint into (protocol, address). Supported schemes
// are tcp (address is host:port) and unix (address is the socket path). A
// scheme-less endpoint yields ErrEndpointDeprecated; any other scheme yields
// ErrProtocolNotSupported.
func ParseEndpoint(endpoint string) (string, string, error) {
	u, err := url.Parse(endpoint)
	if err != nil {
		return "", "", fmt.Errorf("error while parsing: %w", err)
	}
	if u.Scheme == "tcp" {
		return "tcp", u.Host, nil
	}
	if u.Scheme == "unix" {
		return "unix", u.Path, nil
	}
	if u.Scheme == "" {
		return "", "", fmt.Errorf("using %q as %w", endpoint, ErrEndpointDeprecated)
	}
	return u.Scheme, "", fmt.Errorf("%q: %w", u.Scheme, ErrProtocolNotSupported)
}
// ListImages returns every image known to the CRI image service (no filter).
func ListImages(ctx context.Context, images v1.ImageServiceClient) (list []*v1.Image, err error) {
	resp, err := images.ListImages(ctx, &v1.ListImagesRequest{Filter: nil})
	if err != nil {
		return nil, err
	}
	return resp.Images, nil
}
// ListContainers returns every container known to the CRI runtime service.
func ListContainers(ctx context.Context, runtime v1.RuntimeServiceClient) (list []*v1.Container, err error) {
	req := new(v1.ListContainersRequest)
	resp, err := runtime.ListContainers(ctx, req)
	if err != nil {
		return nil, err
	}
	return resp.Containers, nil
}
// GetRunningImages maps every known reference (image ID, name, or digest) of
// each image in use by a container to that image's ID.
func GetRunningImages(containers []*v1.Container, idToImageMap map[string]unversioned.Image) map[string]string {
	inUse := make(map[string]string)
	for _, c := range containers {
		id := c.Image.GetImage()
		inUse[id] = id
		for _, name := range idToImageMap[id].Names {
			inUse[name] = id
		}
		for _, digest := range idToImageMap[id].Digests {
			inUse[digest] = id
		}
	}
	return inUse
}
// GetNonRunningImages maps every known reference (image ID, name, or digest)
// of each image NOT present in runningImages to that image's ID.
func GetNonRunningImages(runningImages map[string]string, allImages []unversioned.Image, idToImageMap map[string]unversioned.Image) map[string]string {
	idle := make(map[string]string)
	for _, img := range allImages {
		id := img.ImageID
		if _, inUse := runningImages[id]; inUse {
			continue
		}
		idle[id] = id
		for _, name := range idToImageMap[id].Names {
			idle[name] = id
		}
		for _, digest := range idToImageMap[id].Digests {
			idle[digest] = id
		}
	}
	return idle
}
// IsExcluded reports whether img (an image ID) is protected by the exclusion
// list. An image is excluded when its ID, any of its names, or any of its
// digests appears verbatim in excluded, or matches a wildcard entry of the
// form "<repo>/*" (repository prefix) or "<name>:*" (any tag).
func IsExcluded(excluded map[string]struct{}, img string, idToImageMap map[string]unversioned.Image) bool {
	if len(excluded) == 0 {
		return false
	}
	// check if img excluded by digest
	if _, contains := excluded[img]; contains {
		return true
	}
	// check if img excluded by name
	for _, imgName := range idToImageMap[img].Names {
		if _, contains := excluded[imgName]; contains {
			return true
		}
	}
	for _, digest := range idToImageMap[img].Digests {
		if _, contains := excluded[digest]; contains {
			return true
		}
	}
	// look for excluded repository values and names without tag
	for key := range excluded {
		var prefix string
		switch {
		case strings.HasSuffix(key, "/*"):
			// "repo/*": everything in the repository; the prefix keeps the
			// trailing slash so "repo/*" does not match "repository".
			prefix = strings.Split(key, "*")[0]
		case strings.HasSuffix(key, ":*"):
			// "name:*": every tag of the image.
			// NOTE(review): this preserves the historical split on the FIRST
			// colon, so a registry with a port ("host:5000/img:*") yields the
			// prefix "host" — confirm whether that is intended.
			prefix = strings.Split(key, ":")[0]
		default:
			continue
		}
		if refHasPrefix(prefix, img, idToImageMap) {
			return true
		}
	}
	return false
}

// refHasPrefix reports whether img itself, or any name or digest recorded for
// it in idToImageMap, starts with prefix.
func refHasPrefix(prefix, img string, idToImageMap map[string]unversioned.Image) bool {
	if strings.HasPrefix(img, prefix) {
		return true
	}
	// retrieve and check by name in the case img is a digest
	for _, imgName := range idToImageMap[img].Names {
		if strings.HasPrefix(imgName, prefix) {
			return true
		}
	}
	for _, digest := range idToImageMap[img].Digests {
		if strings.HasPrefix(digest, prefix) {
			return true
		}
	}
	return false
}
// ParseImageList reads a JSON file containing an array of image references
// and returns them as a string slice.
func ParseImageList(path string) ([]string, error) {
	//nolint:gosec // G304: Reading image list file is intended functionality
	raw, err := os.ReadFile(path)
	if err != nil {
		return nil, err
	}
	list := []string{}
	if err := json.Unmarshal(raw, &list); err != nil {
		return nil, err
	}
	return list, nil
}
// ParseExcluded scans the working directory for "exclude-*" configmap mounts,
// reads each one, and returns the union of excluded image references as a set.
func ParseExcluded() (map[string]struct{}, error) {
	excluded := make(map[string]struct{})
	entries, err := os.ReadDir("./")
	if err != nil {
		return nil, err
	}
	for _, entry := range entries {
		if !strings.HasPrefix(entry.Name(), "exclude-") {
			continue
		}
		imgs, err := readConfigMap(entry.Name())
		if err != nil {
			return nil, err
		}
		for _, img := range imgs {
			excluded[img] = struct{}{}
		}
	}
	return excluded, nil
}
// BoolPtr returns a pointer to a fresh copy of b.
func BoolPtr(b bool) *bool {
	v := b
	return &v
}
// readConfigMap reads the first *.json file in the configmap mount directory
// at path and returns its "excluded" image list.
//
// Fixes over previous version: when no .json file was present, the empty
// fileName caused os.ReadFile(path + "/") to read the directory itself and
// fail with a confusing error — this is now reported explicitly; the
// redundant os.IsNotExist branch (identical to the general error branch) has
// been collapsed.
func readConfigMap(path string) ([]string, error) {
	entries, err := os.ReadDir(path)
	if err != nil {
		return nil, err
	}
	var fileName string
	for _, e := range entries {
		if strings.HasSuffix(e.Name(), ".json") {
			fileName = e.Name()
			break
		}
	}
	if fileName == "" {
		return nil, fmt.Errorf("no .json exclusion file found in %q", path)
	}
	//nolint:gosec // G304: Reading excluded images file is intended functionality
	data, err := os.ReadFile(path + "/" + fileName)
	if err != nil {
		return nil, err
	}
	var result ExclusionList
	if err := json.Unmarshal(data, &result); err != nil {
		return nil, err
	}
	return result.Excluded, nil
}
// ReadCollectScanPipe blocks until the collector's named pipe exists, then
// reads and unmarshals the full list of collected images from it. It polls
// for the pipe once per second and aborts with ctx.Err() when ctx is done.
//
// Fix over previous version: the opened pipe was never closed, leaking a
// file descriptor on every call.
func ReadCollectScanPipe(ctx context.Context) ([]unversioned.Image, error) {
	// Create a stopped, drained timer that is reused across poll iterations.
	timer := time.NewTimer(time.Second)
	if !timer.Stop() {
		<-timer.C
	}
	defer timer.Stop()
	var f *os.File
	for {
		var err error
		f, err = os.OpenFile(CollectScanPath, os.O_RDONLY, 0)
		if err == nil {
			break
		}
		if !os.IsNotExist(err) {
			return nil, err
		}
		// Pipe not created yet: wait a second (or until cancellation) and retry.
		timer.Reset(time.Second)
		select {
		case <-ctx.Done():
			return nil, ctx.Err()
		case <-timer.C:
			continue
		}
	}
	defer f.Close()
	// json data is list of []eraserv1.Image
	data, err := io.ReadAll(f)
	if err != nil {
		return nil, err
	}
	allImages := []unversioned.Image{}
	if err = json.Unmarshal(data, &allImages); err != nil {
		return nil, err
	}
	return allImages, nil
}
// WriteScanErasePipe creates the scanner→remover named pipe and writes the
// JSON-encoded list of vulnerable images to it. The write blocks until a
// reader opens the other end of the FIFO.
//
// Fix over previous version: the file was left open when the write failed;
// it is now closed on every path.
func WriteScanErasePipe(vulnerableImages []unversioned.Image) error {
	data, err := json.Marshal(vulnerableImages)
	if err != nil {
		return err
	}
	if err = unix.Mkfifo(ScanErasePath, PipeMode); err != nil {
		return err
	}
	file, err := os.OpenFile(ScanErasePath, os.O_WRONLY, 0)
	if err != nil {
		return err
	}
	if _, err := file.Write(data); err != nil {
		// Best-effort close; the write error is the one worth surfacing.
		file.Close()
		return err
	}
	return file.Close()
}
// ProcessRepoDigests extracts the digest portion of each "repo@digest"
// reference, deduplicating the results. Malformed references (no "@") are
// reported in the returned error slice. Output order is unspecified.
func ProcessRepoDigests(repoDigests []string) ([]string, []error) {
	errs := []error{}
	seen := make(map[string]struct{})
	for _, rd := range repoDigests {
		parts := strings.Split(rd, "@")
		if len(parts) < 2 {
			errs = append(errs, fmt.Errorf("repoDigest not formatted correctly: %s", rd))
			continue
		}
		seen[parts[1]] = struct{}{}
	}
	digests := []string{}
	for d := range seen {
		digests = append(digests, d)
	}
	return digests, errs
}
================================================
FILE: pkg/utils/utils_test.go
================================================
package utils
import (
"errors"
"fmt"
"net/url"
"testing"
)
// TestParseEndpointWithFallBackProtocol checks that scheme-less endpoints are
// retried with the fallback protocol, while fully-qualified and malformed
// endpoints are returned/failed as-is.
func TestParseEndpointWithFallBackProtocol(t *testing.T) {
	testCases := []struct {
		endpoint         string
		fallbackProtocol string
		protocol         string
		addr             string
		errCheck         func(t *testing.T, err error)
	}{
		{
			// Fully-qualified unix endpoint: parsed directly, no fallback.
			endpoint:         fmt.Sprintf("unix://%s", CRIPath),
			fallbackProtocol: "unix",
			protocol:         "unix",
			addr:             CRIPath,
			errCheck: func(t *testing.T, err error) {
				if err != nil {
					t.Error(err)
				}
			},
		},
		{
			// Scheme-less endpoint: retried as unix://…; the unix path of a
			// host-only URL is empty.
			endpoint:         "192.168.123.132",
			fallbackProtocol: "unix",
			protocol:         "unix",
			addr:             "",
			errCheck: func(t *testing.T, err error) {
				if err != nil {
					t.Error(err)
				}
			},
		},
		{
			// tcp endpoint parses without needing the fallback.
			endpoint:         "tcp://localhost:8080",
			fallbackProtocol: "unix",
			protocol:         "tcp",
			addr:             "localhost:8080",
			errCheck: func(t *testing.T, err error) {
				if err != nil {
					t.Error(err)
				}
			},
		},
		{
			// Unparseable endpoint: fallback also fails with a url.Error.
			endpoint:         " ",
			fallbackProtocol: "unix",
			protocol:         "",
			addr:             "",
			errCheck: func(t *testing.T, err error) {
				as := &url.Error{}
				if !errors.As(err, &as) {
					t.Error(err)
				}
			},
		},
	}
	for _, tc := range testCases {
		p, a, e := ParseEndpointWithFallbackProtocol(tc.endpoint, tc.fallbackProtocol)
		if p != tc.protocol || a != tc.addr {
			t.Errorf("Test fails")
		}
		tc.errCheck(t, e)
	}
}
// TestParseEndpoint checks protocol/address extraction and the sentinel
// errors returned for deprecated (scheme-less) and unsupported schemes.
func TestParseEndpoint(t *testing.T) {
	testCases := []struct {
		endpoint string
		protocol string
		addr     string
		errCheck func(t *testing.T, err error)
	}{
		{
			// unix endpoint: address is the socket path.
			endpoint: fmt.Sprintf("unix://%s", CRIPath),
			protocol: "unix",
			addr:     CRIPath,
			errCheck: func(t *testing.T, err error) {
				if err != nil {
					t.Error(err)
				}
			},
		},
		{
			// Scheme-less endpoint is deprecated.
			endpoint: "192.168.123.132",
			protocol: "",
			addr:     "",
			errCheck: func(t *testing.T, err error) {
				if !errors.Is(err, ErrEndpointDeprecated) {
					t.Error(err)
				}
			},
		},
		{
			// Any scheme other than tcp/unix is unsupported.
			endpoint: "https://myaccount.blob.core.windows.net/mycontainer/myblob",
			protocol: "https",
			addr:     "",
			errCheck: func(t *testing.T, err error) {
				if !errors.Is(err, ErrProtocolNotSupported) {
					t.Error(err)
				}
			},
		},
		{
			// Invalid URL: surfaced as a url.Error.
			endpoint: "unix:// ",
			protocol: "",
			addr:     "",
			errCheck: func(t *testing.T, err error) {
				as := &url.Error{}
				if !errors.As(err, &as) {
					t.Error(err)
				}
			},
		},
	}
	for _, tc := range testCases {
		p, a, e := ParseEndpoint(tc.endpoint)
		if p != tc.protocol || a != tc.addr {
			t.Errorf("Test fails")
		}
		tc.errCheck(t, e)
	}
}
// TestGetAddressAndDialer checks that only unix endpoints yield an address,
// and that unsupported protocols return the expected sentinel errors.
//
// Fix over previous version: the single "Test fails" message conflated
// address and error mismatches; failures now report which endpoint failed
// and why.
func TestGetAddressAndDialer(t *testing.T) {
	testCases := []struct {
		endpoint string
		addr     string
		err      error
	}{
		{
			endpoint: fmt.Sprintf("unix://%s", CRIPath),
			addr:     CRIPath,
			err:      nil,
		},
		{
			endpoint: "localhost:8080",
			addr:     "",
			err:      ErrProtocolNotSupported,
		},
		{
			endpoint: "tcp://localhost:8080",
			addr:     "",
			err:      ErrOnlySupportUnixSocket,
		},
	}
	for _, tc := range testCases {
		a, _, e := getAddressAndDialer(tc.endpoint)
		if a != tc.addr {
			t.Errorf("endpoint %q: expected address %q, got %q", tc.endpoint, tc.addr, a)
		}
		if !errors.Is(e, tc.err) {
			t.Errorf("endpoint %q: expected error %v, got %v", tc.endpoint, tc.err, e)
		}
	}
}
================================================
FILE: test/e2e/kind-config-custom-runtime.yaml
================================================
kind: Cluster
apiVersion: kind.x-k8s.io/v1alpha4
nodes:
- role: control-plane
- role: worker
- role: worker
containerdConfigPatches:
- |
[grpc]
address = "/fake/socket/address.sock"
kubeadmConfigPatches:
- |
kind: InitConfiguration
nodeRegistration:
criSocket: "/fake/socket/address.sock"
- |
kind: JoinConfiguration
nodeRegistration:
criSocket: "/fake/socket/address.sock"
================================================
FILE: test/e2e/kind-config.yaml
================================================
kind: Cluster
apiVersion: kind.x-k8s.io/v1alpha4
nodes:
- role: control-plane
- role: worker
- role: worker
================================================
FILE: test/e2e/test-data/Dockerfile.busybox
================================================
ARG IMG
FROM ${IMG}
RUN echo 'echo "$@"; sleep 360;' > /script.sh && chmod +x /script.sh
ENTRYPOINT ["/bin/sh", "/script.sh"]
================================================
FILE: test/e2e/test-data/Dockerfile.customNode
================================================
ARG KUBERNETES_VERSION
FROM kindest/node:v${KUBERNETES_VERSION}
ENV CONTAINERD_ADDRESS="/fake/socket/address.sock"
================================================
FILE: test/e2e/test-data/Dockerfile.dummyCollector
================================================
FROM busybox:latest
ENTRYPOINT ["yes"]
================================================
FILE: test/e2e/test-data/eraser_v1_imagelist.yaml
================================================
apiVersion: eraser.sh/v1
kind: ImageList
metadata:
name: imagelist
spec:
images:
- sha256:2834dc507516af02784808c5f48b7cbe38b8ed5d0f4837f16e78d00deb7e7767
- docker.io/library/nginx:latest
- nginx
================================================
FILE: test/e2e/test-data/eraser_v1alpha1_imagelist.yaml
================================================
apiVersion: eraser.sh/v1alpha1
kind: ImageList
metadata:
name: imagelist
spec:
images:
- sha256:2834dc507516af02784808c5f48b7cbe38b8ed5d0f4837f16e78d00deb7e7767
- docker.io/library/nginx:latest
- nginx
================================================
FILE: test/e2e/test-data/eraser_v1alpha1_imagelist_updated.yaml
================================================
apiVersion: eraser.sh/v1alpha1
kind: ImageList
metadata:
name: imagelist
spec:
images:
- "*"
================================================
FILE: test/e2e/test-data/helm-empty-values.yaml
================================================
# This file exists to prevent regression in the tests. A situation arose in
# which the helm keys were wrong in the e2e tests, resulting in helm receiving
# the default values for image repository and tag. This resulted in false
# positives because the cluster used in the e2e test would pull in the default
# image from a registry.
#
# For all e2e tests using helm, this file should be provided on the command
# line using `helm install -f`. This ensures that the default values are never
# used. The correct values will be supplied by `--set` flags further to the
# right on the command line.
#
# Below, randomized names are used to guarantee that if the wrong helm keys are
# used, the test will fail.
runtimeConfig:
apiVersion: eraser.sh/v1alpha3
manager:
runtime:
name: containerd
address: unix:///run/containerd/containerd.sock
components:
collector:
image:
repo: "ychoimvthinanopp"
tag: "wpsestipmlioujqd"
scanner:
image:
repo: "aezoqcrjrsmxryrn"
tag: "mwojcakgxrqcudmn"
remover:
image:
repo: "eultpoofmlmfdthr"
tag: "otosqrwfwrgdrvzo"
deploy:
image:
repo: "tbiuomsxwcpmnpqi"
tag: "pgtyeohbgckhpuvz"
pullPolicy: IfNotPresent
================================================
FILE: test/e2e/test-data/helm-test-config.yaml
================================================
controllerManager:
config:
manager:
imageJob:
cleanup:
delayOnSuccess: 1m
components:
scanner:
config: |
deleteFailedImages: false
deleteEOLImages: true
================================================
FILE: test/e2e/test-data/imagelist_alpine.yaml
================================================
apiVersion: eraser.sh/v1alpha1
kind: ImageList
metadata:
name: imagelist
spec:
images:
- docker.io/library/alpine:3.7.3
================================================
FILE: test/e2e/test-data/otelcollector.yaml
================================================
apiVersion: v1
kind: ConfigMap
metadata:
name: otel-collector-conf
labels:
app: opentelemetry
component: otel-collector-conf
data:
otel-collector-config: |
receivers:
otlp:
protocols:
http:
exporters:
logging:
loglevel: debug
prometheus:
endpoint: "0.0.0.0:8889"
send_timestamps: true
metric_expiration: 180m
service:
telemetry:
logs:
encoding: json
pipelines:
metrics:
receivers:
- otlp
exporters:
- logging
- prometheus
---
apiVersion: v1
kind: Service
metadata:
name: otel-collector
labels:
app: opentelemetry
component: otel-collector
spec:
ports:
- name: otlp-http # Default endpoint for OpenTelemetry HTTP receiver.
port: 4318
protocol: TCP
- name: prometheus
port: 80
targetPort: 8889
protocol: TCP
selector:
component: otel-collector
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: otel-collector
labels:
app: opentelemetry
component: otel-collector
spec:
selector:
matchLabels:
app: opentelemetry
component: otel-collector
minReadySeconds: 5
progressDeadlineSeconds: 120
replicas: 1
template:
metadata:
labels:
app: opentelemetry
component: otel-collector
spec:
containers:
- command:
- "/otelcol"
- "--config=/conf/otel-collector-config.yaml"
image: otel/opentelemetry-collector:0.61.0
name: otel-collector
resources:
limits:
memory: 2Gi
requests:
cpu: 200m
memory: 400Mi
ports:
- containerPort: 4318 # Default endpoint for OpenTelemetry receiver.
- containerPort: 8889 # Endpoint for Prometheus exporter.
volumeMounts:
- name: otel-collector-config-vol
mountPath: /conf
volumes:
- configMap:
name: otel-collector-conf
items:
- key: otel-collector-config
path: otel-collector-config.yaml
name: otel-collector-config-vol
================================================
FILE: test/e2e/tests/collector_delete_deployment/eraser_test.go
================================================
//go:build e2e
// +build e2e
package e2e
import (
"context"
"testing"
"github.com/eraser-dev/eraser/test/e2e/util"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels"
"sigs.k8s.io/e2e-framework/klient/wait"
"sigs.k8s.io/e2e-framework/klient/wait/conditions"
"sigs.k8s.io/e2e-framework/pkg/envconf"
"sigs.k8s.io/e2e-framework/pkg/features"
)
// TestDeleteDeployment verifies that deleting the eraser controller-manager
// deployment also removes the collector ImageJob pods it spawned.
func TestDeleteDeployment(t *testing.T) {
	deleteDeploymentFeat := features.New("Delete deployment should delete eraser pods").
		// First confirm the collector/remover pipeline actually ran.
		Assess("Non-vulnerable image successfully deleted from all nodes", func(ctx context.Context, t *testing.T, cfg *envconf.Config) context.Context {
			ctxT, cancel := context.WithTimeout(ctx, util.Timeout)
			defer cancel()
			util.CheckImageRemoved(ctxT, t, util.GetClusterNodes(t), util.NonVulnerableImage)
			return ctx
		}).
		// Capture logs before tearing the manager down, then delete it.
		Assess("Delete deployment", func(ctx context.Context, t *testing.T, cfg *envconf.Config) context.Context {
			if err := util.GetPodLogs(t); err != nil {
				t.Error("error getting eraser pod logs", err)
			}
			if err := util.KubectlDelete(cfg.KubeconfigFile(), util.TestNamespace, []string{"deployment", "eraser-controller-manager"}); err != nil {
				t.Error("unable to delete eraser-controller-manager deployment")
			}
			return ctx
		}).
		// The collector pods (selected by the ImageJob type label) must be
		// garbage-collected once the manager is gone.
		Assess("Check eraser pods are deleted", func(ctx context.Context, t *testing.T, cfg *envconf.Config) context.Context {
			if err := util.GetPodLogs(t); err != nil {
				t.Error("error getting eraser pod logs", err)
			}
			c, err := cfg.NewClient()
			if err != nil {
				t.Fatal("Failed to create new client", err)
			}
			var ls corev1.PodList
			err = c.Resources().List(ctx, &ls, func(o *metav1.ListOptions) {
				o.LabelSelector = labels.SelectorFromSet(map[string]string{util.ImageJobTypeLabelKey: util.CollectorLabel}).String()
			})
			if err != nil {
				t.Errorf("could not list pods: %v", err)
			}
			err = wait.For(conditions.New(c.Resources()).ResourcesDeleted(&ls), wait.WithTimeout(util.Timeout))
			if err != nil {
				t.Errorf("error waiting for pods to be deleted: %v", err)
			}
			return ctx
		}).
		Feature()
	util.Testenv.Test(t, deleteDeploymentFeat)
}
================================================
FILE: test/e2e/tests/collector_delete_deployment/main_test.go
================================================
//go:build e2e
// +build e2e
package e2e
import (
"os"
"testing"
eraserv1alpha1 "github.com/eraser-dev/eraser/api/v1alpha1"
"github.com/eraser-dev/eraser/test/e2e/util"
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
"k8s.io/client-go/kubernetes/scheme"
"sigs.k8s.io/e2e-framework/pkg/env"
"sigs.k8s.io/e2e-framework/pkg/envconf"
"sigs.k8s.io/e2e-framework/pkg/envfuncs"
)
// TestMain provisions a KinD cluster with the locally-built eraser images,
// installs the latest released chart with the collector disabled, then
// upgrades to enable the collector before running the tests.
//
// Fix over previous version: the remover image was loaded into the cluster
// twice via a duplicated LoadImageToCluster call.
func TestMain(m *testing.M) {
	utilruntime.Must(eraserv1alpha1.AddToScheme(scheme.Scheme))
	removerImage := util.ParsedImages.RemoverImage
	managerImage := util.ParsedImages.ManagerImage
	collectorImage := util.ParsedImages.CollectorImage
	util.Testenv = env.NewWithConfig(envconf.New())
	// Create KinD Cluster
	util.Testenv.Setup(
		envfuncs.CreateKindClusterWithConfig(util.KindClusterName, util.NodeVersion, util.KindConfigPath),
		envfuncs.CreateNamespace(util.TestNamespace),
		util.LoadImageToCluster(util.KindClusterName, util.ManagerImage, util.ManagerTarballPath),
		util.LoadImageToCluster(util.KindClusterName, util.RemoverImage, util.RemoverTarballPath),
		util.LoadImageToCluster(util.KindClusterName, util.CollectorImage, util.CollectorTarballPath),
		util.LoadImageToCluster(util.KindClusterName, util.NonVulnerableImage, ""),
		// Install the latest release with scanner/collector off…
		util.HelmDeployLatestEraserRelease(util.TestNamespace,
			"--set", util.ScannerEnable.Set("false"),
			"--set", util.CollectorEnable.Set("false"),
			"--set", util.RemoverImageRepo.Set(removerImage.Repo),
			"--set", util.RemoverImageTag.Set(removerImage.Tag),
			"--set", util.ManagerImageRepo.Set(managerImage.Repo),
			"--set", util.ManagerImageTag.Set(managerImage.Tag),
		),
		// …then upgrade to enable the collector with the local image.
		util.UpgradeEraserHelm(util.TestNamespace,
			"--set", util.ScannerEnable.Set("false"),
			"--set", util.RemoverImageRepo.Set(removerImage.Repo),
			"--set", util.RemoverImageTag.Set(removerImage.Tag),
			"--set", util.CollectorEnable.Set("true"),
			"--set", util.CollectorImageRepo.Set(collectorImage.Repo),
			"--set", util.CollectorImageTag.Set(collectorImage.Tag),
			"--set", util.ManagerImageRepo.Set(managerImage.Repo),
			"--set", util.ManagerImageTag.Set(managerImage.Tag),
			"--set", util.CleanupOnSuccessDelay.Set("2m"),
		),
	).Finish(
		envfuncs.DestroyKindCluster(util.KindClusterName),
	)
	os.Exit(util.Testenv.Run(m))
}
================================================
FILE: test/e2e/tests/collector_delete_manager/eraser_test.go
================================================
//go:build e2e
// +build e2e
package e2e
import (
"context"
"testing"
"time"
"github.com/eraser-dev/eraser/test/e2e/util"
eraserv1alpha1 "github.com/eraser-dev/eraser/api/v1alpha1"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels"
"sigs.k8s.io/e2e-framework/klient/wait"
"sigs.k8s.io/e2e-framework/klient/wait/conditions"
"sigs.k8s.io/e2e-framework/pkg/envconf"
"sigs.k8s.io/e2e-framework/pkg/features"
)
// TestDeleteManager verifies that deleting the controller-manager pod while
// an ImageJob is in flight results in the ImageJob being deleted (so the
// restarted manager can schedule a fresh one).
func TestDeleteManager(t *testing.T) {
	deleteManagerFeat := features.New("Deleting manager pod while current ImageJob is running should delete ImageJob and restart").
		Assess("Wait for eraser pods running", func(ctx context.Context, t *testing.T, cfg *envconf.Config) context.Context {
			c, err := cfg.NewClient()
			if err != nil {
				t.Fatal("Failed to create new client", err)
			}
			// Wait until all three collector pods are present before
			// interfering with the manager.
			err = wait.For(
				util.NumPodsPresentForLabel(ctx, c, 3, util.ImageJobTypeLabelKey+"="+util.CollectorLabel),
				wait.WithTimeout(time.Minute*2),
				wait.WithInterval(time.Millisecond*500),
			)
			if err != nil {
				t.Fatal(err)
			}
			return ctx
		}).
		Assess("Delete controller-manager pod", func(ctx context.Context, t *testing.T, cfg *envconf.Config) context.Context {
			c, err := cfg.NewClient()
			if err != nil {
				t.Fatal("Failed to create new client", err)
			}
			// get manager pod
			var podList corev1.PodList
			err = c.Resources().List(ctx, &podList, func(o *metav1.ListOptions) {
				o.LabelSelector = labels.SelectorFromSet(map[string]string{util.ManagerLabelKey: util.ManagerLabelValue}).String()
			})
			if err != nil {
				t.Errorf("could not list manager pods: %v", err)
			}
			// Fatal (not Error): podList.Items[0] is indexed below, so
			// continuing with an empty list would panic.
			if len(podList.Items) != 1 {
				t.Fatal("incorrect number of manager pods: ", len(podList.Items))
			}
			// get current ImageJob before deleting manager pod
			var jobList eraserv1alpha1.ImageJobList
			err = c.Resources().List(ctx, &jobList)
			if err != nil {
				t.Errorf("could not list ImageJob: %v", err)
			}
			// Verify the length BEFORE indexing: the original logged
			// jobList.Items[0] first, which panics on an empty list.
			if len(jobList.Items) != 1 {
				t.Fatal("incorrect number of ImageJobs: ", len(jobList.Items))
			}
			t.Log("job", jobList.Items[0], "name", jobList.Items[0].Name)
			// delete manager pod
			if err := util.KubectlDelete(cfg.KubeconfigFile(), util.TestNamespace, []string{"pod", podList.Items[0].Name}); err != nil {
				t.Error("unable to delete eraser-controller-manager pod")
			}
			// wait for deletion of ImageJob
			err = wait.For(conditions.New(c.Resources()).ResourcesDeleted(&jobList), wait.WithTimeout(util.Timeout))
			if err != nil {
				t.Errorf("error waiting for ImageJob to be deleted: %v", err)
			}
			return ctx
		}).
		Feature()
	util.Testenv.Test(t, deleteManagerFeat)
}
================================================
FILE: test/e2e/tests/collector_delete_manager/main_test.go
================================================
//go:build e2e
// +build e2e
package e2e
import (
"os"
"testing"
eraserv1alpha1 "github.com/eraser-dev/eraser/api/v1alpha1"
"github.com/eraser-dev/eraser/test/e2e/util"
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
"k8s.io/client-go/kubernetes/scheme"
"sigs.k8s.io/e2e-framework/pkg/env"
"sigs.k8s.io/e2e-framework/pkg/envconf"
"sigs.k8s.io/e2e-framework/pkg/envfuncs"
)
// TestMain provisions a KinD cluster for the delete-manager test. The
// collector is configured with a dummy tag (CollectorDummyImage / "dummy")
// so the collector ImageJob stays in flight long enough for the test to
// delete the manager pod mid-run.
func TestMain(m *testing.M) {
	utilruntime.Must(eraserv1alpha1.AddToScheme(scheme.Scheme))

	removerImage := util.ParsedImages.RemoverImage
	managerImage := util.ParsedImages.ManagerImage
	collectorImage := util.ParsedImages.CollectorImage

	util.Testenv = env.NewWithConfig(envconf.New())
	// Create KinD Cluster
	util.Testenv.Setup(
		envfuncs.CreateKindClusterWithConfig(util.KindClusterName, util.NodeVersion, util.KindConfigPath),
		envfuncs.CreateNamespace(util.TestNamespace),
		util.LoadImageToCluster(util.KindClusterName, util.ManagerImage, util.ManagerTarballPath),
		// The remover tarball only needs loading once; the original had a
		// redundant duplicate load of the same image.
		util.LoadImageToCluster(util.KindClusterName, util.RemoverImage, util.RemoverTarballPath),
		util.LoadImageToCluster(util.KindClusterName, util.CollectorDummyImage, ""),
		util.LoadImageToCluster(util.KindClusterName, util.NonVulnerableImage, ""),
		util.HelmDeployLatestEraserRelease(util.TestNamespace,
			"--set", util.ScannerEnable.Set("false"),
			"--set", util.CollectorEnable.Set("false"),
			"--set", util.RemoverImageRepo.Set(removerImage.Repo),
			"--set", util.RemoverImageTag.Set(removerImage.Tag),
			"--set", util.ManagerImageRepo.Set(managerImage.Repo),
			"--set", util.ManagerImageTag.Set(managerImage.Tag),
		),
		util.UpgradeEraserHelm(util.TestNamespace,
			"--set", util.ScannerEnable.Set("false"),
			"--set", util.RemoverImageRepo.Set(removerImage.Repo),
			"--set", util.RemoverImageTag.Set(removerImage.Tag),
			"--set", util.CollectorEnable.Set("true"),
			"--set", util.CollectorImageRepo.Set(collectorImage.Repo),
			"--set", util.CollectorImageTag.Set("dummy"),
			"--set", util.ManagerImageRepo.Set(managerImage.Repo),
			"--set", util.ManagerImageTag.Set(managerImage.Tag),
			"--set", util.CleanupOnSuccessDelay.Set("2m"),
		),
	).Finish(
		envfuncs.DestroyKindCluster(util.KindClusterName),
	)

	os.Exit(util.Testenv.Run(m))
}
================================================
FILE: test/e2e/tests/collector_disable_scan/eraser_test.go
================================================
//go:build e2e
// +build e2e
package e2e
import (
"context"
"testing"
"github.com/eraser-dev/eraser/test/e2e/util"
"sigs.k8s.io/e2e-framework/pkg/envconf"
"sigs.k8s.io/e2e-framework/pkg/features"
)
func TestDisableScanner(t *testing.T) {
disableScanFeat := features.New("Scanner can be disabled").
// non-vulnerable image should be deleted from all nodes when scanner is disabled and we prune with collector
Assess("Non-vulnerable image successfully deleted from all nodes", func(ctx context.Context, t *testing.T, cfg *envconf.Config) context.Context {
ctxT, cancel := context.WithTimeout(ctx, util.Timeout)
defer cancel()
util.CheckImageRemoved(ctxT, t, util.GetClusterNodes(t), util.NonVulnerableImage)
return ctx
}).
Assess("Get logs", func(ctx context.Context, t *testing.T, cfg *envconf.Config) context.Context {
if err := util.GetPodLogs(t); err != nil {
t.Error("error getting eraser pod logs", err)
}
return ctx
}).
Feature()
util.Testenv.Test(t, disableScanFeat)
}
================================================
FILE: test/e2e/tests/collector_disable_scan/main_test.go
================================================
//go:build e2e
// +build e2e
package e2e
import (
"os"
"testing"
eraserv1alpha1 "github.com/eraser-dev/eraser/api/v1alpha1"
"github.com/eraser-dev/eraser/test/e2e/util"
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
"k8s.io/client-go/kubernetes/scheme"
"sigs.k8s.io/e2e-framework/pkg/env"
"sigs.k8s.io/e2e-framework/pkg/envconf"
"sigs.k8s.io/e2e-framework/pkg/envfuncs"
)
// TestMain provisions a KinD cluster for the disable-scanner test: collector
// enabled, scanner left disabled, so pruning happens without vulnerability
// scanning.
func TestMain(m *testing.M) {
	utilruntime.Must(eraserv1alpha1.AddToScheme(scheme.Scheme))

	removerImage := util.ParsedImages.RemoverImage
	managerImage := util.ParsedImages.ManagerImage
	collectorImage := util.ParsedImages.CollectorImage

	util.Testenv = env.NewWithConfig(envconf.New())
	// Create KinD Cluster
	util.Testenv.Setup(
		envfuncs.CreateKindClusterWithConfig(util.KindClusterName, util.NodeVersion, util.KindConfigPath),
		envfuncs.CreateNamespace(util.TestNamespace),
		util.LoadImageToCluster(util.KindClusterName, util.ManagerImage, util.ManagerTarballPath),
		// Load the remover tarball once; the original duplicated this line.
		util.LoadImageToCluster(util.KindClusterName, util.RemoverImage, util.RemoverTarballPath),
		util.LoadImageToCluster(util.KindClusterName, util.CollectorImage, util.CollectorTarballPath),
		util.LoadImageToCluster(util.KindClusterName, util.NonVulnerableImage, ""),
		util.HelmDeployLatestEraserRelease(util.TestNamespace,
			"--set", util.ScannerEnable.Set("false"),
			"--set", util.CollectorEnable.Set("false"),
			"--set", util.RemoverImageRepo.Set(removerImage.Repo),
			"--set", util.RemoverImageTag.Set(removerImage.Tag),
			"--set", util.ManagerImageRepo.Set(managerImage.Repo),
			"--set", util.ManagerImageTag.Set(managerImage.Tag),
		),
		util.UpgradeEraserHelm(util.TestNamespace,
			"--set", util.ScannerEnable.Set("false"),
			"--set", util.RemoverImageRepo.Set(removerImage.Repo),
			"--set", util.RemoverImageTag.Set(removerImage.Tag),
			"--set", util.CollectorEnable.Set("true"),
			"--set", util.CollectorImageRepo.Set(collectorImage.Repo),
			"--set", util.CollectorImageTag.Set(collectorImage.Tag),
			"--set", util.ManagerImageRepo.Set(managerImage.Repo),
			"--set", util.ManagerImageTag.Set(managerImage.Tag),
			"--set", util.CleanupOnSuccessDelay.Set("1m"),
		),
	).Finish(
		envfuncs.DestroyKindCluster(util.KindClusterName),
	)

	os.Exit(util.Testenv.Run(m))
}
================================================
FILE: test/e2e/tests/collector_ensure_scan/eraser_test.go
================================================
//go:build e2e
// +build e2e
package e2e
import (
"context"
"testing"
"github.com/eraser-dev/eraser/test/e2e/util"
"sigs.k8s.io/e2e-framework/pkg/envconf"
"sigs.k8s.io/e2e-framework/pkg/features"
)
func TestEnsureScannerFunctions(t *testing.T) {
collectScanErasePipelineFeat := features.New("Collector pods should run automatically, trigger the scanner, then the eraser pods. Helm test.").
Assess("Vulnerable and EOL images are successfully deleted from all nodes", func(ctx context.Context, t *testing.T, cfg *envconf.Config) context.Context {
ctxT, cancel := context.WithTimeout(ctx, util.Timeout)
defer cancel()
util.CheckImageRemoved(ctxT, t, util.GetClusterNodes(t), util.Alpine)
return ctx
}).
Assess("Get logs", func(ctx context.Context, t *testing.T, cfg *envconf.Config) context.Context {
if err := util.GetPodLogs(t); err != nil {
t.Error("error getting eraser pod logs", err)
}
return ctx
}).
Feature()
util.Testenv.Test(t, collectScanErasePipelineFeat)
}
================================================
FILE: test/e2e/tests/collector_ensure_scan/main_test.go
================================================
//go:build e2e
// +build e2e
package e2e
import (
"os"
"testing"
eraserv1alpha1 "github.com/eraser-dev/eraser/api/v1alpha1"
"github.com/eraser-dev/eraser/test/e2e/util"
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
"k8s.io/client-go/kubernetes/scheme"
"sigs.k8s.io/e2e-framework/pkg/env"
"sigs.k8s.io/e2e-framework/pkg/envconf"
"sigs.k8s.io/e2e-framework/pkg/envfuncs"
)
// TestMain provisions a KinD cluster for the full collector+scanner pipeline
// Helm test. deleteFailedImages is set to false via ScannerConfig so a broken
// scanner surfaces as leftover images instead of being masked.
func TestMain(m *testing.M) {
	utilruntime.Must(eraserv1alpha1.AddToScheme(scheme.Scheme))

	removerImage := util.ParsedImages.RemoverImage
	managerImage := util.ParsedImages.ManagerImage
	collectorImage := util.ParsedImages.CollectorImage
	scannerImage := util.ParsedImages.ScannerImage

	util.Testenv = env.NewWithConfig(envconf.New())
	// Create KinD Cluster
	util.Testenv.Setup(
		envfuncs.CreateKindClusterWithConfig(util.KindClusterName, util.NodeVersion, util.KindConfigPath),
		envfuncs.CreateNamespace(util.TestNamespace),
		util.LoadImageToCluster(util.KindClusterName, util.ManagerImage, util.ManagerTarballPath),
		// Load the remover tarball once; the original loaded it a second
		// time further down, which was redundant.
		util.LoadImageToCluster(util.KindClusterName, util.RemoverImage, util.RemoverTarballPath),
		util.LoadImageToCluster(util.KindClusterName, util.CollectorImage, util.CollectorTarballPath),
		util.LoadImageToCluster(util.KindClusterName, util.VulnerableImage, ""),
		util.LoadImageToCluster(util.KindClusterName, util.NonVulnerableImage, ""),
		util.LoadImageToCluster(util.KindClusterName, util.EOLImage, ""),
		util.LoadImageToCluster(util.KindClusterName, util.ScannerImage, util.ScannerTarballPath),
		util.HelmDeployLatestEraserRelease(util.TestNamespace,
			"--set", util.ScannerEnable.Set("false"),
			"--set", util.CollectorEnable.Set("false"),
			"--set", util.RemoverImageRepo.Set(removerImage.Repo),
			"--set", util.RemoverImageTag.Set(removerImage.Tag),
			"--set", util.ManagerImageRepo.Set(managerImage.Repo),
			"--set", util.ManagerImageTag.Set(managerImage.Tag),
		),
		util.UpgradeEraserHelm(util.TestNamespace,
			"--set", util.ScannerEnable.Set("true"),
			"--set", util.ScannerImageRepo.Set(scannerImage.Repo),
			"--set", util.ScannerImageTag.Set(scannerImage.Tag),
			"--set", util.RemoverImageRepo.Set(removerImage.Repo),
			"--set", util.RemoverImageTag.Set(removerImage.Tag),
			"--set", util.CollectorEnable.Set("true"),
			"--set", util.CollectorImageRepo.Set(collectorImage.Repo),
			"--set", util.CollectorImageTag.Set(collectorImage.Tag),
			"--set", util.ManagerImageRepo.Set(managerImage.Repo),
			"--set", util.ManagerImageTag.Set(managerImage.Tag),
			"--set", util.CleanupOnSuccessDelay.Set("1m"),
			// set deleteFailedImages to FALSE to catch a broken scanner
			"--set-json", util.ScannerConfig.Set(util.ScannerConfigNoDeleteFailedJSON),
		),
	).Finish(
		envfuncs.DestroyKindCluster(util.KindClusterName),
	)

	os.Exit(util.Testenv.Run(m))
}
================================================
FILE: test/e2e/tests/collector_pipeline/eraser_test.go
================================================
//go:build e2e
// +build e2e
package e2e
import (
"context"
"testing"
"github.com/eraser-dev/eraser/test/e2e/util"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels"
"sigs.k8s.io/e2e-framework/klient/wait"
"sigs.k8s.io/e2e-framework/klient/wait/conditions"
"sigs.k8s.io/e2e-framework/pkg/envconf"
"sigs.k8s.io/e2e-framework/pkg/features"
)
// TestCollectScanErasePipeline exercises the manifest-deployed collector ->
// scanner -> eraser pipeline: the target image must be removed from every
// node, and the ImageJob pods must be cleaned up afterwards.
func TestCollectScanErasePipeline(t *testing.T) {
	pipelineFeat := features.New("Collector pods should run automatically, trigger the scanner, then the eraser pods. Manifest deployment test.").
		Assess("Vulnerable and EOL images are successfully deleted from all nodes", func(ctx context.Context, t *testing.T, cfg *envconf.Config) context.Context {
			boundedCtx, stop := context.WithTimeout(ctx, util.Timeout)
			defer stop()
			util.CheckImageRemoved(boundedCtx, t, util.GetClusterNodes(t), util.Alpine)
			return ctx
		}).
		Assess("Pods from imagejobs are cleaned up", func(ctx context.Context, t *testing.T, cfg *envconf.Config) context.Context {
			client, err := cfg.NewClient()
			if err != nil {
				t.Fatal("Failed to create new client", err)
			}
			// Collect the collector-job pods, then wait for them to vanish.
			selector := labels.SelectorFromSet(map[string]string{util.ImageJobTypeLabelKey: util.CollectorLabel}).String()
			var collectorPods corev1.PodList
			if err := client.Resources().List(ctx, &collectorPods, func(o *metav1.ListOptions) {
				o.LabelSelector = selector
			}); err != nil {
				t.Errorf("could not list pods: %v", err)
			}
			if err := wait.For(conditions.New(client.Resources()).ResourcesDeleted(&collectorPods), wait.WithTimeout(util.Timeout)); err != nil {
				t.Errorf("error waiting for pods to be deleted: %v", err)
			}
			return ctx
		}).
		Feature()

	util.Testenv.Test(t, pipelineFeat)
}
================================================
FILE: test/e2e/tests/collector_pipeline/main_test.go
================================================
//go:build e2e
// +build e2e
package e2e
import (
"os"
"testing"
eraserv1alpha1 "github.com/eraser-dev/eraser/api/v1alpha1"
"github.com/eraser-dev/eraser/test/e2e/util"
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
"k8s.io/client-go/kubernetes/scheme"
"sigs.k8s.io/e2e-framework/pkg/env"
"sigs.k8s.io/e2e-framework/pkg/envconf"
"sigs.k8s.io/e2e-framework/pkg/envfuncs"
)
// TestMain provisions a KinD cluster for the manifest-deployment pipeline
// test: it loads the images under test plus vulnerable/EOL test images, then
// deploys Eraser via MakeDeploy with repo/tag substitutions.
func TestMain(m *testing.M) {
	utilruntime.Must(eraserv1alpha1.AddToScheme(scheme.Scheme))

	images := util.ParsedImages
	util.Testenv = env.NewWithConfig(envconf.New())

	// Create KinD Cluster
	util.Testenv.Setup(
		envfuncs.CreateKindClusterWithConfig(util.KindClusterName, util.NodeVersion, util.KindConfigPath),
		envfuncs.CreateNamespace(util.EraserNamespace),
		util.LoadImageToCluster(util.KindClusterName, util.ManagerImage, util.ManagerTarballPath),
		util.LoadImageToCluster(util.KindClusterName, util.RemoverImage, util.RemoverTarballPath),
		util.LoadImageToCluster(util.KindClusterName, util.ScannerImage, util.ScannerTarballPath),
		util.LoadImageToCluster(util.KindClusterName, util.CollectorImage, util.CollectorTarballPath),
		util.LoadImageToCluster(util.KindClusterName, util.VulnerableImage, ""),
		util.LoadImageToCluster(util.KindClusterName, util.EOLImage, ""),
		util.MakeDeploy(map[string]string{
			"REMOVER_REPO":       images.RemoverImage.Repo,
			"REMOVER_TAG":        images.RemoverImage.Tag,
			"MANAGER_REPO":       images.ManagerImage.Repo,
			"MANAGER_TAG":        images.ManagerImage.Tag,
			"TRIVY_SCANNER_REPO": images.ScannerImage.Repo,
			"TRIVY_SCANNER_TAG":  images.ScannerImage.Tag,
			"COLLECTOR_REPO":     images.CollectorImage.Repo,
			"COLLECTOR_TAG":      images.CollectorImage.Tag,
		}),
	).Finish(
		envfuncs.DestroyKindCluster(util.KindClusterName),
	)

	os.Exit(util.Testenv.Run(m))
}
================================================
FILE: test/e2e/tests/collector_runtime_config/eraser_test.go
================================================
//go:build e2e
// +build e2e
package e2e
import (
"context"
"testing"
"github.com/eraser-dev/eraser/test/e2e/util"
"sigs.k8s.io/e2e-framework/pkg/envconf"
"sigs.k8s.io/e2e-framework/pkg/features"
)
func TestCustomRuntimeAddress(t *testing.T) {
collectRuntimeFeat := features.New("Collector pods should run automatically, trigger the scanner, then the eraser pods. Helm test with custom runtime socket address.").
Assess("Vulnerable and EOL images are successfully deleted from all nodes", func(ctx context.Context, t *testing.T, cfg *envconf.Config) context.Context {
ctxT, cancel := context.WithTimeout(ctx, util.Timeout)
defer cancel()
util.CheckImageRemoved(ctxT, t, util.GetClusterNodes(t), util.Alpine)
return ctx
}).
Assess("Get logs", func(ctx context.Context, t *testing.T, cfg *envconf.Config) context.Context {
if err := util.GetPodLogs(t); err != nil {
t.Error("error getting eraser pod logs", err)
}
return ctx
}).
Feature()
util.Testenv.Test(t, collectRuntimeFeat)
}
================================================
FILE: test/e2e/tests/collector_runtime_config/main_test.go
================================================
//go:build e2e
// +build e2e
package e2e
import (
"os"
"testing"
eraserv1alpha1 "github.com/eraser-dev/eraser/api/v1alpha1"
"github.com/eraser-dev/eraser/test/e2e/util"
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
"k8s.io/client-go/kubernetes/scheme"
"sigs.k8s.io/e2e-framework/pkg/env"
"sigs.k8s.io/e2e-framework/pkg/envconf"
"sigs.k8s.io/e2e-framework/pkg/envfuncs"
)
// TestMain provisions a KinD cluster (with a modified node image and custom
// kind config) for the custom-runtime-address test, then deploys Eraser with
// a fake containerd socket path set as the runtime address.
func TestMain(m *testing.M) {
	utilruntime.Must(eraserv1alpha1.AddToScheme(scheme.Scheme))

	removerImage := util.ParsedImages.RemoverImage
	managerImage := util.ParsedImages.ManagerImage
	collectorImage := util.ParsedImages.CollectorImage
	scannerImage := util.ParsedImages.ScannerImage

	util.Testenv = env.NewWithConfig(envconf.New())
	// Create KinD Cluster
	util.Testenv.Setup(
		envfuncs.CreateKindClusterWithConfig(util.KindClusterName, util.ModifiedNodeImage, util.KindConfigCustomRuntimePath),
		envfuncs.CreateNamespace(util.TestNamespace),
		util.LoadImageToCluster(util.KindClusterName, util.ManagerImage, util.ManagerTarballPath),
		// Load the remover tarball once; the original loaded it twice.
		util.LoadImageToCluster(util.KindClusterName, util.RemoverImage, util.RemoverTarballPath),
		util.LoadImageToCluster(util.KindClusterName, util.CollectorImage, util.CollectorTarballPath),
		util.LoadImageToCluster(util.KindClusterName, util.VulnerableImage, ""),
		util.LoadImageToCluster(util.KindClusterName, util.NonVulnerableImage, ""),
		util.LoadImageToCluster(util.KindClusterName, util.EOLImage, ""),
		util.LoadImageToCluster(util.KindClusterName, util.ScannerImage, util.ScannerTarballPath),
		util.HelmDeployLatestEraserRelease(util.TestNamespace,
			"--set", util.ScannerEnable.Set("false"),
			"--set", util.CollectorEnable.Set("false"),
			"--set", util.RemoverImageRepo.Set(removerImage.Repo),
			"--set", util.RemoverImageTag.Set(removerImage.Tag),
			"--set", util.ManagerImageRepo.Set(managerImage.Repo),
			"--set", util.ManagerImageTag.Set(managerImage.Tag),
		),
		util.UpgradeEraserHelm(util.TestNamespace,
			"--set", util.ScannerEnable.Set("true"),
			"--set", util.ScannerImageRepo.Set(scannerImage.Repo),
			"--set", util.ScannerImageTag.Set(scannerImage.Tag),
			"--set", util.RemoverImageRepo.Set(removerImage.Repo),
			"--set", util.RemoverImageTag.Set(removerImage.Tag),
			"--set", util.CollectorEnable.Set("true"),
			"--set", util.CollectorImageRepo.Set(collectorImage.Repo),
			"--set", util.CollectorImageTag.Set(collectorImage.Tag),
			"--set", util.ManagerImageRepo.Set(managerImage.Repo),
			"--set", util.ManagerImageTag.Set(managerImage.Tag),
			"--set", util.CleanupOnSuccessDelay.Set("1m"),
			// set deleteFailedImages to FALSE to catch a broken scanner
			"--set-json", util.ScannerConfig.Set(util.ScannerConfigNoDeleteFailedJSON),
			// set custom runtime socket as runtime address
			"--set", util.CustomRuntimeAddress.Set("unix:///fake/socket/address.sock"),
			"--set", util.CustomRuntimeName.Set("containerd"),
		),
	).Finish(
		envfuncs.DestroyKindCluster(util.KindClusterName),
	)

	os.Exit(util.Testenv.Run(m))
}
================================================
FILE: test/e2e/tests/collector_skip_excluded/eraser_test.go
================================================
//go:build e2e
// +build e2e
package e2e
import (
"context"
"testing"
"time"
"github.com/eraser-dev/eraser/test/e2e/util"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels"
"sigs.k8s.io/e2e-framework/klient/wait"
"sigs.k8s.io/e2e-framework/klient/wait/conditions"
"sigs.k8s.io/e2e-framework/pkg/envconf"
"sigs.k8s.io/e2e-framework/pkg/features"
)
// TestCollectorExcluded verifies that images matching the exclusion lists
// are NOT removed by the collector/eraser pipeline.
func TestCollectorExcluded(t *testing.T) {
	collectorExcluded := features.New("ImageCollector should not remove excluded images").
		Assess("Collector pods completed", func(ctx context.Context, t *testing.T, cfg *envconf.Config) context.Context {
			c, err := cfg.NewClient()
			if err != nil {
				t.Fatal("Failed to create new client", err)
			}
			var ls corev1.PodList
			err = c.Resources().List(ctx, &ls, func(o *metav1.ListOptions) {
				o.LabelSelector = labels.SelectorFromSet(map[string]string{util.ImageJobTypeLabelKey: util.CollectorLabel}).String()
			})
			if err != nil {
				t.Errorf("could not list pods: %v", err)
			}
			// Index into the slice instead of taking the address of the
			// range variable: before Go 1.22 `&pod` aliases a single
			// per-loop variable.
			for i := range ls.Items {
				pod := &ls.Items[i]
				err = wait.For(conditions.New(c.Resources()).PodPhaseMatch(pod, corev1.PodSucceeded), wait.WithTimeout(time.Minute*3))
				if err != nil {
					t.Log("collector pod unsuccessful", pod.Name)
				}
			}
			return ctx
		}).
		Assess("Alpine image is not removed", func(ctx context.Context, t *testing.T, cfg *envconf.Config) context.Context {
			// The original derived a timeout context here and discarded it;
			// CheckImagesExist takes no context, so it was dead code.
			util.CheckImagesExist(t, util.GetClusterNodes(t), util.VulnerableImage)
			return ctx
		}).
		Assess("Non-vulnerable image is not removed", func(ctx context.Context, t *testing.T, cfg *envconf.Config) context.Context {
			util.CheckImagesExist(t, util.GetClusterNodes(t), util.NonVulnerableImage)
			return ctx
		}).
		Assess("Get logs", func(ctx context.Context, t *testing.T, cfg *envconf.Config) context.Context {
			if err := util.GetPodLogs(t); err != nil {
				t.Error("error getting eraser pod logs", err)
			}
			return ctx
		}).
		Feature()
	util.Testenv.Test(t, collectorExcluded)
}
================================================
FILE: test/e2e/tests/collector_skip_excluded/main_test.go
================================================
//go:build e2e
// +build e2e
package e2e
import (
"os"
"testing"
eraserv1alpha1 "github.com/eraser-dev/eraser/api/v1alpha1"
"github.com/eraser-dev/eraser/test/e2e/util"
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
"k8s.io/client-go/kubernetes/scheme"
"sigs.k8s.io/e2e-framework/pkg/env"
"sigs.k8s.io/e2e-framework/pkg/envconf"
"sigs.k8s.io/e2e-framework/pkg/envfuncs"
)
// TestMain provisions a KinD cluster for the skip-excluded test: it creates
// exclusion lists for alpine and the non-vulnerable image, then deploys
// Eraser with collector and scanner enabled.
func TestMain(m *testing.M) {
	utilruntime.Must(eraserv1alpha1.AddToScheme(scheme.Scheme))

	removerImage := util.ParsedImages.RemoverImage
	managerImage := util.ParsedImages.ManagerImage
	collectorImage := util.ParsedImages.CollectorImage
	scannerImage := util.ParsedImages.ScannerImage

	util.Testenv = env.NewWithConfig(envconf.New())
	// Create KinD Cluster
	util.Testenv.Setup(
		envfuncs.CreateKindClusterWithConfig(util.KindClusterName, util.NodeVersion, util.KindConfigPath),
		envfuncs.CreateNamespace(util.TestNamespace),
		util.LoadImageToCluster(util.KindClusterName, util.ManagerImage, util.ManagerTarballPath),
		// Load the remover tarball once; the original loaded it a second
		// time further down, which was redundant.
		util.LoadImageToCluster(util.KindClusterName, util.RemoverImage, util.RemoverTarballPath),
		util.LoadImageToCluster(util.KindClusterName, util.CollectorImage, util.CollectorTarballPath),
		util.LoadImageToCluster(util.KindClusterName, util.VulnerableImage, ""),
		util.LoadImageToCluster(util.KindClusterName, util.NonVulnerableImage, ""),
		util.LoadImageToCluster(util.KindClusterName, util.ScannerImage, util.ScannerTarballPath),
		// NOTE(review): two exclusion lists are created back to back — if
		// CreateExclusionList replaces rather than merges, the second call
		// may overwrite the first; confirm against its implementation.
		util.CreateExclusionList(util.TestNamespace, "{\"excluded\": [\"docker.io/library/alpine:*\"]}"),
		util.CreateExclusionList(util.TestNamespace, "{\"excluded\": [\""+util.NonVulnerableImage+"\"]}"),
		util.HelmDeployLatestEraserRelease(util.TestNamespace,
			"--set", util.ScannerEnable.Set("false"),
			"--set", util.CollectorEnable.Set("false"),
			"--set", util.RemoverImageRepo.Set(removerImage.Repo),
			"--set", util.RemoverImageTag.Set(removerImage.Tag),
			"--set", util.ManagerImageRepo.Set(managerImage.Repo),
			"--set", util.ManagerImageTag.Set(managerImage.Tag),
		),
		util.UpgradeEraserHelm(util.TestNamespace,
			"--set", util.ScannerEnable.Set("true"),
			"--set", util.ScannerImageRepo.Set(scannerImage.Repo),
			"--set", util.ScannerImageTag.Set(scannerImage.Tag),
			"--set", util.RemoverImageRepo.Set(removerImage.Repo),
			"--set", util.RemoverImageTag.Set(removerImage.Tag),
			"--set", util.CollectorEnable.Set("true"),
			"--set", util.CollectorImageRepo.Set(collectorImage.Repo),
			"--set", util.CollectorImageTag.Set(collectorImage.Tag),
			"--set", util.ManagerImageRepo.Set(managerImage.Repo),
			"--set", util.ManagerImageTag.Set(managerImage.Tag),
			"--set", util.CleanupOnSuccessDelay.Set("1m"),
		),
	).Finish(
		envfuncs.DestroyKindCluster(util.KindClusterName),
	)

	os.Exit(util.Testenv.Run(m))
}
================================================
FILE: test/e2e/tests/configmap_update/eraser_test.go
================================================
//go:build e2e
// +build e2e
package e2e
import (
"context"
"fmt"
"strings"
"testing"
"time"
"github.com/eraser-dev/eraser/test/e2e/util"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels"
"sigs.k8s.io/e2e-framework/klient/wait"
"sigs.k8s.io/e2e-framework/pkg/envconf"
"sigs.k8s.io/e2e-framework/pkg/features"
)
const (
	// numPods is the expected number of eraser pods for a manual ImageJob
	// (presumably one per node of the kind test cluster — confirm against
	// the kind config used by this suite).
	numPods = 3
	// configKey is the key inside the manager ConfigMap whose value holds
	// the EraserConfig YAML consumed by the controller.
	configKey = "controller_manager_config.yaml"
	// configmapName is the name of the manager configuration ConfigMap.
	configmapName = "eraser-manager-config"
)
// TestConfigmapUpdate verifies that changing the remover image in the manager
// ConfigMap causes subsequently scheduled eraser pods to run the new image.
func TestConfigmapUpdate(t *testing.T) {
	configmapFeat := features.New("Updating the remover image in the configmap should cause the manager to deploy using the new image").
		Assess("Update configmap, change remover image to busybox", func(ctx context.Context, t *testing.T, cfg *envconf.Config) context.Context {
			client, err := cfg.NewClient()
			if err != nil {
				t.Error("Failed to create new client", err)
			}
			configMap := corev1.ConfigMap{}
			err = client.Resources().Get(ctx, configmapName, util.TestNamespace, &configMap)
			if err != nil {
				t.Error("Unable to get configmap", err)
			}
			// Split "repo:tag" once; fail fast instead of panicking with an
			// index-out-of-range if the image reference carries no tag.
			bbRepo, bbTag, ok := strings.Cut(util.BusyboxImage, ":")
			if !ok {
				t.Fatalf("busybox image %q has no tag", util.BusyboxImage)
			}
			cmString := fmt.Sprintf(`---
apiVersion: eraser.sh/v1alpha2
kind: EraserConfig
components:
  remover:
    image:
      repo: %s
      tag: %s
`, bbRepo, bbTag)
			// Assigning into a nil map panics; guard in case the ConfigMap
			// came back with no Data.
			if configMap.Data == nil {
				configMap.Data = map[string]string{}
			}
			configMap.Data[configKey] = cmString
			err = client.Resources().Update(ctx, &configMap)
			if err != nil {
				t.Error("unable to update configmap", err)
			}
			return ctx
		}).
		Assess("Deploy Imagelist", func(ctx context.Context, t *testing.T, cfg *envconf.Config) context.Context {
			// deploy imagelist config
			if err := util.DeployEraserConfig(cfg.KubeconfigFile(), cfg.Namespace(), util.ImagelistAlpinePath); err != nil {
				t.Error("Failed to deploy image list config", err)
			}
			return ctx
		}).
		Assess("Check eraser pods for change in configuration", func(ctx context.Context, t *testing.T, cfg *envconf.Config) context.Context {
			c, err := cfg.NewClient()
			if err != nil {
				t.Error("Failed to create new client", err)
			}
			err = wait.For(
				util.NumPodsPresentForLabel(ctx, c, numPods, util.ImageJobTypeLabelKey+"="+util.ManualLabel),
				wait.WithTimeout(time.Minute*2),
				wait.WithInterval(time.Millisecond*500),
			)
			if err != nil {
				t.Fatal(err)
			}
			var ls corev1.PodList
			err = c.Resources().List(ctx, &ls, func(o *metav1.ListOptions) {
				o.LabelSelector = labels.SelectorFromSet(map[string]string{util.ImageJobTypeLabelKey: util.ManualLabel}).String()
			})
			if err != nil {
				t.Errorf("could not list pods: %v", err)
			}
			for i := range ls.Items {
				// there will only be the remover container in an imagelist deployment
				container := ls.Items[i].Spec.Containers[0]
				image := container.Image
				if image != util.BusyboxImage {
					t.Errorf("pod %s has image %s, should be %s", ls.Items[i].GetName(), image, util.BusyboxImage)
				}
			}
			return ctx
		}).
		Assess("Get logs", func(ctx context.Context, t *testing.T, cfg *envconf.Config) context.Context {
			if err := util.GetPodLogs(t); err != nil {
				t.Error("error getting eraser pod logs", err)
			}
			return ctx
		}).
		Feature()
	util.Testenv.Test(t, configmapFeat)
}
================================================
FILE: test/e2e/tests/configmap_update/main_test.go
================================================
//go:build e2e
// +build e2e
package e2e
import (
"os"
"testing"
eraserv1alpha1 "github.com/eraser-dev/eraser/api/v1alpha1"
"github.com/eraser-dev/eraser/test/e2e/util"
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
"k8s.io/client-go/kubernetes/scheme"
"sigs.k8s.io/e2e-framework/pkg/env"
"sigs.k8s.io/e2e-framework/pkg/envconf"
"sigs.k8s.io/e2e-framework/pkg/envfuncs"
)
// TestMain provisions a KinD cluster for the configmap-update test: Eraser is
// deployed with both collector and scanner disabled, and busybox is loaded so
// the test can swap it in as the remover image via the ConfigMap.
func TestMain(m *testing.M) {
	utilruntime.Must(eraserv1alpha1.AddToScheme(scheme.Scheme))

	remover := util.ParsedImages.RemoverImage
	manager := util.ParsedImages.ManagerImage

	util.Testenv = env.NewWithConfig(envconf.New())
	// Create KinD Cluster
	util.Testenv.Setup(
		envfuncs.CreateKindClusterWithConfig(util.KindClusterName, util.NodeVersion, util.KindConfigPath),
		envfuncs.CreateNamespace(util.TestNamespace),
		util.LoadImageToCluster(util.KindClusterName, util.ManagerImage, util.ManagerTarballPath),
		util.LoadImageToCluster(util.KindClusterName, util.RemoverImage, util.RemoverTarballPath),
		util.LoadImageToCluster(util.KindClusterName, util.BusyboxImage, ""),
		util.HelmDeployLatestEraserRelease(util.TestNamespace,
			"--set", util.ScannerEnable.Set("false"),
			"--set", util.CollectorEnable.Set("false"),
			"--set", util.RemoverImageRepo.Set(remover.Repo),
			"--set", util.RemoverImageTag.Set(remover.Tag),
			"--set", util.ManagerImageRepo.Set(manager.Repo),
			"--set", util.ManagerImageTag.Set(manager.Tag),
		),
		util.UpgradeEraserHelm(util.TestNamespace,
			"--set", util.CollectorEnable.Set("false"),
			"--set", util.ScannerEnable.Set("false"),
			"--set", util.RemoverImageRepo.Set(remover.Repo),
			"--set", util.RemoverImageTag.Set(remover.Tag),
			"--set", util.ManagerImageRepo.Set(manager.Repo),
			"--set", util.ManagerImageTag.Set(manager.Tag),
			"--set", util.CleanupOnSuccessDelay.Set("1m"),
		),
	).Finish(
		envfuncs.DestroyKindCluster(util.KindClusterName),
	)

	os.Exit(util.Testenv.Run(m))
}
================================================
FILE: test/e2e/tests/helm_pull_secret/eraser_test.go
================================================
//go:build e2e
// +build e2e
package e2e
import (
"context"
"testing"
"time"
"github.com/eraser-dev/eraser/test/e2e/util"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels"
"sigs.k8s.io/e2e-framework/klient/wait"
"sigs.k8s.io/e2e-framework/pkg/envconf"
"sigs.k8s.io/e2e-framework/pkg/features"
)
const (
	// expectedPods is the total number of Eraser pods the test expects to
	// find: the three collector ImageJob pods waited for in the first
	// Assess step plus the controller-manager pod.
	expectedPods = 4
)
// TestHelmPullSecret verifies that the image pull secret configured via Helm
// is propagated to every Eraser pod: the collector ImageJob pods as well as
// the controller-manager pod.
func TestHelmPullSecret(t *testing.T) {
	pullSecretsPropagated := features.New("Image Pull Secrets").
		Assess("All pods should have the correct pull secret", func(ctx context.Context, t *testing.T, cfg *envconf.Config) context.Context {
			c, err := cfg.NewClient()
			if err != nil {
				t.Fatal("Failed to create new client", err)
			}

			// Wait until the collector ImageJob has scheduled its pods.
			err = wait.For(
				util.NumPodsPresentForLabel(ctx, c, 3, util.ImageJobTypeLabelKey+"="+util.CollectorLabel),
				wait.WithTimeout(time.Minute*2),
				wait.WithInterval(time.Millisecond*500),
			)
			if err != nil {
				t.Fatal(err)
			}

			var ls corev1.PodList
			err = c.Resources().List(ctx, &ls, func(o *metav1.ListOptions) {
				o.LabelSelector = labels.SelectorFromSet(map[string]string{util.ImageJobTypeLabelKey: util.CollectorLabel}).String()
			})
			if err != nil {
				t.Errorf("could not list pods: %v", err)
			}

			var ls2 corev1.PodList
			err = c.Resources().List(ctx, &ls2, func(o *metav1.ListOptions) {
				o.LabelSelector = labels.SelectorFromSet(map[string]string{"control-plane": "controller-manager"}).String()
			})
			// Fix: this error was previously assigned but never checked.
			if err != nil {
				t.Errorf("could not list controller-manager pods: %v", err)
			}

			items := append(ls.Items, ls2.Items...)
			if len(items) != expectedPods {
				t.Errorf("incorrect number of pods for eraser deployment. should be %d but was %d", expectedPods, len(items))
			}

			// Every pod must reference the configured pull secret.
			for _, pod := range items {
				found := false
				for _, secret := range pod.Spec.ImagePullSecrets {
					if secret.Name == util.ImagePullSecret {
						found = true
						break
					}
				}
				if !found {
					t.Errorf("pod %s does not have secret set", pod.ObjectMeta.Name)
				}
			}

			return ctx
		}).
		Assess("Get logs", func(ctx context.Context, t *testing.T, cfg *envconf.Config) context.Context {
			// Collect eraser pod logs for debugging regardless of outcome.
			if err := util.GetPodLogs(t); err != nil {
				t.Error("error getting eraser pod logs", err)
			}
			return ctx
		}).
		Feature()

	util.Testenv.Test(t, pullSecretsPropagated)
}
================================================
FILE: test/e2e/tests/helm_pull_secret/main_test.go
================================================
//go:build e2e
// +build e2e
package e2e
import (
"os"
"testing"
eraserv1alpha1 "github.com/eraser-dev/eraser/api/v1alpha1"
"github.com/eraser-dev/eraser/test/e2e/util"
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
"k8s.io/client-go/kubernetes/scheme"
"sigs.k8s.io/e2e-framework/pkg/env"
"sigs.k8s.io/e2e-framework/pkg/envconf"
"sigs.k8s.io/e2e-framework/pkg/envfuncs"
)
// TestMain provisions the environment for the helm_pull_secret suite: a KinD
// cluster with all locally built images loaded, an install of the latest
// released Eraser chart, then an upgrade to the local build with the scanner,
// collector, and an image pull secret enabled.
func TestMain(m *testing.M) {
	utilruntime.Must(eraserv1alpha1.AddToScheme(scheme.Scheme))

	removerImage := util.ParsedImages.RemoverImage
	managerImage := util.ParsedImages.ManagerImage
	collectorImage := util.ParsedImages.CollectorImage
	scannerImage := util.ParsedImages.ScannerImage

	util.Testenv = env.NewWithConfig(envconf.New())
	// Create KinD Cluster
	util.Testenv.Setup(
		envfuncs.CreateKindClusterWithConfig(util.KindClusterName, util.NodeVersion, util.KindConfigPath),
		envfuncs.CreateNamespace(util.TestNamespace),
		util.LoadImageToCluster(util.KindClusterName, util.ManagerImage, util.ManagerTarballPath),
		// Fix: the remover image was previously loaded twice; each image is
		// now loaded exactly once.
		util.LoadImageToCluster(util.KindClusterName, util.RemoverImage, util.RemoverTarballPath),
		util.LoadImageToCluster(util.KindClusterName, util.CollectorImage, util.CollectorTarballPath),
		util.LoadImageToCluster(util.KindClusterName, util.ScannerImage, util.ScannerTarballPath),
		util.HelmDeployLatestEraserRelease(util.TestNamespace,
			"--set", util.ScannerEnable.Set("false"),
			"--set", util.CollectorEnable.Set("false"),
			"--set", util.RemoverImageRepo.Set(removerImage.Repo),
			"--set", util.RemoverImageTag.Set(removerImage.Tag),
			"--set", util.ManagerImageRepo.Set(managerImage.Repo),
			"--set", util.ManagerImageTag.Set(managerImage.Tag),
		),
		// Upgrade to the local build, enabling scanner/collector and the
		// pull secret; the cleanup delay keeps finished pods inspectable.
		util.UpgradeEraserHelm(util.TestNamespace,
			"--set", util.ScannerEnable.Set("true"),
			"--set", util.ScannerImageRepo.Set(scannerImage.Repo),
			"--set", util.ScannerImageTag.Set(scannerImage.Tag),
			"--set", util.RemoverImageRepo.Set(removerImage.Repo),
			"--set", util.RemoverImageTag.Set(removerImage.Tag),
			"--set", util.CollectorEnable.Set("true"),
			"--set", util.CollectorImageRepo.Set(collectorImage.Repo),
			"--set", util.CollectorImageTag.Set(collectorImage.Tag),
			"--set", util.ManagerImageRepo.Set(managerImage.Repo),
			"--set", util.ManagerImageTag.Set(managerImage.Tag),
			"--set-json", util.ImagePullSecrets.Set(util.ImagePullSecretJSON),
			"--set", util.CleanupOnSuccessDelay.Set("1m"),
		),
	).Finish(
		envfuncs.DestroyKindCluster(util.KindClusterName),
	)

	os.Exit(util.Testenv.Run(m))
}
================================================
FILE: test/e2e/tests/helm_pull_secret_imagelist/eraser_test.go
================================================
//go:build e2e
// +build e2e
package e2e
import (
"context"
"testing"
"time"
"github.com/eraser-dev/eraser/test/e2e/util"
eraserv1 "github.com/eraser-dev/eraser/api/v1"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels"
"sigs.k8s.io/e2e-framework/klient/wait"
"sigs.k8s.io/e2e-framework/pkg/envconf"
"sigs.k8s.io/e2e-framework/pkg/features"
)
const (
	// expectedPods is the total number of Eraser pods the test expects to
	// find: the three manual ImageJob pods waited for in the first Assess
	// step plus the controller-manager pod.
	expectedPods = 4
)
// TestHelmPullSecretImagelist verifies that the image pull secret configured
// via Helm is propagated to the pods of a manually triggered ImageJob
// (created by applying an ImageList) as well as the controller-manager pod.
func TestHelmPullSecretImagelist(t *testing.T) {
	pullSecretsPropagated := features.New("Image Pull Secrets").
		Assess("All pods should have the correct pull secret", func(ctx context.Context, t *testing.T, cfg *envconf.Config) context.Context {
			c, err := cfg.NewClient()
			if err != nil {
				t.Fatal("Failed to create new client", err)
			}

			// Applying an ImageList triggers a manual ImageJob.
			imgList := &eraserv1.ImageList{
				ObjectMeta: metav1.ObjectMeta{Name: util.Prune},
				Spec: eraserv1.ImageListSpec{
					Images: []string{"*"},
				},
			}
			if err := cfg.Client().Resources().Create(ctx, imgList); err != nil {
				t.Fatal(err)
			}

			// Wait until the manual ImageJob has scheduled its pods.
			err = wait.For(
				util.NumPodsPresentForLabel(ctx, c, 3, util.ImageJobTypeLabelKey+"="+util.ManualLabel),
				wait.WithTimeout(time.Minute*2),
				wait.WithInterval(time.Millisecond*500),
			)
			if err != nil {
				t.Fatal(err)
			}

			var ls corev1.PodList
			err = c.Resources().List(ctx, &ls, func(o *metav1.ListOptions) {
				o.LabelSelector = labels.SelectorFromSet(map[string]string{util.ImageJobTypeLabelKey: util.ManualLabel}).String()
			})
			if err != nil {
				t.Errorf("could not list pods: %v", err)
			}

			var ls2 corev1.PodList
			err = c.Resources().List(ctx, &ls2, func(o *metav1.ListOptions) {
				o.LabelSelector = labels.SelectorFromSet(map[string]string{"control-plane": "controller-manager"}).String()
			})
			// Fix: this error was previously assigned but never checked.
			if err != nil {
				t.Errorf("could not list controller-manager pods: %v", err)
			}

			items := append(ls.Items, ls2.Items...)
			if len(items) != expectedPods {
				t.Errorf("incorrect number of pods for eraser deployment. should be %d but was %d", expectedPods, len(items))
			}

			// Every pod must reference the configured pull secret.
			for _, pod := range items {
				found := false
				for _, secret := range pod.Spec.ImagePullSecrets {
					if secret.Name == util.ImagePullSecret {
						found = true
						break
					}
				}
				if !found {
					t.Errorf("pod %s does not have secret set", pod.ObjectMeta.Name)
				}
			}

			return ctx
		}).
		Assess("Get logs", func(ctx context.Context, t *testing.T, cfg *envconf.Config) context.Context {
			// Collect eraser pod logs for debugging regardless of outcome.
			if err := util.GetPodLogs(t); err != nil {
				t.Error("error getting eraser pod logs", err)
			}
			return ctx
		}).
		Feature()

	util.Testenv.Test(t, pullSecretsPropagated)
}
================================================
FILE: test/e2e/tests/helm_pull_secret_imagelist/main_test.go
================================================
//go:build e2e
// +build e2e
package e2e
import (
"os"
"testing"
eraserv1 "github.com/eraser-dev/eraser/api/v1"
eraserv1alpha1 "github.com/eraser-dev/eraser/api/v1alpha1"
"github.com/eraser-dev/eraser/test/e2e/util"
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
"k8s.io/client-go/kubernetes/scheme"
"sigs.k8s.io/e2e-framework/pkg/env"
"sigs.k8s.io/e2e-framework/pkg/envconf"
"sigs.k8s.io/e2e-framework/pkg/envfuncs"
)
// TestMain stands up the environment for the helm_pull_secret_imagelist
// suite: a KinD cluster with the locally built manager and remover images,
// and an Eraser Helm install configured with an image pull secret.
func TestMain(m *testing.M) {
	utilruntime.Must(eraserv1alpha1.AddToScheme(scheme.Scheme))
	utilruntime.Must(eraserv1.AddToScheme(scheme.Scheme))

	remover := util.ParsedImages.RemoverImage
	manager := util.ParsedImages.ManagerImage

	util.Testenv = env.NewWithConfig(envconf.New())

	// Chart overrides: scanner/collector disabled, local images, the image
	// pull secret under test, and a cleanup delay so that finished ImageJob
	// pods stay around long enough for the tests to inspect them.
	helmArgs := []string{
		"--set", util.ScannerEnable.Set("false"),
		"--set", util.RemoverImageRepo.Set(remover.Repo),
		"--set", util.RemoverImageTag.Set(remover.Tag),
		"--set", util.CollectorEnable.Set("false"),
		"--set", util.ManagerImageRepo.Set(manager.Repo),
		"--set", util.ManagerImageTag.Set(manager.Tag),
		"--set-json", util.ImagePullSecrets.Set(util.ImagePullSecretJSON),
		"--set", util.CleanupOnSuccessDelay.Set("1m"),
	}

	// Create KinD Cluster, load images, and deploy Eraser.
	util.Testenv.Setup(
		envfuncs.CreateKindClusterWithConfig(util.KindClusterName, util.NodeVersion, util.KindConfigPath),
		envfuncs.CreateNamespace(util.TestNamespace),
		util.LoadImageToCluster(util.KindClusterName, util.ManagerImage, util.ManagerTarballPath),
		util.LoadImageToCluster(util.KindClusterName, util.RemoverImage, util.RemoverTarballPath),
		util.DeployEraserHelm(util.TestNamespace, helmArgs...),
	).Finish(
		envfuncs.DestroyKindCluster(util.KindClusterName),
	)

	os.Exit(util.Testenv.Run(m))
}
================================================
FILE: test/e2e/tests/imagelist_alias/eraser_test.go
================================================
//go:build e2e
// +build e2e
package e2e
import (
"context"
"testing"
eraserv1 "github.com/eraser-dev/eraser/api/v1"
"github.com/eraser-dev/eraser/test/e2e/util"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"sigs.k8s.io/e2e-framework/klient/wait"
"sigs.k8s.io/e2e-framework/klient/wait/conditions"
"sigs.k8s.io/e2e-framework/pkg/envconf"
"sigs.k8s.io/e2e-framework/pkg/features"
)
// nodeString is a dedicated key type for context values, avoiding collisions
// with context keys defined by other packages.
type nodeString string

const (
	// Names of the two nginx pods the test schedules on a single node.
	nginxOneName = "nginxone"
	nginxTwoName = "nginxtwo"
	// nodeNameKey is the context key under which the chosen node's name is
	// stored so later Assess steps can retrieve it.
	nodeNameKey nodeString = "nodeName"
)
// TestEnsureAliasedImageRemoved verifies that listing an image by one of its
// tag aliases in an ImageList removes the underlying image from the node,
// even though other tags point to the same digest.
func TestEnsureAliasedImageRemoved(t *testing.T) {
	aliasFix := features.New("Specifying an image alias in the image list will delete the underlying image").
		// Deploy 3 deployments with different images
		// We'll shutdown two of them, run eraser with `*`, then check that the images for the removed deployments are removed from the cluster.
		Setup(func(ctx context.Context, t *testing.T, cfg *envconf.Config) context.Context {
			// Ensure that both nginx:one and nginx:two are tags for the same image digest
			_, err := util.DockerPullImage(util.NginxLatest)
			if err != nil {
				t.Error("failed to pull nginx image", err)
			}

			// Schedule two pods on a single node. Both pods will create containers from the same image,
			// but each pod refers to that same image by a different tag.
			nodeName := util.GetClusterNodes(t)[0]

			// At ghcr.io/eraser-dev/eraser/e2e-test/nginx there is a repository
			// containing three tags. The three tags are `latest`, `one` and
			// `two`. They are all aliases for the same image; only the name
			// differs. These images are maintained there in order to avoid
			// sideloading images into the kind cluster, which has a known bug
			// associated with it. See https://github.com/containerd/containerd/issues/7698
			// for more information.
			nginxOnePod := util.NewPod(cfg.Namespace(), util.NginxAliasOne, nginxOneName, nodeName)
			// Remember the node name for later Assess steps.
			ctx = context.WithValue(ctx, nodeNameKey, nodeName)
			if err := cfg.Client().Resources().Create(ctx, nginxOnePod); err != nil {
				t.Error("Failed to create the nginx pod", err)
			}

			nginxTwoPod := util.NewPod(cfg.Namespace(), util.NginxAliasTwo, nginxTwoName, nodeName)
			if err := cfg.Client().Resources().Create(ctx, nginxTwoPod); err != nil {
				t.Error("Failed to create the nginx pod", err)
			}

			return ctx
		}).
		Assess("Pods successfully deployed", func(ctx context.Context, t *testing.T, cfg *envconf.Config) context.Context {
			client, err := cfg.NewClient()
			if err != nil {
				t.Error("Failed to create new client", err)
			}

			// Wait for both pods to report Ready before proceeding.
			resultPod := corev1.Pod{
				ObjectMeta: metav1.ObjectMeta{Name: nginxOneName, Namespace: cfg.Namespace()},
			}
			err = wait.For(conditions.New(client.Resources()).PodConditionMatch(&resultPod, corev1.PodReady, corev1.ConditionTrue), wait.WithTimeout(util.Timeout))
			if err != nil {
				t.Error("pod not deployed", err)
			}

			resultPod = corev1.Pod{
				ObjectMeta: metav1.ObjectMeta{Name: nginxTwoName, Namespace: cfg.Namespace()},
			}
			err = wait.For(conditions.New(client.Resources()).PodConditionMatch(&resultPod, corev1.PodReady, corev1.ConditionTrue), wait.WithTimeout(util.Timeout))
			if err != nil {
				t.Error("pod not deployed", err)
			}

			return ctx
		}).
		Assess("Pods successfully deleted", func(ctx context.Context, t *testing.T, cfg *envconf.Config) context.Context {
			var (
				nginxOnePod corev1.Pod
				nginxTwoPod corev1.Pod
			)

			client, err := cfg.NewClient()
			if err != nil {
				t.Error("Failed to create new client", err)
			}

			if err := client.Resources().Get(ctx, nginxOneName, util.TestNamespace, &nginxOnePod); err != nil {
				t.Error("Failed to get the pod", err)
			}

			if err := client.Resources().Get(ctx, nginxTwoName, util.TestNamespace, &nginxTwoPod); err != nil {
				t.Error("Failed to get the pod", err)
			}

			// Delete the pods, so they will be cleaned up
			if err := client.Resources().Delete(ctx, &nginxOnePod); err != nil {
				t.Error("Failed to delete the pod", err)
			}

			if err := client.Resources().Delete(ctx, &nginxTwoPod); err != nil {
				t.Error("Failed to delete the pod", err)
			}

			toDelete := corev1.PodList{
				Items: []corev1.Pod{nginxOnePod, nginxTwoPod},
			}
			err = wait.For(conditions.New(client.Resources()).ResourcesDeleted(&toDelete))
			if err != nil {
				t.Error("failed to delete pods", err)
			}

			// Retrieve the node name stored by the Setup step.
			nodeName, ok := ctx.Value(nodeNameKey).(string)
			if !ok {
				t.Error("something is terribly wrong with the nodeName value")
			}

			if err := wait.For(util.ContainerNotPresentOnNode(nodeName, nginxOneName), wait.WithTimeout(util.Timeout)); err != nil {
				// Let's not mark this as an error
				// We only have this to prevent race conditions with the eraser spinning up
				t.Logf("error while waiting for deployment deletion: %v", err)
			}

			if err := wait.For(util.ContainerNotPresentOnNode(nodeName, nginxTwoName), wait.WithTimeout(util.Timeout)); err != nil {
				// Let's not mark this as an error
				// We only have this to prevent race conditions with the eraser spinning up
				t.Logf("error while waiting for deployment deletion: %v", err)
			}

			return ctx
		}).
		Assess("Image deleted when referencing by alias", func(ctx context.Context, t *testing.T, cfg *envconf.Config) context.Context {
			// List only one alias; the underlying image should be removed
			// for all of its tags.
			imgList := &eraserv1.ImageList{
				ObjectMeta: metav1.ObjectMeta{Name: util.Prune},
				Spec: eraserv1.ImageListSpec{
					Images: []string{util.NginxAliasTwo},
				},
			}

			if err := cfg.Client().Resources().Create(ctx, imgList); err != nil {
				t.Fatal(err)
			}

			nodeName, ok := ctx.Value(nodeNameKey).(string)
			if !ok {
				t.Error("something is terribly wrong with the nodeName value")
			}

			ctxT, cancel := context.WithTimeout(ctx, util.Timeout)
			defer cancel()
			util.CheckImageRemoved(ctxT, t, []string{nodeName}, util.Nginx)

			return ctx
		}).
		Assess("Get logs", func(ctx context.Context, t *testing.T, cfg *envconf.Config) context.Context {
			// Collect eraser pod logs for debugging regardless of outcome.
			if err := util.GetPodLogs(t); err != nil {
				t.Error("error getting eraser pod logs", err)
			}

			return ctx
		}).
		Feature()

	util.Testenv.Test(t, aliasFix)
}
================================================
FILE: test/e2e/tests/imagelist_alias/main_test.go
================================================
//go:build e2e
// +build e2e
package e2e
import (
"os"
"testing"
eraserv1 "github.com/eraser-dev/eraser/api/v1"
eraserv1alpha1 "github.com/eraser-dev/eraser/api/v1alpha1"
"github.com/eraser-dev/eraser/test/e2e/util"
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
"k8s.io/client-go/kubernetes/scheme"
"sigs.k8s.io/e2e-framework/pkg/env"
"sigs.k8s.io/e2e-framework/pkg/envconf"
"sigs.k8s.io/e2e-framework/pkg/envfuncs"
)
// TestMain provisions the environment for the imagelist_alias suite: a KinD
// cluster with the locally built manager and remover images, an install of
// the latest released Eraser chart, then an upgrade to the local build.
func TestMain(m *testing.M) {
	utilruntime.Must(eraserv1alpha1.AddToScheme(scheme.Scheme))
	utilruntime.Must(eraserv1.AddToScheme(scheme.Scheme))

	util.Testenv = env.NewWithConfig(envconf.New())

	remover := util.ParsedImages.RemoverImage
	manager := util.ParsedImages.ManagerImage

	// Chart overrides for the initial install of the latest release:
	// scanner and collector disabled, images pointed at the local build.
	installArgs := []string{
		"--set", util.ScannerEnable.Set("false"),
		"--set", util.CollectorEnable.Set("false"),
		"--set", util.RemoverImageRepo.Set(remover.Repo),
		"--set", util.RemoverImageTag.Set(remover.Tag),
		"--set", util.ManagerImageRepo.Set(manager.Repo),
		"--set", util.ManagerImageTag.Set(manager.Tag),
	}

	// Overrides for the in-place upgrade; the cleanup delay keeps finished
	// ImageJob pods around long enough for the tests to inspect them.
	upgradeArgs := []string{
		"--set", util.CollectorEnable.Set("false"),
		"--set", util.ScannerEnable.Set("false"),
		"--set", util.RemoverImageRepo.Set(remover.Repo),
		"--set", util.RemoverImageTag.Set(remover.Tag),
		"--set", util.ManagerImageRepo.Set(manager.Repo),
		"--set", util.ManagerImageTag.Set(manager.Tag),
		"--set", util.CleanupOnSuccessDelay.Set("1m"),
	}

	// Create KinD Cluster, load images, install, then upgrade Eraser.
	util.Testenv.Setup(
		envfuncs.CreateKindClusterWithConfig(util.KindClusterName, util.NodeVersion, util.KindConfigPath),
		envfuncs.CreateNamespace(util.TestNamespace),
		util.LoadImageToCluster(util.KindClusterName, util.ManagerImage, util.ManagerTarballPath),
		util.LoadImageToCluster(util.KindClusterName, util.RemoverImage, util.RemoverTarballPath),
		util.HelmDeployLatestEraserRelease(util.TestNamespace, installArgs...),
		util.UpgradeEraserHelm(util.TestNamespace, upgradeArgs...),
	).Finish(
		envfuncs.DestroyKindCluster(util.KindClusterName),
	)

	os.Exit(util.Testenv.Run(m))
}
================================================
FILE: test/e2e/tests/imagelist_change/eraser_test.go
================================================
//go:build e2e
// +build e2e
package e2e
import (
"context"
"testing"
"github.com/eraser-dev/eraser/test/e2e/util"
appsv1 "k8s.io/api/apps/v1"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels"
"sigs.k8s.io/e2e-framework/klient/wait"
"sigs.k8s.io/e2e-framework/klient/wait/conditions"
"sigs.k8s.io/e2e-framework/pkg/envconf"
"sigs.k8s.io/e2e-framework/pkg/features"
)
// TestUpdateImageList verifies that updating an existing ImageList triggers a
// new ImageJob: the first list removes nginx, and the updated list then
// prunes the remaining (redis) image.
func TestUpdateImageList(t *testing.T) {
	imglistChangeFeat := features.New("Updating the Imagelist should trigger an ImageJob").
		Setup(func(ctx context.Context, t *testing.T, cfg *envconf.Config) context.Context {
			// Deploy 2 deployments with different images (nginx, redis)
			nginxDep := util.NewDeployment(cfg.Namespace(), util.Nginx, 2, map[string]string{"app": util.Nginx}, corev1.Container{Image: util.Nginx, Name: util.Nginx})
			if err := cfg.Client().Resources().Create(ctx, nginxDep); err != nil {
				t.Error("Failed to create the nginx dep", err)
			}

			// Fix: the redis Deployment object was previously constructed
			// twice, with the first result discarded; build it once.
			redisDep := util.NewDeployment(cfg.Namespace(), util.Redis, 2, map[string]string{"app": util.Redis}, corev1.Container{Image: util.Redis, Name: util.Redis})
			if err := cfg.Client().Resources().Create(ctx, redisDep); err != nil {
				t.Fatal(err)
			}

			return ctx
		}).
		Assess("Deployments successfully deployed", func(ctx context.Context, t *testing.T, cfg *envconf.Config) context.Context {
			client, err := cfg.NewClient()
			if err != nil {
				t.Error("Failed to create new client", err)
			}

			// Wait for both deployments to become Available, and stash them
			// in the context so later steps can delete them.
			nginxDep := appsv1.Deployment{
				ObjectMeta: metav1.ObjectMeta{Name: util.Nginx, Namespace: cfg.Namespace()},
			}
			if err = wait.For(conditions.New(client.Resources()).DeploymentConditionMatch(&nginxDep, appsv1.DeploymentAvailable, corev1.ConditionTrue),
				wait.WithTimeout(util.Timeout)); err != nil {
				t.Fatal("nginx deployment not found", err)
			}
			ctx = context.WithValue(ctx, util.Nginx, &nginxDep)

			redisDep := appsv1.Deployment{
				ObjectMeta: metav1.ObjectMeta{Name: util.Redis, Namespace: cfg.Namespace()},
			}
			if err = wait.For(conditions.New(client.Resources()).DeploymentConditionMatch(&redisDep, appsv1.DeploymentAvailable, corev1.ConditionTrue),
				wait.WithTimeout(util.Timeout)); err != nil {
				t.Fatal("redis deployment not found", err)
			}
			ctx = context.WithValue(ctx, util.Redis, &redisDep)

			return ctx
		}).
		Assess("Remove deployments so the images aren't running", func(ctx context.Context, t *testing.T, cfg *envconf.Config) context.Context {
			// Here we remove the redis and nginx deployments
			var redisPods corev1.PodList
			if err := cfg.Client().Resources().List(ctx, &redisPods, func(o *metav1.ListOptions) {
				o.LabelSelector = labels.SelectorFromSet(map[string]string{"app": util.Redis}).String()
			}); err != nil {
				t.Fatal(err)
			}
			if len(redisPods.Items) != 2 {
				t.Fatal("missing pods in redis deployment")
			}

			var nginxPods corev1.PodList
			if err := cfg.Client().Resources().List(ctx, &nginxPods, func(o *metav1.ListOptions) {
				o.LabelSelector = labels.SelectorFromSet(map[string]string{"app": util.Nginx}).String()
			}); err != nil {
				t.Fatal(err)
			}
			if len(nginxPods.Items) != 2 {
				t.Fatal("missing pods in nginx deployment")
			}

			err := cfg.Client().Resources().Delete(ctx, ctx.Value(util.Redis).(*appsv1.Deployment))
			if err != nil {
				t.Fatal(err)
			}
			err = cfg.Client().Resources().Delete(ctx, ctx.Value(util.Nginx).(*appsv1.Deployment))
			if err != nil {
				t.Fatal(err)
			}

			// Wait for the containers to disappear from every node so the
			// images are no longer in use when the eraser runs.
			for _, nodeName := range util.GetClusterNodes(t) {
				err := wait.For(util.ContainerNotPresentOnNode(nodeName, util.Redis), wait.WithTimeout(util.Timeout))
				if err != nil {
					// Let's not mark this as an error
					// We only have this to prevent race conditions with the eraser spinning up
					t.Logf("error while waiting for deployment deletion: %v", err)
				}
			}
			for _, nodeName := range util.GetClusterNodes(t) {
				err := wait.For(util.ContainerNotPresentOnNode(nodeName, util.Nginx), wait.WithTimeout(util.Timeout))
				if err != nil {
					// Let's not mark this as an error
					// We only have this to prevent race conditions with the eraser spinning up
					t.Logf("error while waiting for deployment deletion: %v", err)
				}
			}

			return ctx
		}).
		Assess("Deploy imagelist to remove nginx", func(ctx context.Context, t *testing.T, cfg *envconf.Config) context.Context {
			// deploy imageJob config
			if err := util.DeployEraserConfig(cfg.KubeconfigFile(), cfg.Namespace(), util.EraserV1Alpha1ImagelistPath); err != nil {
				t.Error("Failed to deploy image list config", err)
			}

			ctxT, cancel := context.WithTimeout(ctx, util.Timeout)
			defer cancel()
			util.CheckImageRemoved(ctxT, t, util.GetClusterNodes(t), util.Nginx)

			return ctx
		}).
		Assess("Update imagelist to prune rest of images", func(ctx context.Context, t *testing.T, cfg *envconf.Config) context.Context {
			// deploy imageJob config
			if err := util.DeployEraserConfig(cfg.KubeconfigFile(), cfg.Namespace(), util.EraserV1Alpha1ImagelistUpdatedPath); err != nil {
				t.Error("Failed to deploy image list config", err)
			}

			ctxT, cancel := context.WithTimeout(ctx, util.Timeout)
			defer cancel()
			util.CheckImageRemoved(ctxT, t, util.GetClusterNodes(t), util.Redis)

			return ctx
		}).
		Assess("Get logs", func(ctx context.Context, t *testing.T, cfg *envconf.Config) context.Context {
			// Collect eraser pod logs for debugging regardless of outcome.
			if err := util.GetPodLogs(t); err != nil {
				t.Error("error getting eraser pod logs", err)
			}

			return ctx
		}).
		Feature()

	util.Testenv.Test(t, imglistChangeFeat)
}
================================================
FILE: test/e2e/tests/imagelist_change/main_test.go
================================================
//go:build e2e
// +build e2e
package e2e
import (
"os"
"testing"
eraserv1alpha1 "github.com/eraser-dev/eraser/api/v1alpha1"
"github.com/eraser-dev/eraser/test/e2e/util"
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
"k8s.io/client-go/kubernetes/scheme"
"sigs.k8s.io/e2e-framework/pkg/env"
"sigs.k8s.io/e2e-framework/pkg/envconf"
"sigs.k8s.io/e2e-framework/pkg/envfuncs"
)
// TestMain provisions the environment for the imagelist_change suite: a KinD
// cluster with the locally built manager and remover images, an install of
// the latest released Eraser chart, then an upgrade to the local build.
func TestMain(m *testing.M) {
	utilruntime.Must(eraserv1alpha1.AddToScheme(scheme.Scheme))

	removerImage := util.ParsedImages.RemoverImage
	managerImage := util.ParsedImages.ManagerImage

	util.Testenv = env.NewWithConfig(envconf.New())
	// Create KinD Cluster
	util.Testenv.Setup(
		envfuncs.CreateKindClusterWithConfig(util.KindClusterName, util.NodeVersion, util.KindConfigPath),
		envfuncs.CreateNamespace(util.TestNamespace),
		util.LoadImageToCluster(util.KindClusterName, util.ManagerImage, util.ManagerTarballPath),
		// Fix: the remover image was previously loaded twice; it is now
		// loaded exactly once.
		util.LoadImageToCluster(util.KindClusterName, util.RemoverImage, util.RemoverTarballPath),
		util.HelmDeployLatestEraserRelease(util.TestNamespace,
			"--set", util.ScannerEnable.Set("false"),
			"--set", util.CollectorEnable.Set("false"),
			"--set", util.RemoverImageRepo.Set(removerImage.Repo),
			"--set", util.RemoverImageTag.Set(removerImage.Tag),
			"--set", util.ManagerImageRepo.Set(managerImage.Repo),
			"--set", util.ManagerImageTag.Set(managerImage.Tag),
		),
		// Upgrade the release to the locally built images.
		util.UpgradeEraserHelm(util.TestNamespace,
			"--set", util.CollectorEnable.Set("false"),
			"--set", util.ScannerEnable.Set("false"),
			"--set", util.RemoverImageRepo.Set(removerImage.Repo),
			"--set", util.RemoverImageTag.Set(removerImage.Tag),
			"--set", util.ManagerImageRepo.Set(managerImage.Repo),
			"--set", util.ManagerImageTag.Set(managerImage.Tag),
		),
	).Finish(
		envfuncs.DestroyKindCluster(util.KindClusterName),
	)

	os.Exit(util.Testenv.Run(m))
}
================================================
FILE: test/e2e/tests/imagelist_exclusion_list/eraser_test.go
================================================
//go:build e2e
// +build e2e
package e2e
import (
"context"
"testing"
"github.com/eraser-dev/eraser/test/e2e/util"
appsv1 "k8s.io/api/apps/v1"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels"
"sigs.k8s.io/e2e-framework/klient/wait"
"sigs.k8s.io/e2e-framework/klient/wait/conditions"
"sigs.k8s.io/e2e-framework/pkg/envconf"
"sigs.k8s.io/e2e-framework/pkg/features"
)
// TestExclusionList verifies that images matching an exclusion-list
// configmap (labelled eraser.sh/exclude.list) are skipped by Eraser even
// when an ImageList would otherwise remove them.
func TestExclusionList(t *testing.T) {
	excludedImageFeat := features.New("Verify Eraser will skip excluded images").
		Setup(func(ctx context.Context, t *testing.T, cfg *envconf.Config) context.Context {
			podSelectorLabels := map[string]string{"app": util.Nginx}
			nginxDep := util.NewDeployment(cfg.Namespace(), util.Nginx, 2, podSelectorLabels, corev1.Container{Image: util.Nginx, Name: util.Nginx})
			if err := cfg.Client().Resources().Create(ctx, nginxDep); err != nil {
				t.Error("Failed to create the dep", err)
			}

			// Remove any leftover ImageLists/ImageJobs from earlier tests.
			if err := util.DeleteImageListsAndJobs(cfg.KubeconfigFile()); err != nil {
				// Fix: corrected typo in the error message ("obejcts").
				t.Error("Failed to clean eraser objects", err)
			}

			// create excluded configmap and add docker.io/library/*
			excluded := corev1.ConfigMap{
				ObjectMeta: metav1.ObjectMeta{
					Name:      "excluded",
					Namespace: cfg.Namespace(),
					Labels:    map[string]string{"eraser.sh/exclude.list": "true"},
				},
				Data: map[string]string{"test.json": "{\"excluded\": [\"docker.io/library/*\"]}"},
			}
			if err := cfg.Client().Resources().Create(ctx, &excluded); err != nil {
				t.Error("failed to create excluded configmap", err)
			}

			return ctx
		}).
		Assess("deployment successfully deployed", func(ctx context.Context, t *testing.T, cfg *envconf.Config) context.Context {
			client, err := cfg.NewClient()
			if err != nil {
				t.Error("Failed to create new client", err)
			}

			// Wait for the nginx deployment to become Available and stash it
			// in the context so the next step can delete it.
			resultDeployment := appsv1.Deployment{
				ObjectMeta: metav1.ObjectMeta{Name: util.Nginx, Namespace: cfg.Namespace()},
			}
			if err = wait.For(conditions.New(client.Resources()).DeploymentConditionMatch(&resultDeployment, appsv1.DeploymentAvailable, corev1.ConditionTrue),
				wait.WithTimeout(util.Timeout)); err != nil {
				t.Error("deployment not found", err)
			}

			return context.WithValue(ctx, util.Nginx, &resultDeployment)
		}).
		Assess("Check image remains in all nodes", func(ctx context.Context, t *testing.T, cfg *envconf.Config) context.Context {
			// delete deployment
			client, err := cfg.NewClient()
			if err != nil {
				t.Error("Failed to create new client", err)
			}

			var pods corev1.PodList
			err = client.Resources().List(ctx, &pods, func(o *metav1.ListOptions) {
				o.LabelSelector = labels.SelectorFromSet(labels.Set{"app": util.Nginx}).String()
			})
			if err != nil {
				t.Fatal(err)
			}

			dep := ctx.Value(util.Nginx).(*appsv1.Deployment)
			if err := client.Resources().Delete(ctx, dep); err != nil {
				t.Error("Failed to delete the dep", err)
			}

			// Wait for the containers to disappear so the image is unused
			// by the time the eraser runs.
			for _, nodeName := range util.GetClusterNodes(t) {
				err := wait.For(util.ContainerNotPresentOnNode(nodeName, util.Nginx), wait.WithTimeout(util.Timeout))
				if err != nil {
					t.Logf("error while waiting for deployment deletion: %v", err)
				}
			}

			// create imagelist to trigger deletion
			if err := util.DeployEraserConfig(cfg.KubeconfigFile(), cfg.Namespace(), util.EraserV1ImagelistPath); err != nil {
				t.Error("Failed to deploy image list config", err)
			}

			// Fix: removed a dead context.WithTimeout whose derived context
			// was discarded and immediately cancelled.
			// since docker.io/library/* was excluded, nginx should still exist following deletion
			util.CheckImagesExist(t, util.GetClusterNodes(t), util.Nginx)

			return ctx
		}).
		Assess("Get logs", func(ctx context.Context, t *testing.T, cfg *envconf.Config) context.Context {
			// Collect eraser pod logs for debugging regardless of outcome.
			if err := util.GetPodLogs(t); err != nil {
				t.Error("error getting eraser pod logs", err)
			}

			return ctx
		}).
		Feature()

	util.Testenv.Test(t, excludedImageFeat)
}
================================================
FILE: test/e2e/tests/imagelist_exclusion_list/main_test.go
================================================
//go:build e2e
// +build e2e
package e2e
import (
"os"
"testing"
eraserv1alpha1 "github.com/eraser-dev/eraser/api/v1alpha1"
"github.com/eraser-dev/eraser/test/e2e/util"
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
"k8s.io/client-go/kubernetes/scheme"
"sigs.k8s.io/e2e-framework/pkg/env"
"sigs.k8s.io/e2e-framework/pkg/envconf"
"sigs.k8s.io/e2e-framework/pkg/envfuncs"
)
// TestMain provisions the environment for the imagelist_exclusion_list
// suite: a KinD cluster with the locally built manager and remover images,
// an install of the latest released Eraser chart, then an upgrade to the
// local build.
func TestMain(m *testing.M) {
	utilruntime.Must(eraserv1alpha1.AddToScheme(scheme.Scheme))

	removerImage := util.ParsedImages.RemoverImage
	managerImage := util.ParsedImages.ManagerImage

	util.Testenv = env.NewWithConfig(envconf.New())
	// Create KinD Cluster
	util.Testenv.Setup(
		envfuncs.CreateKindClusterWithConfig(util.KindClusterName, util.NodeVersion, util.KindConfigPath),
		envfuncs.CreateNamespace(util.TestNamespace),
		util.LoadImageToCluster(util.KindClusterName, util.ManagerImage, util.ManagerTarballPath),
		// Fix: the remover image was previously loaded twice; it is now
		// loaded exactly once.
		util.LoadImageToCluster(util.KindClusterName, util.RemoverImage, util.RemoverTarballPath),
		util.HelmDeployLatestEraserRelease(util.TestNamespace,
			"--set", util.ScannerEnable.Set("false"),
			"--set", util.CollectorEnable.Set("false"),
			"--set", util.RemoverImageRepo.Set(removerImage.Repo),
			"--set", util.RemoverImageTag.Set(removerImage.Tag),
			"--set", util.ManagerImageRepo.Set(managerImage.Repo),
			"--set", util.ManagerImageTag.Set(managerImage.Tag),
		),
		// Upgrade to the local build; the cleanup delay keeps finished
		// ImageJob pods around long enough for the tests to inspect them.
		util.UpgradeEraserHelm(util.TestNamespace,
			"--set", util.CollectorEnable.Set("false"),
			"--set", util.ScannerEnable.Set("false"),
			"--set", util.RemoverImageRepo.Set(removerImage.Repo),
			"--set", util.RemoverImageTag.Set(removerImage.Tag),
			"--set", util.ManagerImageRepo.Set(managerImage.Repo),
			"--set", util.ManagerImageTag.Set(managerImage.Tag),
			"--set", util.CleanupOnSuccessDelay.Set("1m"),
		),
	).Finish(
		envfuncs.DestroyKindCluster(util.KindClusterName),
	)

	os.Exit(util.Testenv.Run(m))
}
================================================
FILE: test/e2e/tests/imagelist_include_nodes/eraser_test.go
================================================
//go:build e2e
// +build e2e
package e2e
import (
"context"
"testing"
"github.com/eraser-dev/eraser/test/e2e/util"
appsv1 "k8s.io/api/apps/v1"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels"
clientgo "k8s.io/client-go/kubernetes"
"sigs.k8s.io/e2e-framework/klient/wait"
"sigs.k8s.io/e2e-framework/klient/wait/conditions"
"sigs.k8s.io/e2e-framework/pkg/envconf"
"sigs.k8s.io/e2e-framework/pkg/features"
)
// TestIncludeNodes verifies that, with the chart's node filter set to
// "include", labelling a node with the eraser.sh/cleanup.filter label causes
// the ImageJob to run ONLY on that node: the image is removed from the
// labelled node and left intact on all the others.
func TestIncludeNodes(t *testing.T) {
	includeNodesFeat := features.New("Applying the eraser.sh/cleanup.filter label to a node should only schedule ImageJob pods on that node").
		Setup(func(ctx context.Context, t *testing.T, cfg *envconf.Config) context.Context {
			// fetch node info
			c := cfg.Client().RESTConfig()
			k8sClient, err := clientgo.NewForConfig(c)
			if err != nil {
				// Fatal: every subsequent call dereferences k8sClient, so
				// continuing after t.Error would panic on a nil client.
				t.Fatal("unable to obtain k8s client from config", err)
			}

			// Run an nginx deployment so its image is cached on the nodes.
			podSelectorLabels := map[string]string{"app": util.Nginx}
			nginxDep := util.NewDeployment(cfg.Namespace(), util.Nginx, 2, podSelectorLabels, corev1.Container{Image: util.Nginx, Name: util.Nginx})
			if err := cfg.Client().Resources().Create(ctx, nginxDep); err != nil {
				t.Error("Failed to create the dep", err)
			}

			// Exactly one node is expected to match the filter selector.
			nodeList, err := k8sClient.CoreV1().Nodes().List(ctx, metav1.ListOptions{LabelSelector: util.FilterNodeSelector})
			if err != nil {
				t.Fatalf("unable to list node %s\n%#v", util.FilterNodeSelector, err)
			}
			if len(nodeList.Items) != 1 {
				// Fatal: indexing nodeList.Items[0] below would otherwise
				// panic with index out of range.
				t.Fatalf("List operation for selector %s resulted in the wrong number of nodes", util.FilterNodeSelector)
			}

			// Apply the include-filter label to the selected node.
			nodeInclude := &nodeList.Items[0]
			nodeInclude.ObjectMeta.Labels[util.FilterLabelKey] = util.FilterLabelValue
			nodeInclude, err = k8sClient.CoreV1().Nodes().Update(ctx, nodeInclude, metav1.UpdateOptions{})
			if err != nil {
				t.Errorf("unable to update node %#v with label {%s: %s}\nerror: %#v", nodeInclude, util.FilterLabelKey, util.FilterLabelValue, err)
			}
			return ctx
		}).
		Assess("Deployment and labelling the node have succeeded", func(ctx context.Context, t *testing.T, cfg *envconf.Config) context.Context {
			c := cfg.Client().RESTConfig()
			k8sClient, err := clientgo.NewForConfig(c)
			if err != nil {
				// Fatal: k8sClient is used inside the wait loop below.
				t.Fatal("unable to obtain k8s client from config", err)
			}

			// Wait until exactly one node carries the filter label.
			err = wait.For(func() (bool, error) {
				nodeList, err := k8sClient.CoreV1().Nodes().List(ctx, metav1.ListOptions{LabelSelector: util.FilterLabelKey})
				if err != nil {
					return false, err
				}
				return len(nodeList.Items) == 1, nil
			}, wait.WithTimeout(util.Timeout))
			if err != nil {
				t.Errorf("error while waiting for selector %s to be added to node\n%#v", util.FilterNodeSelector, err)
			}

			// Wait for the nginx deployment to become Available.
			resultDeployment := appsv1.Deployment{
				ObjectMeta: metav1.ObjectMeta{Name: util.Nginx, Namespace: cfg.Namespace()},
			}
			if err = wait.For(
				conditions.New(cfg.Client().Resources()).DeploymentConditionMatch(&resultDeployment, appsv1.DeploymentAvailable, corev1.ConditionTrue),
				wait.WithTimeout(util.Timeout),
			); err != nil {
				t.Error("deployment not found", err)
			}
			// Stash the deployment in the context for the deletion step.
			return context.WithValue(ctx, util.Nginx, &resultDeployment)
		}).
		Assess("Node(s) successfully included", func(ctx context.Context, t *testing.T, cfg *envconf.Config) context.Context {
			// delete deployment
			client, err := cfg.NewClient()
			if err != nil {
				// Fatal: client is dereferenced immediately below.
				t.Fatal("Failed to create new client", err)
			}
			var pods corev1.PodList
			err = client.Resources().List(ctx, &pods, func(o *metav1.ListOptions) {
				o.LabelSelector = labels.SelectorFromSet(labels.Set{"app": util.Nginx}).String()
			})
			if err != nil {
				t.Fatal(err)
			}

			dep := ctx.Value(util.Nginx).(*appsv1.Deployment)
			if err := client.Resources().Delete(ctx, dep); err != nil {
				t.Error("Failed to delete the dep", err)
			}
			err = wait.For(util.ContainerNotPresentOnNode(util.FilterNodeName, util.Nginx), wait.WithTimeout(util.Timeout))
			if err != nil {
				// Let's not mark this as an error
				// We only have this to prevent race conditions with the eraser spinning up
				t.Logf("error while waiting for deployment deletion: %v", err)
			}

			// deploy imageJob config
			if err = util.DeployEraserConfig(cfg.KubeconfigFile(), cfg.Namespace(), util.EraserV1Alpha1ImagelistPath); err != nil {
				t.Error("Failed to deploy image list config", err)
			}

			// get pod logs before imagejob is deleted
			if err := util.GetPodLogs(t); err != nil {
				t.Error("error getting eraser pod logs", err)
			}

			ctxT, cancel := context.WithTimeout(ctx, util.Timeout)
			defer cancel()
			// ensure image is removed from filtered node.
			util.CheckImageRemoved(ctxT, t, []string{util.FilterNodeName}, util.Nginx)

			// Wait for the imagejob to be completed by checking for its nonexistence in the cluster
			err = wait.For(util.ImagejobNotInCluster(cfg.KubeconfigFile()), wait.WithTimeout(util.Timeout))
			if err != nil {
				t.Logf("error while waiting for imagejob cleanup: %v", err)
			}

			clusterNodes := util.GetClusterNodes(t)
			clusterNodes = util.DeleteStringFromSlice(clusterNodes, util.FilterNodeName)
			// the imagejob has done its work, so now we can check the node to make sure it didn't remove the images from the remaining nodes
			util.CheckImagesExist(t, clusterNodes, util.Nginx)
			return ctx
		}).
		Feature()
	util.Testenv.Test(t, includeNodesFeat)
}
================================================
FILE: test/e2e/tests/imagelist_include_nodes/main_test.go
================================================
//go:build e2e
// +build e2e
package e2e
import (
"os"
"testing"
eraserv1alpha1 "github.com/eraser-dev/eraser/api/v1alpha1"
"github.com/eraser-dev/eraser/test/e2e/util"
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
"k8s.io/client-go/kubernetes/scheme"
"sigs.k8s.io/e2e-framework/pkg/env"
"sigs.k8s.io/e2e-framework/pkg/envconf"
"sigs.k8s.io/e2e-framework/pkg/envfuncs"
)
// TestMain provisions a KinD cluster for the include-nodes suite, installs
// the latest Eraser release, then upgrades to the locally built images with
// the node filter set to "include" so only labelled nodes run ImageJob pods.
// The cluster is destroyed when the suite finishes.
func TestMain(m *testing.M) {
	utilruntime.Must(eraserv1alpha1.AddToScheme(scheme.Scheme))

	removerImage := util.ParsedImages.RemoverImage
	managerImage := util.ParsedImages.ManagerImage

	util.Testenv = env.NewWithConfig(envconf.New())
	// Create KinD Cluster
	util.Testenv.Setup(
		envfuncs.CreateKindClusterWithConfig(util.KindClusterName, util.NodeVersion, util.KindConfigPath),
		envfuncs.CreateNamespace(util.TestNamespace),
		util.LoadImageToCluster(util.KindClusterName, util.ManagerImage, util.ManagerTarballPath),
		// Each tarball only needs to be side-loaded once; the original code
		// loaded the identical remover tarball twice.
		util.LoadImageToCluster(util.KindClusterName, util.RemoverImage, util.RemoverTarballPath),
		util.HelmDeployLatestEraserRelease(util.TestNamespace,
			"--set", util.ScannerEnable.Set("false"),
			"--set", util.CollectorEnable.Set("false"),
			"--set", util.RemoverImageRepo.Set(removerImage.Repo),
			"--set", util.RemoverImageTag.Set(removerImage.Tag),
			"--set", util.ManagerImageRepo.Set(managerImage.Repo),
			"--set", util.ManagerImageTag.Set(managerImage.Tag),
		),
		util.UpgradeEraserHelm(util.TestNamespace,
			"--set", util.CollectorEnable.Set("false"),
			"--set", util.ScannerEnable.Set("false"),
			"--set", util.RemoverImageRepo.Set(removerImage.Repo),
			"--set", util.RemoverImageTag.Set(removerImage.Tag),
			"--set", util.ManagerImageRepo.Set(managerImage.Repo),
			"--set", util.ManagerImageTag.Set(managerImage.Tag),
			"--set", util.FilterNodesType.Set("include"),
			"--set", util.CleanupOnSuccessDelay.Set("1m"),
		),
	).Finish(
		envfuncs.DestroyKindCluster(util.KindClusterName),
	)

	os.Exit(util.Testenv.Run(m))
}
================================================
FILE: test/e2e/tests/imagelist_prune_images/eraser_test.go
================================================
//go:build e2e
// +build e2e
package e2e
import (
"context"
"testing"
eraserv1 "github.com/eraser-dev/eraser/api/v1"
"github.com/eraser-dev/eraser/test/e2e/util"
appsv1 "k8s.io/api/apps/v1"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels"
"sigs.k8s.io/e2e-framework/klient/wait"
"sigs.k8s.io/e2e-framework/klient/wait/conditions"
"sigs.k8s.io/e2e-framework/pkg/envconf"
"sigs.k8s.io/e2e-framework/pkg/features"
)
// TestPrune verifies that an ImageList containing the wildcard "*" removes
// every non-running image from the cluster: the images of two deleted
// deployments (redis, caddy) are pruned while the image of the still-running
// nginx deployment is preserved.
func TestPrune(t *testing.T) {
	pruneImagesFeat := features.New("Prune all non-running images from cluster").
		// Deploy 3 deployments with different images
		// We'll shutdown two of them, run eraser with `*`, then check that the images for the removed deployments are removed from the cluster.
		Setup(func(ctx context.Context, t *testing.T, cfg *envconf.Config) context.Context {
			nginxDep := util.NewDeployment(cfg.Namespace(), util.Nginx, 2, map[string]string{"app": util.Nginx}, corev1.Container{Image: util.Nginx, Name: util.Nginx})
			if err := cfg.Client().Resources().Create(ctx, nginxDep); err != nil {
				t.Error("Failed to create the nginx dep", err)
			}

			// The original code built each of the redis/caddy deployment
			// objects twice and discarded the first result; the dead
			// duplicate calls have been removed.
			err := cfg.Client().Resources().Create(ctx, util.NewDeployment(cfg.Namespace(), util.Redis, 2, map[string]string{"app": util.Redis}, corev1.Container{Image: util.Redis, Name: util.Redis}))
			if err != nil {
				t.Fatal(err)
			}

			if err := cfg.Client().Resources().Create(ctx, util.NewDeployment(cfg.Namespace(), util.Caddy, 2, map[string]string{"app": util.Caddy}, corev1.Container{Image: util.Caddy, Name: util.Caddy})); err != nil {
				t.Fatal(err)
			}
			return ctx
		}).
		Assess("Deployments successfully deployed", func(ctx context.Context, t *testing.T, cfg *envconf.Config) context.Context {
			client, err := cfg.NewClient()
			if err != nil {
				// Fatal: client is dereferenced in every wait below.
				t.Fatal("Failed to create new client", err)
			}

			// Wait for each deployment to report Available, then stash it in
			// the context so the deletion step can retrieve it.
			nginxDep := appsv1.Deployment{
				ObjectMeta: metav1.ObjectMeta{Name: util.Nginx, Namespace: cfg.Namespace()},
			}
			if err = wait.For(conditions.New(client.Resources()).DeploymentConditionMatch(&nginxDep, appsv1.DeploymentAvailable, corev1.ConditionTrue),
				wait.WithTimeout(util.Timeout)); err != nil {
				t.Fatal("nginx deployment not found", err)
			}
			ctx = context.WithValue(ctx, util.Nginx, &nginxDep)

			redisDep := appsv1.Deployment{
				ObjectMeta: metav1.ObjectMeta{Name: util.Redis, Namespace: cfg.Namespace()},
			}
			if err = wait.For(conditions.New(client.Resources()).DeploymentConditionMatch(&redisDep, appsv1.DeploymentAvailable, corev1.ConditionTrue),
				wait.WithTimeout(util.Timeout)); err != nil {
				t.Fatal("redis deployment not found", err)
			}
			ctx = context.WithValue(ctx, util.Redis, &redisDep)

			caddyDep := appsv1.Deployment{
				ObjectMeta: metav1.ObjectMeta{Name: util.Caddy, Namespace: cfg.Namespace()},
			}
			if err = wait.For(conditions.New(client.Resources()).DeploymentConditionMatch(&caddyDep, appsv1.DeploymentAvailable, corev1.ConditionTrue),
				wait.WithTimeout(util.Timeout)); err != nil {
				t.Fatal("caddy deployment not found", err)
			}
			ctx = context.WithValue(ctx, util.Caddy, &caddyDep)

			return ctx
		}).
		Assess("Remove some of the deployments so the images aren't running", func(ctx context.Context, t *testing.T, cfg *envconf.Config) context.Context {
			// Here we remove the redis and caddy deployments
			// Keep nginx running and ensure nginx is not deleted.
			var redisPods corev1.PodList
			if err := cfg.Client().Resources().List(ctx, &redisPods, func(o *metav1.ListOptions) {
				o.LabelSelector = labels.SelectorFromSet(map[string]string{"app": util.Redis}).String()
			}); err != nil {
				t.Fatal(err)
			}
			if len(redisPods.Items) != 2 {
				t.Fatal("missing pods in redis deployment")
			}

			var caddyPods corev1.PodList
			if err := cfg.Client().Resources().List(ctx, &caddyPods, func(o *metav1.ListOptions) {
				o.LabelSelector = labels.SelectorFromSet(map[string]string{"app": util.Caddy}).String()
			}); err != nil {
				t.Fatal(err)
			}
			if len(caddyPods.Items) != 2 {
				t.Fatal("missing pods in caddy deployment")
			}

			err := cfg.Client().Resources().Delete(ctx, ctx.Value(util.Redis).(*appsv1.Deployment))
			if err != nil {
				t.Fatal(err)
			}
			err = cfg.Client().Resources().Delete(ctx, ctx.Value(util.Caddy).(*appsv1.Deployment))
			if err != nil {
				t.Fatal(err)
			}

			// Wait for the redis/caddy containers to disappear from every
			// node so they are no longer counted as running.
			for _, nodeName := range util.GetClusterNodes(t) {
				err := wait.For(util.ContainerNotPresentOnNode(nodeName, util.Redis), wait.WithTimeout(util.Timeout))
				if err != nil {
					// Let's not mark this as an error
					// We only have this to prevent race conditions with the eraser spinning up
					t.Logf("error while waiting for deployment deletion: %v", err)
				}
			}
			for _, nodeName := range util.GetClusterNodes(t) {
				err := wait.For(util.ContainerNotPresentOnNode(nodeName, util.Caddy), wait.WithTimeout(util.Timeout))
				if err != nil {
					// Let's not mark this as an error
					// We only have this to prevent race conditions with the eraser spinning up
					t.Logf("error while waiting for deployment deletion: %v", err)
				}
			}
			return ctx
		}).
		Assess("All non-running images are removed from the cluster", func(ctx context.Context, t *testing.T, cfg *envconf.Config) context.Context {
			// The wildcard image list triggers a prune of everything that is
			// not currently running.
			imgList := &eraserv1.ImageList{
				ObjectMeta: metav1.ObjectMeta{Name: util.Prune},
				Spec: eraserv1.ImageListSpec{
					Images: []string{"*"},
				},
			}
			if err := cfg.Client().Resources().Create(ctx, imgList); err != nil {
				t.Fatal(err)
			}
			ctx = context.WithValue(ctx, util.Prune, imgList)

			// The first check could take some extra time, where as things should be done already for the 2nd check.
			// So we'll give plenty of time and fail slow here.
			ctxT, cancel := context.WithTimeout(ctx, util.Timeout)
			defer cancel()
			util.CheckImageRemoved(ctxT, t, util.GetClusterNodes(t), util.Redis)

			ctxT, cancel = context.WithTimeout(ctx, util.Timeout)
			defer cancel()
			util.CheckImageRemoved(ctxT, t, util.GetClusterNodes(t), util.Caddy)

			// Make sure nginx is still there
			util.CheckImagesExist(t, util.GetClusterNodes(t), util.Nginx)
			return ctx
		}).
		Assess("Get logs", func(ctx context.Context, t *testing.T, cfg *envconf.Config) context.Context {
			if err := util.GetPodLogs(t); err != nil {
				t.Error("error getting eraser pod logs", err)
			}
			return ctx
		}).
		Feature()
	util.Testenv.Test(t, pruneImagesFeat)
}
================================================
FILE: test/e2e/tests/imagelist_prune_images/main_test.go
================================================
//go:build e2e
// +build e2e
package e2e
import (
"os"
"testing"
eraserv1 "github.com/eraser-dev/eraser/api/v1"
eraserv1alpha1 "github.com/eraser-dev/eraser/api/v1alpha1"
"github.com/eraser-dev/eraser/test/e2e/util"
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
"k8s.io/client-go/kubernetes/scheme"
"sigs.k8s.io/e2e-framework/pkg/env"
"sigs.k8s.io/e2e-framework/pkg/envconf"
"sigs.k8s.io/e2e-framework/pkg/envfuncs"
)
// TestMain provisions a KinD cluster for the prune suite (it registers both
// the v1alpha1 and v1 schemes because the test creates a v1 ImageList),
// installs the latest Eraser release, then upgrades to the locally built
// images. The cluster is destroyed when the suite finishes.
func TestMain(m *testing.M) {
	utilruntime.Must(eraserv1alpha1.AddToScheme(scheme.Scheme))
	utilruntime.Must(eraserv1.AddToScheme(scheme.Scheme))

	removerImage := util.ParsedImages.RemoverImage
	managerImage := util.ParsedImages.ManagerImage

	util.Testenv = env.NewWithConfig(envconf.New())
	// Create KinD Cluster
	util.Testenv.Setup(
		envfuncs.CreateKindClusterWithConfig(util.KindClusterName, util.NodeVersion, util.KindConfigPath),
		envfuncs.CreateNamespace(util.TestNamespace),
		util.LoadImageToCluster(util.KindClusterName, util.ManagerImage, util.ManagerTarballPath),
		// Each tarball only needs to be side-loaded once; the original code
		// loaded the identical remover tarball twice.
		util.LoadImageToCluster(util.KindClusterName, util.RemoverImage, util.RemoverTarballPath),
		util.HelmDeployLatestEraserRelease(util.TestNamespace,
			"--set", util.ScannerEnable.Set("false"),
			"--set", util.CollectorEnable.Set("false"),
			"--set", util.RemoverImageRepo.Set(removerImage.Repo),
			"--set", util.RemoverImageTag.Set(removerImage.Tag),
			"--set", util.ManagerImageRepo.Set(managerImage.Repo),
			"--set", util.ManagerImageTag.Set(managerImage.Tag),
		),
		util.UpgradeEraserHelm(util.TestNamespace,
			"--set", util.CollectorEnable.Set("false"),
			"--set", util.ScannerEnable.Set("false"),
			"--set", util.RemoverImageRepo.Set(removerImage.Repo),
			"--set", util.RemoverImageTag.Set(removerImage.Tag),
			"--set", util.ManagerImageRepo.Set(managerImage.Repo),
			"--set", util.ManagerImageTag.Set(managerImage.Tag),
			"--set", util.CleanupOnSuccessDelay.Set("1m"),
		),
	).Finish(
		envfuncs.DestroyKindCluster(util.KindClusterName),
	)

	os.Exit(util.Testenv.Run(m))
}
================================================
FILE: test/e2e/tests/imagelist_rm_images/eraser_test.go
================================================
//go:build e2e
// +build e2e
package e2e
import (
"context"
"testing"
"time"
"github.com/eraser-dev/eraser/test/e2e/util"
appsv1 "k8s.io/api/apps/v1"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels"
"sigs.k8s.io/e2e-framework/klient/k8s/resources"
"sigs.k8s.io/e2e-framework/klient/wait"
"sigs.k8s.io/e2e-framework/klient/wait/conditions"
"sigs.k8s.io/e2e-framework/pkg/envconf"
"sigs.k8s.io/e2e-framework/pkg/features"
)
const (
	// restartTimeout bounds how long the final assessment watches the
	// cluster to make sure the eraser ImageJob pods are not re-created
	// after their initial run completes.
	restartTimeout = time.Minute
)
// TestImageListTriggersRemoverImageJob checks the core manual-removal flow:
// applying an ImageList spawns a remover ImageJob whose pods delete the
// listed image from every node; the job pods are then cleaned up and are
// not accidentally re-deployed afterwards.
func TestImageListTriggersRemoverImageJob(t *testing.T) {
	rmImageFeat := features.New("An ImageList should trigger a remover ImageJob").
		Setup(func(ctx context.Context, t *testing.T, cfg *envconf.Config) context.Context {
			// Run an nginx deployment so its image is cached on the nodes
			// and there is something for the remover to delete later.
			podSelectorLabels := map[string]string{"app": util.Nginx}
			nginxDep := util.NewDeployment(cfg.Namespace(), util.Nginx, 2, podSelectorLabels, corev1.Container{Image: util.Nginx, Name: util.Nginx})
			if err := cfg.Client().Resources().Create(ctx, nginxDep); err != nil {
				t.Error("Failed to create the dep", err)
			}
			return ctx
		}).
		Assess("deployment successfully deployed", func(ctx context.Context, t *testing.T, cfg *envconf.Config) context.Context {
			client, err := cfg.NewClient()
			if err != nil {
				t.Error("Failed to create new client", err)
			}
			// Wait for the nginx deployment to become Available, then stash
			// it in the context for the deletion step below.
			resultDeployment := appsv1.Deployment{
				ObjectMeta: metav1.ObjectMeta{Name: util.Nginx, Namespace: cfg.Namespace()},
			}
			if err = wait.For(conditions.New(client.Resources()).DeploymentConditionMatch(&resultDeployment, appsv1.DeploymentAvailable, corev1.ConditionTrue),
				wait.WithTimeout(util.Timeout)); err != nil {
				t.Error("deployment not found", err)
			}
			return context.WithValue(ctx, util.Nginx, &resultDeployment)
		}).
		Assess("Images successfully deleted from all nodes", func(ctx context.Context, t *testing.T, cfg *envconf.Config) context.Context {
			// delete deployment
			client, err := cfg.NewClient()
			if err != nil {
				t.Error("Failed to create new client", err)
			}
			var pods corev1.PodList
			err = client.Resources().List(ctx, &pods, func(o *metav1.ListOptions) {
				o.LabelSelector = labels.SelectorFromSet(labels.Set{"app": util.Nginx}).String()
			})
			if err != nil {
				t.Fatal(err)
			}
			// Delete the deployment so the nginx image is no longer running
			// anywhere and becomes eligible for removal.
			dep := ctx.Value(util.Nginx).(*appsv1.Deployment)
			if err := client.Resources().Delete(ctx, dep); err != nil {
				t.Error("Failed to delete the dep", err)
			}
			for _, nodeName := range util.GetClusterNodes(t) {
				err := wait.For(util.ContainerNotPresentOnNode(nodeName, util.Nginx), wait.WithTimeout(util.Timeout))
				if err != nil {
					// Let's not mark this as an error
					// We only have this to prevent race conditions with the eraser spinning up
					t.Logf("error while waiting for deployment deletion: %v", err)
				}
			}
			// deploy imageJob config
			if err := util.DeployEraserConfig(cfg.KubeconfigFile(), cfg.Namespace(), util.EraserV1Alpha1ImagelistPath); err != nil {
				t.Error("Failed to deploy image list config", err)
			}
			podNames := []string{}
			// get eraser pod name
			// Phase 1: record the names of the manual ImageJob pods. The
			// suite expects exactly 3 such pods — presumably one per node in
			// the KinD cluster; confirm against the kind config if this
			// count changes.
			err = wait.For(func() (bool, error) {
				l := corev1.PodList{}
				err = client.Resources().List(ctx, &l, resources.WithLabelSelector(util.ImageJobTypeLabelKey+"="+util.ManualLabel))
				if err != nil {
					return false, err
				}
				if len(l.Items) != 3 {
					return false, nil
				}
				for _, pod := range l.Items {
					podNames = append(podNames, pod.ObjectMeta.Name)
				}
				return true, nil
			}, wait.WithTimeout(time.Minute*2), wait.WithInterval(time.Millisecond*500))
			if err != nil {
				t.Fatal(err)
			}
			// wait for those specific pods to no longer exist, so that when we
			// check later for an accidental redeployment, we are sure it is
			// actually a new deployment.
			// Phase 2: succeed once none of the recorded pod names remain.
			err = wait.For(func() (bool, error) {
				var l corev1.PodList
				err = client.Resources().List(ctx, &l, resources.WithLabelSelector(util.ImageJobTypeLabelKey+"="+util.ManualLabel))
				if err != nil {
					return false, err
				}
				if len(l.Items) == 0 {
					return true, nil
				}
				for _, name := range podNames {
					for _, pod := range l.Items {
						if name == pod.ObjectMeta.Name {
							return false, nil
						}
					}
				}
				return true, nil
			}, wait.WithTimeout(util.Timeout), wait.WithInterval(time.Millisecond*500))
			if err != nil {
				t.Fatal(err)
			}
			t.Logf("initial eraser deployment cleaned up")
			// Finally, confirm the image itself is gone from every node.
			ctxT, cancel := context.WithTimeout(ctx, time.Minute*3)
			defer cancel()
			util.CheckImageRemoved(ctxT, t, util.GetClusterNodes(t), util.Nginx)
			return ctx
		}).
		Assess("Eraser job was not restarted", func(ctx context.Context, t *testing.T, cfg *envconf.Config) context.Context {
			// until a timeout is reached, make sure there are no pods matching
			// the selector eraser.sh/type=manual
			client := cfg.Client()
			ctxT2, cancel := context.WithTimeout(ctx, restartTimeout)
			defer cancel()
			util.CheckDeploymentCleanedUp(ctxT2, t, client)
			return ctx
		}).
		Feature()
	util.Testenv.Test(t, rmImageFeat)
}
================================================
FILE: test/e2e/tests/imagelist_rm_images/main_test.go
================================================
//go:build e2e
// +build e2e
package e2e
import (
"os"
"testing"
eraserv1alpha1 "github.com/eraser-dev/eraser/api/v1alpha1"
"github.com/eraser-dev/eraser/test/e2e/util"
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
"k8s.io/client-go/kubernetes/scheme"
"sigs.k8s.io/e2e-framework/pkg/env"
"sigs.k8s.io/e2e-framework/pkg/envconf"
"sigs.k8s.io/e2e-framework/pkg/envfuncs"
)
// TestMain provisions a KinD cluster, installs the latest Eraser release,
// then upgrades to the locally built images with the collector ENABLED (and
// the immediate schedule disabled). The cluster is destroyed when the suite
// finishes.
func TestMain(m *testing.M) {
	utilruntime.Must(eraserv1alpha1.AddToScheme(scheme.Scheme))

	removerImage := util.ParsedImages.RemoverImage
	managerImage := util.ParsedImages.ManagerImage
	collectorImage := util.ParsedImages.CollectorImage

	util.Testenv = env.NewWithConfig(envconf.New())
	// Create KinD Cluster
	util.Testenv.Setup(
		envfuncs.CreateKindClusterWithConfig(util.KindClusterName, util.NodeVersion, util.KindConfigPath),
		envfuncs.CreateNamespace(util.TestNamespace),
		util.LoadImageToCluster(util.KindClusterName, util.ManagerImage, util.ManagerTarballPath),
		util.LoadImageToCluster(util.KindClusterName, util.RemoverImage, util.RemoverTarballPath),
		// The upgrade below enables the collector and points it at the
		// locally built collector image, so that tarball must be side-loaded
		// too. The original code instead re-loaded the remover tarball a
		// second time, leaving the collector image to be pulled externally.
		util.LoadImageToCluster(util.KindClusterName, util.CollectorImage, util.CollectorTarballPath),
		util.HelmDeployLatestEraserRelease(util.TestNamespace,
			"--set", util.ScannerEnable.Set("false"),
			"--set", util.CollectorEnable.Set("false"),
			"--set", util.RemoverImageRepo.Set(removerImage.Repo),
			"--set", util.RemoverImageTag.Set(removerImage.Tag),
			"--set", util.ManagerImageRepo.Set(managerImage.Repo),
			"--set", util.ManagerImageTag.Set(managerImage.Tag),
		),
		util.UpgradeEraserHelm(util.TestNamespace,
			"--set", util.ScannerEnable.Set("false"),
			"--set", util.CollectorEnable.Set("true"),
			"--set", util.CollectorImageRepo.Set(collectorImage.Repo),
			"--set", util.CollectorImageTag.Set(collectorImage.Tag),
			"--set", util.RemoverImageRepo.Set(removerImage.Repo),
			"--set", util.RemoverImageTag.Set(removerImage.Tag),
			"--set", util.ManagerImageRepo.Set(managerImage.Repo),
			"--set", util.ManagerImageTag.Set(managerImage.Tag),
			"--set", util.ScheduleImmediate.Set("false"),
		),
	).Finish(
		envfuncs.DestroyKindCluster(util.KindClusterName),
	)

	os.Exit(util.Testenv.Run(m))
}
================================================
FILE: test/e2e/tests/imagelist_skip_nodes/eraser_test.go
================================================
//go:build e2e
// +build e2e
package e2e
import (
"context"
"testing"
"github.com/eraser-dev/eraser/test/e2e/util"
appsv1 "k8s.io/api/apps/v1"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels"
clientgo "k8s.io/client-go/kubernetes"
"sigs.k8s.io/e2e-framework/klient/wait"
"sigs.k8s.io/e2e-framework/klient/wait/conditions"
"sigs.k8s.io/e2e-framework/pkg/envconf"
"sigs.k8s.io/e2e-framework/pkg/features"
)
// TestSkipNodes verifies that labelling a node with the
// eraser.sh/cleanup.filter label (default "exclude" filter type) prevents
// ImageJob pods from being scheduled on that node: the image is removed from
// every other node but left intact on the labelled one.
func TestSkipNodes(t *testing.T) {
	skipNodesFeat := features.New("Applying the eraser.sh/cleanup.filter label to a node should prevent ImageJob pods from being scheduled on that node").
		Setup(func(ctx context.Context, t *testing.T, cfg *envconf.Config) context.Context {
			// fetch node info
			c := cfg.Client().RESTConfig()
			k8sClient, err := clientgo.NewForConfig(c)
			if err != nil {
				// Fatal: every subsequent call dereferences k8sClient, so
				// continuing after t.Error would panic on a nil client.
				t.Fatal("unable to obtain k8s client from config", err)
			}

			// Run an nginx deployment so its image is cached on the nodes.
			podSelectorLabels := map[string]string{"app": util.Nginx}
			nginxDep := util.NewDeployment(cfg.Namespace(), util.Nginx, 2, podSelectorLabels, corev1.Container{Image: util.Nginx, Name: util.Nginx})
			if err := cfg.Client().Resources().Create(ctx, nginxDep); err != nil {
				t.Error("Failed to create the dep", err)
			}

			// Exactly one node is expected to match the filter selector.
			nodeList, err := k8sClient.CoreV1().Nodes().List(ctx, metav1.ListOptions{LabelSelector: util.FilterNodeSelector})
			if err != nil {
				t.Fatalf("unable to list node %s\n%#v", util.FilterNodeSelector, err)
			}
			if len(nodeList.Items) != 1 {
				// Fatal: indexing nodeList.Items[0] below would otherwise
				// panic with index out of range.
				t.Fatalf("List operation for selector %s resulted in the wrong number of nodes", util.FilterNodeSelector)
			}

			// Apply the skip-filter label to the selected node.
			nodeToSkip := &nodeList.Items[0]
			nodeToSkip.ObjectMeta.Labels[util.FilterLabelKey] = util.FilterLabelValue
			nodeToSkip, err = k8sClient.CoreV1().Nodes().Update(ctx, nodeToSkip, metav1.UpdateOptions{})
			if err != nil {
				t.Errorf("unable to update node %#v with label {%s: %s}\nerror: %#v", nodeToSkip, util.FilterLabelKey, util.FilterLabelValue, err)
			}
			return ctx
		}).
		Assess("Deployment and labelling the node have succeeded", func(ctx context.Context, t *testing.T, cfg *envconf.Config) context.Context {
			c := cfg.Client().RESTConfig()
			k8sClient, err := clientgo.NewForConfig(c)
			if err != nil {
				// Fatal: k8sClient is used inside the wait loop below.
				t.Fatal("unable to obtain k8s client from config", err)
			}

			// Wait until exactly one node carries the filter label.
			err = wait.For(func() (bool, error) {
				nodeList, err := k8sClient.CoreV1().Nodes().List(ctx, metav1.ListOptions{LabelSelector: util.FilterLabelKey})
				if err != nil {
					return false, err
				}
				return len(nodeList.Items) == 1, nil
			}, wait.WithTimeout(util.Timeout))
			if err != nil {
				// Fixed format string: the original read "selector%s" with
				// no space before the selector value.
				t.Errorf("error while waiting for selector %s to be added to node\n%#v", util.FilterNodeSelector, err)
			}

			// Wait for the nginx deployment to become Available.
			resultDeployment := appsv1.Deployment{
				ObjectMeta: metav1.ObjectMeta{Name: util.Nginx, Namespace: cfg.Namespace()},
			}
			if err = wait.For(
				conditions.New(cfg.Client().Resources()).DeploymentConditionMatch(&resultDeployment, appsv1.DeploymentAvailable, corev1.ConditionTrue),
				wait.WithTimeout(util.Timeout),
			); err != nil {
				t.Error("deployment not found", err)
			}
			// Stash the deployment in the context for the deletion step.
			return context.WithValue(ctx, util.Nginx, &resultDeployment)
		}).
		Assess("Node(s) successfully skipped", func(ctx context.Context, t *testing.T, cfg *envconf.Config) context.Context {
			// delete deployment
			client, err := cfg.NewClient()
			if err != nil {
				// Fatal: client is dereferenced immediately below.
				t.Fatal("Failed to create new client", err)
			}
			var pods corev1.PodList
			err = client.Resources().List(ctx, &pods, func(o *metav1.ListOptions) {
				o.LabelSelector = labels.SelectorFromSet(labels.Set{"app": util.Nginx}).String()
			})
			if err != nil {
				t.Fatal(err)
			}

			dep := ctx.Value(util.Nginx).(*appsv1.Deployment)
			if err := client.Resources().Delete(ctx, dep); err != nil {
				t.Error("Failed to delete the dep", err)
			}

			// Wait for nginx containers to disappear from every node except
			// the skipped one.
			clusterNodes := util.GetClusterNodes(t)
			clusterNodes = util.DeleteStringFromSlice(clusterNodes, util.FilterNodeName)
			for _, nodeName := range clusterNodes {
				err := wait.For(util.ContainerNotPresentOnNode(nodeName, util.Nginx), wait.WithTimeout(util.Timeout))
				if err != nil {
					// Let's not mark this as an error
					// We only have this to prevent race conditions with the eraser spinning up
					t.Logf("error while waiting for deployment deletion: %v", err)
				}
			}

			// deploy imageJob config
			if err := util.DeployEraserConfig(cfg.KubeconfigFile(), cfg.Namespace(), util.EraserV1Alpha1ImagelistPath); err != nil {
				t.Error("Failed to deploy image list config", err)
			}

			ctxT, cancel := context.WithTimeout(ctx, util.Timeout)
			defer cancel()
			// ensure images are removed from all nodes except the one we are skipping. remove the node we are skipping from the list of nodes.
			util.CheckImageRemoved(ctxT, t, clusterNodes, util.Nginx)

			// get pod logs before imagejob is deleted
			// (message fixed: this suite runs with the collector disabled,
			// so these are the eraser pods' logs)
			if err := util.GetPodLogs(t); err != nil {
				t.Error("error getting eraser pod logs", err)
			}

			// Wait for the imagejob to be completed by checking for its nonexistence in the cluster
			err = wait.For(util.ImagejobNotInCluster(cfg.KubeconfigFile()), wait.WithTimeout(util.Timeout))
			if err != nil {
				t.Logf("error while waiting for imagejob cleanup: %v", err)
			}

			// the imagejob has done its work, so now we can check the node to make sure it didn't remove the image
			util.CheckImagesExist(t, []string{util.FilterNodeName}, util.Nginx)
			return ctx
		}).
		Assess("Get logs", func(ctx context.Context, t *testing.T, cfg *envconf.Config) context.Context {
			if err := util.GetPodLogs(t); err != nil {
				t.Error("error getting eraser pod logs", err)
			}
			return ctx
		}).
		Feature()
	util.Testenv.Test(t, skipNodesFeat)
}
================================================
FILE: test/e2e/tests/imagelist_skip_nodes/main_test.go
================================================
//go:build e2e
// +build e2e
package e2e
import (
"os"
"testing"
eraserv1alpha1 "github.com/eraser-dev/eraser/api/v1alpha1"
"github.com/eraser-dev/eraser/test/e2e/util"
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
"k8s.io/client-go/kubernetes/scheme"
"sigs.k8s.io/e2e-framework/pkg/env"
"sigs.k8s.io/e2e-framework/pkg/envconf"
"sigs.k8s.io/e2e-framework/pkg/envfuncs"
)
// TestMain provisions a KinD cluster for the skip-nodes suite, installs the
// latest Eraser release, then upgrades to the locally built images (default
// "exclude" node-filter type). The cluster is destroyed when the suite
// finishes.
func TestMain(m *testing.M) {
	utilruntime.Must(eraserv1alpha1.AddToScheme(scheme.Scheme))

	removerImage := util.ParsedImages.RemoverImage
	managerImage := util.ParsedImages.ManagerImage

	util.Testenv = env.NewWithConfig(envconf.New())
	// Create KinD Cluster
	util.Testenv.Setup(
		envfuncs.CreateKindClusterWithConfig(util.KindClusterName, util.NodeVersion, util.KindConfigPath),
		envfuncs.CreateNamespace(util.TestNamespace),
		util.LoadImageToCluster(util.KindClusterName, util.ManagerImage, util.ManagerTarballPath),
		// Each tarball only needs to be side-loaded once; the original code
		// loaded the identical remover tarball twice.
		util.LoadImageToCluster(util.KindClusterName, util.RemoverImage, util.RemoverTarballPath),
		util.HelmDeployLatestEraserRelease(util.TestNamespace,
			"--set", util.ScannerEnable.Set("false"),
			"--set", util.CollectorEnable.Set("false"),
			"--set", util.RemoverImageRepo.Set(removerImage.Repo),
			"--set", util.RemoverImageTag.Set(removerImage.Tag),
			"--set", util.ManagerImageRepo.Set(managerImage.Repo),
			"--set", util.ManagerImageTag.Set(managerImage.Tag),
		),
		util.UpgradeEraserHelm(util.TestNamespace,
			"--set", util.CollectorEnable.Set("false"),
			"--set", util.ScannerEnable.Set("false"),
			"--set", util.RemoverImageRepo.Set(removerImage.Repo),
			"--set", util.RemoverImageTag.Set(removerImage.Tag),
			"--set", util.ManagerImageRepo.Set(managerImage.Repo),
			"--set", util.ManagerImageTag.Set(managerImage.Tag),
			"--set", util.CleanupOnSuccessDelay.Set("1m"),
		),
	).Finish(
		envfuncs.DestroyKindCluster(util.KindClusterName),
	)

	os.Exit(util.Testenv.Run(m))
}
================================================
FILE: test/e2e/tests/metrics_test_disable_scanner/eraser_test.go
================================================
//go:build e2e
// +build e2e
package e2e
import (
"context"
"regexp"
"strconv"
"testing"
"github.com/eraser-dev/eraser/test/e2e/util"
"sigs.k8s.io/e2e-framework/pkg/envconf"
"sigs.k8s.io/e2e-framework/pkg/features"
)
const (
	// expectedImagesRemoved is the minimum value the summed
	// images_removed_run_total metric must reach across all remover pods —
	// presumably one removal per node in the test cluster; confirm against
	// the kind config if the node count changes.
	expectedImagesRemoved = 3
)
// TestMetricsWithScannerDisabled verifies that, with the scanner disabled, the
// remover still reports its removals through the OTEL collector: the summed
// images_removed_run_total across nodes must be at least expectedImagesRemoved.
func TestMetricsWithScannerDisabled(t *testing.T) {
	// NOTE: the feature title previously claimed the metric "should report 1",
	// which contradicted the >= expectedImagesRemoved assertion below.
	metrics := features.New("Images_removed_run_total metric should report at least 3 removals").
		Assess("Alpine image is removed", func(ctx context.Context, t *testing.T, cfg *envconf.Config) context.Context {
			ctxT, cancel := context.WithTimeout(ctx, util.Timeout)
			defer cancel()
			util.CheckImageRemoved(ctxT, t, util.GetClusterNodes(t), util.VulnerableImage)
			return ctx
		}).
		Assess("Get logs", func(ctx context.Context, t *testing.T, cfg *envconf.Config) context.Context {
			if err := util.GetPodLogs(t); err != nil {
				t.Error("error getting eraser pod logs", err)
			}
			return ctx
		}).
		Assess("Check images_removed_run_total metric", func(ctx context.Context, t *testing.T, cfg *envconf.Config) context.Context {
			// Spin up a throwaway curl pod so the collector can be scraped in-cluster.
			if _, err := util.KubectlCurlPod(cfg.KubeconfigFile(), cfg.Namespace()); err != nil {
				t.Error(err, "error running curl pod")
			}
			if _, err := util.KubectlWait(cfg.KubeconfigFile(), "temp", cfg.Namespace()); err != nil {
				t.Error(err, "error waiting for temp curl pod")
			}
			output, err := util.KubectlExecCurl(cfg.KubeconfigFile(), "temp", "http://otel-collector/metrics", cfg.Namespace())
			if err != nil {
				t.Error(err, "error with otlp curl request")
			}
			// Sum the per-node counter samples into a cluster-wide total.
			r := regexp.MustCompile(`images_removed_run_total{job="remover",node_name=".+"} (\d+)`)
			results := r.FindAllStringSubmatch(output, -1)
			totalRemoved := 0
			for i := range results {
				val, _ := strconv.Atoi(results[i][1])
				totalRemoved += val
			}
			if totalRemoved < expectedImagesRemoved {
				// "expected " previously carried a trailing space, double-spacing the output.
				t.Error("images_removed_run_total incorrect, expected", expectedImagesRemoved, "got", totalRemoved)
			}
			return ctx
		}).
		Assess("Get logs", func(ctx context.Context, t *testing.T, cfg *envconf.Config) context.Context {
			if err := util.GetPodLogs(t); err != nil {
				t.Error("error getting eraser pod logs", err)
			}
			return ctx
		}).
		Feature()
	util.Testenv.Test(t, metrics)
}
================================================
FILE: test/e2e/tests/metrics_test_disable_scanner/main_test.go
================================================
//go:build e2e
// +build e2e
package e2e
import (
"os"
"testing"
eraserv1alpha1 "github.com/eraser-dev/eraser/api/v1alpha1"
"github.com/eraser-dev/eraser/test/e2e/util"
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
"k8s.io/client-go/kubernetes/scheme"
"sigs.k8s.io/e2e-framework/pkg/env"
"sigs.k8s.io/e2e-framework/pkg/envconf"
"sigs.k8s.io/e2e-framework/pkg/envfuncs"
)
// TestMain provisions the e2e environment for the disable-scanner metrics
// suite: a KinD cluster with an OTEL collector, the locally built images
// preloaded, and eraser upgraded with the collector on, scanner off, and the
// OTLP endpoint pointed at the in-cluster collector.
func TestMain(m *testing.M) {
	utilruntime.Must(eraserv1alpha1.AddToScheme(scheme.Scheme))
	removerImage := util.ParsedImages.RemoverImage
	managerImage := util.ParsedImages.ManagerImage
	collectorImg := util.ParsedImages.CollectorImage
	util.Testenv = env.NewWithConfig(envconf.New())
	// Create KinD Cluster
	util.Testenv.Setup(
		envfuncs.CreateKindClusterWithConfig(util.KindClusterName, util.NodeVersion, util.KindConfigPath),
		envfuncs.CreateNamespace(util.TestNamespace),
		util.DeployOtelCollector(util.TestNamespace),
		util.LoadImageToCluster(util.KindClusterName, util.ManagerImage, util.ManagerTarballPath),
		util.LoadImageToCluster(util.KindClusterName, util.CollectorImage, util.CollectorTarballPath),
		// Load the remover image once (it was previously loaded twice).
		util.LoadImageToCluster(util.KindClusterName, util.RemoverImage, util.RemoverTarballPath),
		util.LoadImageToCluster(util.KindClusterName, util.VulnerableImage, ""),
		util.HelmDeployLatestEraserRelease(util.TestNamespace,
			"--set", util.ScannerEnable.Set("false"),
			"--set", util.CollectorEnable.Set("false"),
			"--set", util.RemoverImageRepo.Set(removerImage.Repo),
			"--set", util.RemoverImageTag.Set(removerImage.Tag),
			"--set", util.ManagerImageRepo.Set(managerImage.Repo),
			"--set", util.ManagerImageTag.Set(managerImage.Tag),
		),
		util.UpgradeEraserHelm(util.TestNamespace,
			"--set", util.ScannerEnable.Set("false"),
			"--set", util.CollectorEnable.Set("true"),
			"--set", util.CollectorImageRepo.Set(collectorImg.Repo),
			"--set", util.CollectorImageTag.Set(collectorImg.Tag),
			"--set", util.RemoverImageRepo.Set(removerImage.Repo),
			"--set", util.RemoverImageTag.Set(removerImage.Tag),
			"--set", util.ManagerImageRepo.Set(managerImage.Repo),
			"--set", util.ManagerImageTag.Set(managerImage.Tag),
			"--set", util.OTLPEndpoint.Set("otel-collector:4318"),
			"--set", util.CleanupOnSuccessDelay.Set("1m"),
		),
	).Finish(
		envfuncs.DestroyKindCluster(util.KindClusterName),
	)
	os.Exit(util.Testenv.Run(m))
}
================================================
FILE: test/e2e/tests/metrics_test_eraser/eraser_test.go
================================================
//go:build e2e
// +build e2e
package e2e
import (
"context"
"regexp"
"strconv"
"testing"
"github.com/eraser-dev/eraser/test/e2e/util"
"sigs.k8s.io/e2e-framework/pkg/envconf"
"sigs.k8s.io/e2e-framework/pkg/features"
)
// expectedImagesRemoved is the exact total of images_removed_run_total
// samples (summed across all nodes) this suite expects after one removal run.
const (
	expectedImagesRemoved = 3
)
// TestMetricsEraserOnly deploys an imagelist, waits for the targeted image to
// be removed from every node, then scrapes the OTEL collector and asserts that
// the summed images_removed_run_total equals expectedImagesRemoved exactly.
func TestMetricsEraserOnly(t *testing.T) {
	// NOTE: the feature title previously claimed the metric "should report 1",
	// which contradicted the == expectedImagesRemoved assertion below.
	metrics := features.New("Images_removed_run_total metric should report 3 removals").
		Assess("Alpine image is removed", func(ctx context.Context, t *testing.T, cfg *envconf.Config) context.Context {
			// deploy imagelist config
			if err := util.DeployEraserConfig(cfg.KubeconfigFile(), cfg.Namespace(), util.ImagelistAlpinePath); err != nil {
				t.Error("Failed to deploy image list config", err)
			}
			ctxT, cancel := context.WithTimeout(ctx, util.Timeout)
			defer cancel()
			util.CheckImageRemoved(ctxT, t, util.GetClusterNodes(t), util.VulnerableImage)
			return ctx
		}).
		Assess("Get logs", func(ctx context.Context, t *testing.T, cfg *envconf.Config) context.Context {
			if err := util.GetPodLogs(t); err != nil {
				t.Error("error getting eraser pod logs", err)
			}
			return ctx
		}).
		Assess("Check images_removed_run_total metric", func(ctx context.Context, t *testing.T, cfg *envconf.Config) context.Context {
			// Spin up a throwaway curl pod so the collector can be scraped in-cluster.
			if _, err := util.KubectlCurlPod(cfg.KubeconfigFile(), cfg.Namespace()); err != nil {
				t.Error(err, "error running curl pod")
			}
			if _, err := util.KubectlWait(cfg.KubeconfigFile(), "temp", cfg.Namespace()); err != nil {
				t.Error(err, "error waiting for temp curl pod")
			}
			output, err := util.KubectlExecCurl(cfg.KubeconfigFile(), "temp", "http://otel-collector/metrics", cfg.Namespace())
			if err != nil {
				t.Error(err, "error with otlp curl request")
			}
			// Sum the per-node counter samples into a cluster-wide total.
			r := regexp.MustCompile(`images_removed_run_total{job="remover",node_name=".+"} (\d+)`)
			results := r.FindAllStringSubmatch(output, -1)
			totalRemoved := 0
			for i := range results {
				val, _ := strconv.Atoi(results[i][1])
				totalRemoved += val
			}
			if totalRemoved != expectedImagesRemoved {
				// "expected " previously carried a trailing space, double-spacing the output.
				t.Error("images_removed_run_total incorrect, expected", expectedImagesRemoved, "got", totalRemoved)
			}
			return ctx
		}).
		Assess("Get logs", func(ctx context.Context, t *testing.T, cfg *envconf.Config) context.Context {
			if err := util.GetPodLogs(t); err != nil {
				t.Error("error getting eraser pod logs", err)
			}
			return ctx
		}).
		Feature()
	util.Testenv.Test(t, metrics)
}
================================================
FILE: test/e2e/tests/metrics_test_eraser/main_test.go
================================================
//go:build e2e
// +build e2e
package e2e
import (
"os"
"testing"
eraserv1alpha1 "github.com/eraser-dev/eraser/api/v1alpha1"
"github.com/eraser-dev/eraser/test/e2e/util"
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
"k8s.io/client-go/kubernetes/scheme"
"sigs.k8s.io/e2e-framework/pkg/env"
"sigs.k8s.io/e2e-framework/pkg/envconf"
"sigs.k8s.io/e2e-framework/pkg/envfuncs"
)
// TestMain provisions the e2e environment for the eraser-only metrics suite:
// a KinD cluster with an OTEL collector, the locally built images preloaded,
// and eraser upgraded with scanner and collector disabled but metrics routed
// to the in-cluster OTLP endpoint.
func TestMain(m *testing.M) {
	utilruntime.Must(eraserv1alpha1.AddToScheme(scheme.Scheme))
	removerImage := util.ParsedImages.RemoverImage
	managerImage := util.ParsedImages.ManagerImage
	util.Testenv = env.NewWithConfig(envconf.New())
	// Create KinD Cluster
	util.Testenv.Setup(
		envfuncs.CreateKindClusterWithConfig(util.KindClusterName, util.NodeVersion, util.KindConfigPath),
		envfuncs.CreateNamespace(util.TestNamespace),
		util.DeployOtelCollector(util.TestNamespace),
		util.LoadImageToCluster(util.KindClusterName, util.ManagerImage, util.ManagerTarballPath),
		// Load the remover image once (it was previously loaded twice).
		util.LoadImageToCluster(util.KindClusterName, util.RemoverImage, util.RemoverTarballPath),
		util.LoadImageToCluster(util.KindClusterName, util.VulnerableImage, ""),
		util.HelmDeployLatestEraserRelease(util.TestNamespace,
			"--set", util.ScannerEnable.Set("false"),
			"--set", util.CollectorEnable.Set("false"),
			"--set", util.RemoverImageRepo.Set(removerImage.Repo),
			"--set", util.RemoverImageTag.Set(removerImage.Tag),
			"--set", util.ManagerImageRepo.Set(managerImage.Repo),
			"--set", util.ManagerImageTag.Set(managerImage.Tag),
		),
		util.UpgradeEraserHelm(util.TestNamespace,
			"--set", util.CollectorEnable.Set("false"),
			"--set", util.ScannerEnable.Set("false"),
			"--set", util.RemoverImageRepo.Set(removerImage.Repo),
			"--set", util.RemoverImageTag.Set(removerImage.Tag),
			"--set", util.ManagerImageRepo.Set(managerImage.Repo),
			"--set", util.ManagerImageTag.Set(managerImage.Tag),
			"--set", util.OTLPEndpoint.Set("otel-collector:4318"),
			"--set", util.CleanupOnSuccessDelay.Set("1m"),
		),
	).Finish(
		envfuncs.DestroyKindCluster(util.KindClusterName),
	)
	os.Exit(util.Testenv.Run(m))
}
================================================
FILE: test/e2e/tests/metrics_test_scanner/eraser_test.go
================================================
//go:build e2e
// +build e2e
package e2e
import (
"context"
"regexp"
"strconv"
"testing"
"github.com/eraser-dev/eraser/test/e2e/util"
"sigs.k8s.io/e2e-framework/pkg/envconf"
"sigs.k8s.io/e2e-framework/pkg/features"
)
// expectedVulnerableImages is the minimum total of vulnerable_images_run_total
// samples (summed across all nodes) the trivy scanner is expected to report.
const (
	expectedVulnerableImages = 3
)
// TestMetricsWithScanner checks that with collector and scanner enabled both
// the remover and scanner metrics reach the OTEL collector: the summed
// images_removed_run_total and vulnerable_images_run_total must each be >= 3.
func TestMetricsWithScanner(t *testing.T) {
	// sumMatches folds the captured counter values of the regexp matches
	// into a single cluster-wide total.
	sumMatches := func(matches [][]string) int {
		total := 0
		for _, m := range matches {
			n, _ := strconv.Atoi(m[1])
			total += n
		}
		return total
	}
	metrics := features.New("Images_removed_run_total and vulnerable_images_run_total metrics should report >= 3").
		Assess("Alpine image is removed", func(ctx context.Context, t *testing.T, cfg *envconf.Config) context.Context {
			timedCtx, cancel := context.WithTimeout(ctx, util.Timeout)
			defer cancel()
			util.CheckImageRemoved(timedCtx, t, util.GetClusterNodes(t), util.VulnerableImage)
			return ctx
		}).
		Assess("Get logs", func(ctx context.Context, t *testing.T, cfg *envconf.Config) context.Context {
			if err := util.GetPodLogs(t); err != nil {
				t.Error("error getting eraser pod logs", err)
			}
			return ctx
		}).
		Assess("Check images_removed_run_total metric", func(ctx context.Context, t *testing.T, cfg *envconf.Config) context.Context {
			// A temporary curl pod lets us scrape the collector from inside the cluster.
			if _, err := util.KubectlCurlPod(cfg.KubeconfigFile(), cfg.Namespace()); err != nil {
				t.Error(err, "error running curl pod")
			}
			if _, err := util.KubectlWait(cfg.KubeconfigFile(), "temp", cfg.Namespace()); err != nil {
				t.Error(err, "error waiting for temp curl pod")
			}
			metricsBody, err := util.KubectlExecCurl(cfg.KubeconfigFile(), "temp", "http://otel-collector/metrics", cfg.Namespace())
			if err != nil {
				t.Error(err, "error with otlp curl request")
			}
			removedRe := regexp.MustCompile(`images_removed_run_total{job="remover",node_name=".+"} (\d+)`)
			removed := sumMatches(removedRe.FindAllStringSubmatch(metricsBody, -1))
			if removed < 3 {
				t.Error("images_removed_run_total incorrect, expected 3, got", removed)
			}
			return ctx
		}).
		Assess("Check vulnerable_images_run_total metric", func(ctx context.Context, t *testing.T, cfg *envconf.Config) context.Context {
			metricsBody, err := util.KubectlExecCurl(cfg.KubeconfigFile(), "temp", "http://otel-collector/metrics", cfg.Namespace())
			if err != nil {
				t.Error(err, "error with otlp curl request")
			}
			vulnerableRe := regexp.MustCompile(`vulnerable_images_run_total{job="trivy-scanner",node_name=".+"} (\d+)`)
			vulnerable := sumMatches(vulnerableRe.FindAllStringSubmatch(metricsBody, -1))
			if vulnerable < expectedVulnerableImages {
				t.Error("vulnerable_images_run_total incorrect, expected ", expectedVulnerableImages, "got", vulnerable)
			}
			return ctx
		}).
		Assess("Get logs", func(ctx context.Context, t *testing.T, cfg *envconf.Config) context.Context {
			if err := util.GetPodLogs(t); err != nil {
				t.Error("error getting eraser pod logs", err)
			}
			return ctx
		}).
		Feature()
	util.Testenv.Test(t, metrics)
}
================================================
FILE: test/e2e/tests/metrics_test_scanner/main_test.go
================================================
//go:build e2e
// +build e2e
package e2e
import (
"os"
"testing"
eraserv1alpha1 "github.com/eraser-dev/eraser/api/v1alpha1"
"github.com/eraser-dev/eraser/test/e2e/util"
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
"k8s.io/client-go/kubernetes/scheme"
"sigs.k8s.io/e2e-framework/pkg/env"
"sigs.k8s.io/e2e-framework/pkg/envconf"
"sigs.k8s.io/e2e-framework/pkg/envfuncs"
)
// TestMain provisions the e2e environment for the scanner metrics suite: a
// KinD cluster with an OTEL collector, all four component images preloaded,
// and eraser upgraded with collector and trivy scanner enabled and metrics
// routed to the in-cluster OTLP endpoint.
func TestMain(m *testing.M) {
	utilruntime.Must(eraserv1alpha1.AddToScheme(scheme.Scheme))
	removerImage := util.ParsedImages.RemoverImage
	managerImage := util.ParsedImages.ManagerImage
	collectorImage := util.ParsedImages.CollectorImage
	scannerImage := util.ParsedImages.ScannerImage
	util.Testenv = env.NewWithConfig(envconf.New())
	// Create KinD Cluster
	util.Testenv.Setup(
		envfuncs.CreateKindClusterWithConfig(util.KindClusterName, util.NodeVersion, util.KindConfigPath),
		envfuncs.CreateNamespace(util.TestNamespace),
		util.DeployOtelCollector(util.TestNamespace),
		util.LoadImageToCluster(util.KindClusterName, util.ManagerImage, util.ManagerTarballPath),
		// Load the remover image once (it was previously loaded twice).
		util.LoadImageToCluster(util.KindClusterName, util.RemoverImage, util.RemoverTarballPath),
		util.LoadImageToCluster(util.KindClusterName, util.CollectorImage, util.CollectorTarballPath),
		util.LoadImageToCluster(util.KindClusterName, util.ScannerImage, util.ScannerTarballPath),
		util.LoadImageToCluster(util.KindClusterName, util.VulnerableImage, ""),
		util.HelmDeployLatestEraserRelease(util.TestNamespace,
			"--set", util.ScannerEnable.Set("false"),
			"--set", util.CollectorEnable.Set("false"),
			"--set", util.RemoverImageRepo.Set(removerImage.Repo),
			"--set", util.RemoverImageTag.Set(removerImage.Tag),
			"--set", util.ManagerImageRepo.Set(managerImage.Repo),
			"--set", util.ManagerImageTag.Set(managerImage.Tag),
		),
		util.UpgradeEraserHelm(util.TestNamespace,
			"--set", util.OTLPEndpoint.Set("otel-collector:4318"),
			"--set", util.ScannerEnable.Set("true"),
			"--set", util.CollectorEnable.Set("true"),
			"--set", util.CollectorImageRepo.Set(collectorImage.Repo),
			"--set", util.CollectorImageTag.Set(collectorImage.Tag),
			"--set", util.ScannerImageRepo.Set(scannerImage.Repo),
			"--set", util.ScannerImageTag.Set(scannerImage.Tag),
			"--set", util.RemoverImageRepo.Set(removerImage.Repo),
			"--set", util.RemoverImageTag.Set(removerImage.Tag),
			"--set", util.ManagerImageRepo.Set(managerImage.Repo),
			"--set", util.ManagerImageTag.Set(managerImage.Tag),
			"--set", util.CleanupOnSuccessDelay.Set("1m"),
		),
	).Finish(
		envfuncs.DestroyKindCluster(util.KindClusterName),
	)
	os.Exit(util.Testenv.Run(m))
}
================================================
FILE: test/e2e/util/kubectl.go
================================================
// https://raw.githubusercontent.com/Azure/secrets-store-csi-driver-provider-azure/master/test/e2e/framework/exec/kubectl.go
package util
import (
"fmt"
"os/exec"
"strings"
klog "k8s.io/klog/v2"
)
// KubectlApply executes "kubectl apply" given a list of arguments.
func KubectlApply(kubeconfigPath, namespace string, args []string) error {
args = append([]string{
"apply",
fmt.Sprintf("--kubeconfig=%s", kubeconfigPath),
fmt.Sprintf("--namespace=%s", namespace),
}, args...)
_, err := Kubectl(args)
return err
}
// HelmInstall executes "helm install" given a list of arguments.
func HelmInstall(kubeconfigPath, namespace string, args []string) error {
args = append([]string{
"install",
"eraser-e2e-test",
"--wait",
"--debug",
"--create-namespace",
fmt.Sprintf("--kubeconfig=%s", kubeconfigPath),
fmt.Sprintf("--namespace=%s", namespace),
}, args...)
_, err := Helm(args)
return err
}
// HelmUpgrade executes "helm upgrade" given a list of arguments.
func HelmUpgrade(kubeconfigPath, namespace string, args []string) error {
args = append([]string{
"upgrade",
"eraser-e2e-test",
"--wait",
"--debug",
fmt.Sprintf("--kubeconfig=%s", kubeconfigPath),
fmt.Sprintf("--namespace=%s", namespace),
}, args...)
_, err := Helm(args)
return err
}
// HelmUninstall executes "helm uninstall" given a list of arguments.
func HelmUninstall(kubeconfigPath, namespace string, args []string) error {
args = append([]string{
"uninstall",
"eraser-e2e-test",
fmt.Sprintf("--kubeconfig=%s", kubeconfigPath),
fmt.Sprintf("--namespace=%s", namespace),
}, args...)
_, err := Helm(args)
return err
}
// KubectlDelete executes "kubectl delete" given a list of arguments.
func KubectlDelete(kubeconfigPath, namespace string, args []string) error {
args = append([]string{
"delete",
fmt.Sprintf("--kubeconfig=%s", kubeconfigPath),
fmt.Sprintf("--namespace=%s", namespace),
}, args...)
_, err := Kubectl(args)
return err
}
// KubectlExecCurl runs "curl <endpoint>" inside an existing pod via
// "kubectl exec" and returns the combined output.
func KubectlExecCurl(kubeconfigPath, podName string, endpoint, namespace string) (string, error) {
	return Kubectl([]string{
		"exec", "-i", podName,
		"-n", namespace,
		"--kubeconfig", kubeconfigPath,
		"--",
		"curl", endpoint,
	})
}
func KubectlWait(kubeconfigPath, podName, namespace string) (string, error) {
args := []string{
"wait",
"--for=condition=Ready",
fmt.Sprintf("--kubeconfig=%s", kubeconfigPath),
"--timeout=120s",
"pod",
podName,
"-n",
namespace,
}
return Kubectl(args)
}
// KubectlLogs executes "kubectl logs" given a list of arguments.
func KubectlLogs(kubeconfigPath, podName, containerName, namespace string, extraArgs ...string) (string, error) {
args := []string{
"logs",
fmt.Sprintf("--kubeconfig=%s", kubeconfigPath),
fmt.Sprintf("--namespace=%s", namespace),
podName,
}
if containerName != "" {
args = append(args, fmt.Sprintf("-c=%s", containerName))
}
args = append(args, extraArgs...)
return Kubectl(args)
}
// KubectlDescribe executes "kubectl describe" given a list of arguments.
func KubectlDescribe(kubeconfigPath, podName, namespace string) (string, error) {
args := []string{
"describe",
"pod",
podName,
fmt.Sprintf("--kubeconfig=%s", kubeconfigPath),
fmt.Sprintf("--namespace=%s", namespace),
}
return Kubectl(args)
}
// KubectlCurlPod starts a long-running pod named "temp" (image
// curlimages/curl, kept alive with "tail -f /dev/null") in the given
// namespace, so later test steps can exec curl requests from inside the
// cluster. (The previous comment was a copy-paste of KubectlDescribe's.)
func KubectlCurlPod(kubeconfigPath, namespace string) (string, error) {
	args := []string{
		"run",
		"temp",
		"-n",
		namespace,
		"--image",
		"curlimages/curl",
		"--kubeconfig",
		kubeconfigPath,
		"--",
		"tail",
		"-f",
		"/dev/null",
	}
	return Kubectl(args)
}
// KubectlGet executes "kubectl get" given a list of arguments.
func KubectlGet(kubeconfigPath string, otherArgs ...string) (string, error) {
args := []string{
fmt.Sprintf("--kubeconfig=%s", kubeconfigPath),
"get",
}
args = append(args, otherArgs...)
return Kubectl(args)
}
// Kubectl runs the kubectl binary with args, logging the invocation. It
// returns trimmed combined stdout/stderr; on failure the output is folded
// into the wrapped error for easier debugging.
func Kubectl(args []string) (string, error) {
	klog.Infof("kubectl %s", strings.Join(args, " "))
	combined, err := exec.Command("kubectl", args...).CombinedOutput()
	trimmed := strings.TrimSpace(string(combined))
	if err != nil {
		return trimmed, fmt.Errorf("%w: %s", err, trimmed)
	}
	return trimmed, nil
}
// KubectlBackground starts "kubectl <args>" without waiting for it to finish
// (e.g. for port-forwarding). The caller owns the process lifetime; this
// function only reports whether the process could be started.
func KubectlBackground(args []string) error {
	klog.Infof("kubectl %s", strings.Join(args, " "))
	cmd := exec.Command("kubectl", args...)
	if err := cmd.Start(); err != nil {
		// Use %w (not %v) so callers can unwrap, matching Kubectl/Helm above.
		return fmt.Errorf("failed to start cmd: %w", err)
	}
	return nil
}
// Helm runs the helm binary with args, logging the invocation. It returns
// trimmed combined stdout/stderr; on failure the output is folded into the
// wrapped error for easier debugging.
func Helm(args []string) (string, error) {
	klog.Infof("helm %s", strings.Join(args, " "))
	combined, err := exec.Command("helm", args...).CombinedOutput()
	trimmed := strings.TrimSpace(string(combined))
	if err != nil {
		return trimmed, fmt.Errorf("%w: %s", err, trimmed)
	}
	return trimmed, nil
}
================================================
FILE: test/e2e/util/utils.go
================================================
package util
import (
"context"
"errors"
"fmt"
"os"
"os/exec"
"path/filepath"
"strings"
"testing"
"time"
appsv1 "k8s.io/api/apps/v1"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
klog "k8s.io/klog/v2"
"oras.land/oras-go/pkg/registry"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/e2e-framework/klient"
"sigs.k8s.io/e2e-framework/klient/k8s/resources"
"sigs.k8s.io/e2e-framework/klient/wait"
"sigs.k8s.io/e2e-framework/klient/wait/conditions"
"sigs.k8s.io/e2e-framework/pkg/env"
"sigs.k8s.io/e2e-framework/pkg/envconf"
"sigs.k8s.io/e2e-framework/pkg/envfuncs"
"sigs.k8s.io/kind/pkg/cluster"
eraserv1 "github.com/eraser-dev/eraser/api/v1"
pkgUtil "github.com/eraser-dev/eraser/pkg/utils"
)
// Shared fixtures for the e2e suites: chart/manifest locations, the KinD
// cluster name, image short names, and node-filter labels.
const (
	providerResourceChartDir  = "manifest_staging/charts"
	providerResourceDeployDir = "manifest_staging/deploy"
	publishedHelmRepo         = "https://eraser-dev.github.io/eraser/charts"
	// KindClusterName is the name given to the KinD cluster each suite creates.
	KindClusterName  = "eraser-e2e-test"
	ProviderResource = "eraser.yaml"
	// Image short names / references used in imagelists and assertions.
	Alpine               = "alpine"
	Nginx                = "nginx"
	NginxLatest          = "ghcr.io/eraser-dev/eraser/e2e-test/nginx:latest"
	NginxAliasOne        = "ghcr.io/eraser-dev/eraser/e2e-test/nginx:one"
	NginxAliasTwo        = "ghcr.io/eraser-dev/eraser/e2e-test/nginx:two"
	Redis                = "redis"
	Caddy                = "caddy"
	ImageCollectorShared = "imagecollector-shared"
	Prune                = "imagelist"
	ImagePullSecret      = "testsecret"
	// Node-filter fixtures: the worker node targeted by filter tests and the
	// label used to include/exclude it.
	FilterNodeName     = "eraser-e2e-test-worker"
	FilterNodeSelector = "kubernetes.io/hostname=eraser-e2e-test-worker"
	FilterLabelKey     = "eraser.sh/cleanup.filter"
	FilterLabelValue   = "true"
)
// Helm values paths (rendered via HelmPath.Set and passed to --set) plus
// label fixtures used to identify imagejob pods and the controller manager.
const (
	CollectorEnable       = HelmPath("runtimeConfig.components.collector.enabled")
	CollectorImageRepo    = HelmPath("runtimeConfig.components.collector.image.repo")
	CollectorImageTag     = HelmPath("runtimeConfig.components.collector.image.tag")
	ScannerConfig         = HelmPath("runtimeConfig.components.scanner.config")
	ScannerEnable         = HelmPath("runtimeConfig.components.scanner.enabled")
	ScannerImageRepo      = HelmPath("runtimeConfig.components.scanner.image.repo")
	ScannerImageTag       = HelmPath("runtimeConfig.components.scanner.image.tag")
	RemoverImageRepo      = HelmPath("runtimeConfig.components.remover.image.repo")
	RemoverImageTag       = HelmPath("runtimeConfig.components.remover.image.tag")
	ManagerImageRepo      = HelmPath("deploy.image.repo")
	ManagerImageTag       = HelmPath("deploy.image.tag")
	ImagePullSecrets      = HelmPath("runtimeConfig.manager.pullSecrets")
	OTLPEndpoint          = HelmPath("runtimeConfig.manager.otlpEndpoint")
	CleanupOnSuccessDelay = HelmPath("runtimeConfig.manager.imageJob.cleanup.delayOnSuccess")
	FilterNodesType       = HelmPath("runtimeConfig.manager.nodeFilter.type")
	ScheduleImmediate     = HelmPath("runtimeConfig.manager.scheduling.beginImmediately")
	CustomRuntimeAddress  = HelmPath("runtimeConfig.manager.runtime.address")
	CustomRuntimeName     = HelmPath("runtimeConfig.manager.runtime.name")
	// Pod/label selectors used when listing imagejob and manager pods.
	CollectorLabel       = "collector"
	ManualLabel          = "manual"
	ImageJobTypeLabelKey = "eraser.sh/type"
	ManagerLabelKey      = "control-plane"
	ManagerLabelValue    = "controller-manager"
)
var (
	// Testenv is the shared test environment, initialized by each suite's TestMain.
	Testenv env.Environment
	// Component image references and tarball paths are injected by the build
	// via environment variables.
	RemoverImage         = os.Getenv("REMOVER_IMAGE")
	ManagerImage         = os.Getenv("MANAGER_IMAGE")
	CollectorImage       = os.Getenv("COLLECTOR_IMAGE")
	ScannerImage         = os.Getenv("SCANNER_IMAGE")
	VulnerableImage      = os.Getenv("VULNERABLE_IMAGE")
	NonVulnerableImage   = os.Getenv("NON_VULNERABLE_IMAGE")
	EOLImage             = os.Getenv("EOL_IMAGE")
	BusyboxImage         = os.Getenv("BUSYBOX_IMAGE")
	CollectorDummyImage  = os.Getenv("COLLECTOR_IMAGE_DUMMY")
	RemoverTarballPath   = os.Getenv("REMOVER_TARBALL_PATH")
	ManagerTarballPath   = os.Getenv("MANAGER_TARBALL_PATH")
	CollectorTarballPath = os.Getenv("COLLECTOR_TARBALL_PATH")
	ScannerTarballPath   = os.Getenv("SCANNER_TARBALL_PATH")
	// Fixture paths, all derived from the project root.
	ProjectAbsDir                      = os.Getenv("PROJECT_ABSOLUTE_PATH")
	E2EPath                            = filepath.Join(ProjectAbsDir, "test", "e2e")
	TestDataPath                       = filepath.Join(E2EPath, "test-data")
	KindConfigPath                     = filepath.Join(E2EPath, "kind-config.yaml")
	KindConfigCustomRuntimePath        = filepath.Join(E2EPath, "kind-config-custom-runtime.yaml")
	HelmEmptyValuesPath                = filepath.Join(TestDataPath, "helm-empty-values.yaml")
	ChartPath                          = filepath.Join(ProjectAbsDir, providerResourceChartDir)
	DeployPath                         = filepath.Join(ProjectAbsDir, providerResourceDeployDir)
	OTELCollectorConfigPath            = filepath.Join(TestDataPath, "otelcollector.yaml")
	EraserV1Alpha1ImagelistUpdatedPath = filepath.Join(TestDataPath, "eraser_v1alpha1_imagelist_updated.yaml")
	EraserV1Alpha1ImagelistPath        = filepath.Join(TestDataPath, "eraser_v1alpha1_imagelist.yaml")
	EraserV1ImagelistPath              = filepath.Join(TestDataPath, "eraser_v1_imagelist.yaml")
	ImagelistAlpinePath                = filepath.Join(TestDataPath, "imagelist_alpine.yaml")
	NodeVersion                        = os.Getenv("NODE_VERSION")
	ModifiedNodeImage                  = os.Getenv("MODIFIED_NODE_IMAGE")
	// TestNamespace is randomized per run to avoid collisions between suites.
	TestNamespace   = envconf.RandomName("test-ns", 16)
	EraserNamespace = pkgUtil.GetNamespace()
	TestLogDir      = os.Getenv("TEST_LOGDIR")
	// ParsedImages is populated from the *_IMAGE env vars by init() below.
	ParsedImages *Images
	// Timeout bounds image-removal waits across the suites.
	Timeout             = time.Minute * 20
	ImagePullSecretJSON = fmt.Sprintf(`["%s"]`, ImagePullSecret)
	// ScannerConfigNoDeleteFailedJSON is a pre-escaped scanner config used in
	// --set arguments; note deleteFailedImages=false, deleteEOLImages=true.
	ScannerConfigNoDeleteFailedJSON = `"{ \"cacheDir\": \"/var/lib/trivy\", \"dbRepo\": \"ghcr.io/aquasecurity/trivy-db\", \"deleteFailedImages\": false, \"deleteEOLImages\": true, \"vulnerabilities\": null, \"ignoreUnfixed\": true, \"types\": [ \"os\", \"library\" ], \"securityChecks\": [ \"vuln\" ], \"severities\": [ \"CRITICAL\", \"HIGH\", \"MEDIUM\", \"LOW\" ] }"`
	ManagerAdditionalArgs           = HelmSet{
		key:  "controllerManager.additionalArgs",
		args: []string{"--delete-scan-failed-images=false"},
	}
)
type (
	// RepoTag splits an image reference into its repository and tag parts.
	RepoTag struct {
		Repo string
		Tag  string
	}
	// Images holds the parsed repo/tag pair for each eraser component image.
	Images struct {
		CollectorImage RepoTag
		RemoverImage   RepoTag
		ManagerImage   RepoTag
		ScannerImage   RepoTag
	}
	// HelmPath is a dotted Helm values path, e.g. "deploy.image.repo".
	HelmPath string
	// HelmSet accumulates multiple values for a single list-valued Helm key.
	HelmSet struct {
		key  string
		args []string
	}
)
// Set renders the path as a "key=value" argument suitable for helm --set.
func (hp HelmPath) Set(val string) string {
	return string(hp) + "=" + val
}
// Set appends one or more values to the set and returns the receiver so
// calls can be chained.
func (hs *HelmSet) Set(val ...string) *HelmSet {
	for _, v := range val {
		hs.args = append(hs.args, v)
	}
	return hs
}
// String renders the set as a helm --set list value: key={v1,v2,...}.
func (hs *HelmSet) String() string {
	joined := strings.Join(hs.args, ",")
	return fmt.Sprintf("%s={%s}", hs.key, joined)
}
// init eagerly parses the component image references from the environment so
// every suite can read ParsedImages without re-parsing. A malformed image
// reference makes the whole e2e run unusable, so failure is fatal (panic).
func init() {
	var err error
	ParsedImages, err = parsedImages(RemoverImage, ManagerImage, CollectorImage, ScannerImage)
	if err != nil {
		klog.Error(err)
		panic(err)
	}
}
// toRepoTag converts a parsed registry reference into a RepoTag, joining the
// registry host and repository path. An empty reference ("/" repo) maps to
// the zero repo string.
func toRepoTag(ref registry.Reference) RepoTag {
	repo := fmt.Sprintf("%s/%s", ref.Registry, ref.Repository)
	if repo == "/" {
		repo = ""
	}
	return RepoTag{
		Repo: repo,
		Tag:  ref.Reference,
	}
}
// parsedImages parses the four component image references into an Images
// struct, returning the first parse error encountered.
func parsedImages(removerImage, managerImage, collectorImage, scannerImage string) (*Images, error) {
	imgs := &Images{}
	var err error
	// Preserve the original parse order: remover, collector, manager, scanner.
	if imgs.RemoverImage, err = parseRepoTag(removerImage); err != nil {
		return nil, err
	}
	if imgs.CollectorImage, err = parseRepoTag(collectorImage); err != nil {
		return nil, err
	}
	if imgs.ManagerImage, err = parseRepoTag(managerImage); err != nil {
		return nil, err
	}
	if imgs.ScannerImage, err = parseRepoTag(scannerImage); err != nil {
		return nil, err
	}
	return imgs, nil
}
// parseRepoTag parses an image reference into repo and tag. An empty input
// yields the zero RepoTag. References without a registry component (local,
// unpublished images) are parsed with a dummy registry that is then discarded.
func parseRepoTag(img string) (RepoTag, error) {
	if img == "" {
		return RepoTag{}, nil
	}
	ref, err := registry.ParseReference(img)
	if err == nil {
		return toRepoTag(ref), nil
	}
	// if true, this is an "unpublished" image, without a registry
	if !strings.Contains(img, "/") {
		// the parser doesn't like unpublished images, so supply a dummy registry and pass it back to the parser
		dummyRef, dummyErr := registry.ParseReference(fmt.Sprintf("dummy.co/%s", img))
		if dummyErr == nil {
			return RepoTag{
				// the registry info is discarded since it was a dummy registry
				Repo: dummyRef.Repository,
				Tag:  dummyRef.Reference,
			}, nil
		}
		err = dummyErr
	}
	return RepoTag{}, err
}
// LoadImageToCluster returns an env.Func that loads an image into the KinD
// cluster: from a .tar archive when tarballPath points at one, otherwise
// from the local docker daemon by reference.
func LoadImageToCluster(clusterName, imageRef, tarballPath string) env.Func {
	if !strings.HasSuffix(tarballPath, ".tar") {
		return envfuncs.LoadDockerImageToCluster(clusterName, imageRef)
	}
	return envfuncs.LoadImageArchiveToCluster(clusterName, tarballPath)
}
// HelmDeployLatestEraserRelease returns an env.Func that installs the most
// recently published eraser chart (from the public chart repo) into the given
// namespace, then waits for the controller-manager deployment to become
// Available. It is a no-op unless the HELM_UPGRADE_TEST environment variable
// is set, since only the upgrade suites need a prior release installed.
func HelmDeployLatestEraserRelease(namespace string, extraArgs ...string) env.Func {
	return func(ctx context.Context, cfg *envconf.Config) (context.Context, error) {
		// Only the helm-upgrade test flow needs the published release present.
		if os.Getenv("HELM_UPGRADE_TEST") == "" {
			return ctx, nil
		}
		// Register and refresh the published chart repository.
		scriptTemplate := `
helm repo add eraser '%[1]s'
helm repo update
`
		script := fmt.Sprintf(scriptTemplate, publishedHelmRepo)
		//nolint:gosec // G204: Subprocess execution is intended for e2e test setup
		addEraserRepoCmd := exec.Command("bash", "-ec", script)
		if _, err := addEraserRepoCmd.CombinedOutput(); err != nil {
			return ctx, err
		}
		// Install with an empty values file so only extraArgs influence config.
		allArgs := []string{"-f", HelmEmptyValuesPath}
		allArgs = append(allArgs, "eraser/eraser")
		allArgs = append(allArgs, extraArgs...)
		if err := HelmInstall(cfg.KubeconfigFile(), namespace, allArgs); err != nil {
			return ctx, err
		}
		client, err := cfg.NewClient()
		if err != nil {
			klog.ErrorS(err, "Failed to create new Client")
			return ctx, err
		}
		// wait for the deployment to finish becoming available
		eraserManagerDep := appsv1.Deployment{
			ObjectMeta: metav1.ObjectMeta{Name: "eraser-controller-manager", Namespace: namespace},
		}
		if err := wait.For(conditions.New(client.Resources()).DeploymentConditionMatch(&eraserManagerDep, appsv1.DeploymentAvailable, corev1.ConditionTrue),
			wait.WithTimeout(Timeout)); err != nil {
			klog.ErrorS(err, "failed to deploy eraser manager")
			return ctx, err
		}
		return ctx, nil
	}
}
// IsNotFound reports whether err is a Kubernetes NotFound error (a nil err
// returns false).
func IsNotFound(err error) bool {
	if err == nil {
		return false
	}
	return client.IgnoreNotFound(err) == nil
}
// NewDeployment builds a Deployment with the given replica count and labels
// (used both as the selector and the pod-template labels). When no containers
// are supplied, a single nginx container is used. Pod anti-affinity on the
// hostname topology key forces replicas onto distinct nodes, which the e2e
// tests rely on to seed an image onto every worker.
func NewDeployment(namespace, name string, replicas int32, labels map[string]string, containers ...corev1.Container) *appsv1.Deployment {
	if len(containers) == 0 {
		containers = []corev1.Container{
			{Image: "nginx", Name: "nginx"},
		}
	}
	return &appsv1.Deployment{
		ObjectMeta: metav1.ObjectMeta{
			Name:      name,
			Namespace: namespace,
		},
		Spec: appsv1.DeploymentSpec{
			Replicas: &replicas,
			Selector: &metav1.LabelSelector{
				MatchLabels: labels,
			},
			Template: corev1.PodTemplateSpec{
				ObjectMeta: metav1.ObjectMeta{Labels: labels},
				Spec: corev1.PodSpec{
					Affinity: &corev1.Affinity{
						PodAntiAffinity: &corev1.PodAntiAffinity{
							// One replica per node: forbid co-scheduling pods
							// that share these labels on the same hostname.
							RequiredDuringSchedulingIgnoredDuringExecution: []corev1.PodAffinityTerm{
								{
									LabelSelector: &metav1.LabelSelector{
										MatchLabels: labels,
									},
									TopologyKey: "kubernetes.io/hostname",
								},
							},
						},
					},
					Containers: containers,
				},
			},
		},
	}
}
// NewPod builds a single-container pod pinned to nodeName; the container
// shares the pod's name and runs the given image.
func NewPod(namespace, image, name, nodeName string) *corev1.Pod {
	container := corev1.Container{
		Name:  name,
		Image: image,
	}
	pod := corev1.Pod{
		ObjectMeta: metav1.ObjectMeta{
			Name:      name,
			Namespace: namespace,
		},
		Spec: corev1.PodSpec{
			NodeName:   nodeName,
			Containers: []corev1.Container{container},
		},
	}
	return &pod
}
// DeployEraserConfig applies the eraser configuration manifest at fileName
// into the given namespace via "kubectl apply -f".
func DeployEraserConfig(kubeConfig, namespace, fileName string) error {
	// The previous err != nil / return nil dance was redundant; return directly.
	return KubectlApply(kubeConfig, namespace, []string{"-f", fileName})
}
// NumPodsPresentForLabel returns a poll condition that is true once exactly
// num pods match the given label selector.
func NumPodsPresentForLabel(ctx context.Context, client klient.Client, num int, label string) func() (bool, error) {
	return func() (bool, error) {
		podList := corev1.PodList{}
		if err := client.Resources().List(ctx, &podList, resources.WithLabelSelector(label)); err != nil {
			return false, err
		}
		return num == len(podList.Items), nil
	}
}
// ContainerNotPresentOnNode returns a poll condition that is true once the
// named container no longer appears in the node's containerd listing.
func ContainerNotPresentOnNode(nodeName, containerName string) func() (bool, error) {
	return func() (bool, error) {
		listing, err := ListNodeContainers(nodeName)
		if err != nil {
			return false, err
		}
		present := strings.Contains(listing, containerName)
		return !present, nil
	}
}
// ImagejobNotInCluster returns a poll condition that is true once
// "kubectl get imagejob" reports no resources.
func ImagejobNotInCluster(kubeconfigPath string) func() (bool, error) {
	return func() (bool, error) {
		out, err := KubectlGet(kubeconfigPath, "imagejob")
		if err != nil {
			return false, err
		}
		gone := strings.Contains(out, "No resources")
		return gone, nil
	}
}
// GetImageJob returns the single ImageJob in the cluster, erroring if zero
// or more than one are present.
func GetImageJob(ctx context.Context, cfg *envconf.Config) (eraserv1.ImageJob, error) {
	var empty eraserv1.ImageJob
	c, err := cfg.NewClient()
	if err != nil {
		return empty, err
	}
	jobList := eraserv1.ImageJobList{}
	if err := c.Resources().List(ctx, &jobList); err != nil {
		return empty, err
	}
	if len(jobList.Items) != 1 {
		return empty, errors.New("only one imagejob should be present")
	}
	return jobList.Items[0], nil
}
// ListNodeContainers shells into a KinD node container via docker exec and
// returns containerd's container listing for the k8s.io namespace. On error
// the (trimmed) output is folded into the returned error.
func ListNodeContainers(nodeName string) (string, error) {
	//nolint:gosec // G204: Docker subprocess execution is intended for e2e tests
	cmd := exec.Command("docker",
		"exec", nodeName,
		"ctr", "-n", "k8s.io",
		"containers", "list",
	)
	combined, err := cmd.CombinedOutput()
	trimmed := strings.TrimSpace(string(combined))
	if err != nil {
		return trimmed, fmt.Errorf("%w: %s", err, trimmed)
	}
	return trimmed, nil
}
// ListNodeImages shells into a KinD node container via docker exec and
// returns containerd's image listing for the k8s.io namespace. On error the
// (trimmed) output is folded into the returned error.
func ListNodeImages(nodeName string) (string, error) {
	//nolint:gosec // G204: Docker subprocess execution is intended for e2e tests
	cmd := exec.Command("docker",
		"exec", nodeName,
		"ctr", "-n", "k8s.io",
		"images", "list",
	)
	combined, err := cmd.CombinedOutput()
	trimmed := strings.TrimSpace(string(combined))
	if err != nil {
		return trimmed, fmt.Errorf("%w: %s", err, trimmed)
	}
	return trimmed, nil
}
// GetClusterNodes lists the kind cluster's nodes, filtering out the
// control-plane node. It fails the test if the node list cannot be fetched.
func GetClusterNodes(t *testing.T) []string {
	t.Helper()
	provider := cluster.NewProvider(cluster.ProviderWithDocker())
	nodes, err := provider.ListNodes(KindClusterName)
	if err != nil {
		t.Fatal("Cannot list Kind node list", err)
	}
	var workers []string
	for _, node := range nodes {
		name := node.String()
		if strings.Contains(name, "control-plane") {
			continue
		}
		workers = append(workers, name)
	}
	return workers
}
// CheckImagesExist asserts that every image in images is present on every
// node in nodes, reporting a test error for each missing image and for each
// node whose image list cannot be retrieved.
func CheckImagesExist(t *testing.T, nodes []string, images ...string) {
	t.Helper()
	for _, node := range nodes {
		listed, err := ListNodeImages(node)
		if err != nil {
			t.Errorf("Cannot list images on node %s: %v", node, err)
			continue
		}
		for _, img := range images {
			if strings.Contains(listed, img) {
				continue
			}
			t.Errorf("image %s missing on node %s", img, node)
		}
	}
}
// CheckDeploymentCleanedUp polls until ctx is done, failing the test as soon
// as any manual imagejob pods reappear — i.e. the job got restarted when it
// should have stayed cleaned up.
func CheckDeploymentCleanedUp(ctx context.Context, t *testing.T, client klient.Client) {
	t.Helper()
	selector := resources.WithLabelSelector(ImageJobTypeLabelKey + "=" + ManualLabel)
	for {
		select {
		case <-ctx.Done():
			return
		default:
		}
		var podList corev1.PodList
		if err := client.Resources().List(ctx, &podList, selector); err != nil {
			t.Fatalf("error listing images: %s", err)
		}
		if n := len(podList.Items); n > 0 {
			t.Errorf("imagejob got restarted when it shouldn't: %d manual pods still present", n)
			t.FailNow()
		}
		time.Sleep(time.Second * 2)
	}
}
// CheckImageRemoved polls every node in nodes until all of the given images
// have been removed from each of them, or until ctx is done, in which case
// the test fails with a timeout error.
func CheckImageRemoved(ctx context.Context, t *testing.T, nodes []string, images ...string) {
	t.Helper()
	cleaned := make(map[string]bool)
	for len(cleaned) < len(nodes) {
		select {
		case <-ctx.Done():
			t.Error("timeout waiting for images to be cleaned")
			return
		default:
		}
		for _, node := range nodes {
			if cleaned[node] {
				continue
			}
			nodeImages, err := ListNodeImages(node)
			if err != nil {
				t.Error("Cannot list images", err)
				// Bug fix: previously the (error) output was still scanned
				// for the images, which could falsely mark the node as
				// cleaned. Skip and retry this node on the next pass.
				continue
			}
			// Count how many of the target images are gone from this node.
			removed := 0
			for _, img := range images {
				if !strings.Contains(nodeImages, img) {
					removed++
				}
			}
			if removed == len(images) {
				cleaned[node] = true
			}
		}
		time.Sleep(time.Second)
	}
	// The loop only exits once every node is cleaned (or via the timeout
	// return above), so the old trailing "not all nodes cleaned" check was
	// unreachable and has been removed.
}
// DockerPullImage pulls the given image with the local docker CLI, returning
// the trimmed combined output; on failure the output is wrapped in the error.
func DockerPullImage(image string) (string, error) {
	//nolint:gosec // G204: Docker subprocess execution is intended for e2e tests
	cmd := exec.Command("docker", "pull", image)
	combined, err := cmd.CombinedOutput()
	out := strings.TrimSpace(string(combined))
	if err != nil {
		return out, fmt.Errorf("%w: %s", err, out)
	}
	return out, nil
}
// DockerTagImage tags the given image with tag using the local docker CLI,
// returning the trimmed combined output; on failure the output is wrapped in
// the error.
func DockerTagImage(image, tag string) (string, error) {
	//nolint:gosec // G204: Docker subprocess execution is intended for e2e tests
	cmd := exec.Command("docker", "tag", image, tag)
	combined, err := cmd.CombinedOutput()
	out := strings.TrimSpace(string(combined))
	if err != nil {
		return out, fmt.Errorf("%w: %s", err, out)
	}
	return out, nil
}
// DeleteImageListsAndJobs removes every imagejob and then every imagelist
// resource from the cluster identified by kubeConfig.
func DeleteImageListsAndJobs(kubeConfig string) error {
	for _, resource := range []string{"imagejob", "imagelist"} {
		if err := KubectlDelete(kubeConfig, "", []string{resource, "--all"}); err != nil {
			return err
		}
	}
	return nil
}
// DeleteStringFromSlice removes the first occurrence of s from slice by
// swapping it with the last element and truncating, so the relative order of
// the remaining elements is NOT preserved and the input's backing array is
// modified in place. If s is not found, slice is returned unchanged.
func DeleteStringFromSlice(slice []string, s string) []string {
	// Renamed the parameter from `strings`, which shadowed the strings
	// package inside this function.
	for i, cmp := range slice {
		if cmp == s {
			last := len(slice) - 1
			slice[last], slice[i] = slice[i], slice[last]
			return slice[:last]
		}
	}
	return slice
}
// DeployEraserHelm installs the eraser Helm chart into the given namespace,
// passing args through to helm, then waits for the eraser-controller-manager
// deployment to become available.
func DeployEraserHelm(namespace string, args ...string) env.Func {
	return func(ctx context.Context, cfg *envconf.Config) (context.Context, error) {
		chartDir := filepath.Join(ChartPath, "eraser")
		helmArgs := append([]string{chartDir, "-f", HelmEmptyValuesPath}, args...)
		if err := HelmInstall(cfg.KubeconfigFile(), namespace, helmArgs); err != nil {
			return ctx, err
		}
		client, err := cfg.NewClient()
		if err != nil {
			klog.ErrorS(err, "Failed to create new Client")
			return ctx, err
		}
		// Block until the controller-manager deployment reports Available.
		managerDeployment := appsv1.Deployment{
			ObjectMeta: metav1.ObjectMeta{Name: "eraser-controller-manager", Namespace: namespace},
		}
		available := conditions.New(client.Resources()).
			DeploymentConditionMatch(&managerDeployment, appsv1.DeploymentAvailable, corev1.ConditionTrue)
		if err := wait.For(available, wait.WithTimeout(Timeout)); err != nil {
			klog.ErrorS(err, "failed to deploy eraser manager")
			return ctx, err
		}
		return ctx, nil
	}
}
// UpgradeEraserHelm upgrades the eraser Helm chart in the given namespace
// (adding --install when HELM_UPGRADE_TEST is unset, so a missing release is
// created) and waits for the eraser-controller-manager deployment to become
// available.
func UpgradeEraserHelm(namespace string, args ...string) env.Func {
	return func(ctx context.Context, cfg *envconf.Config) (context.Context, error) {
		chartDir := filepath.Join(ChartPath, "eraser")
		helmArgs := append([]string{chartDir, "-f", HelmEmptyValuesPath}, args...)
		if os.Getenv("HELM_UPGRADE_TEST") == "" {
			helmArgs = append(helmArgs, "--install")
		}
		if err := HelmUpgrade(cfg.KubeconfigFile(), namespace, helmArgs); err != nil {
			return ctx, err
		}
		client, err := cfg.NewClient()
		if err != nil {
			klog.ErrorS(err, "Failed to create new Client")
			return ctx, err
		}
		// Block until the controller-manager deployment reports Available.
		managerDeployment := appsv1.Deployment{
			ObjectMeta: metav1.ObjectMeta{Name: "eraser-controller-manager", Namespace: namespace},
		}
		available := conditions.New(client.Resources()).
			DeploymentConditionMatch(&managerDeployment, appsv1.DeploymentAvailable, corev1.ConditionTrue)
		if err := wait.For(available, wait.WithTimeout(Timeout)); err != nil {
			klog.ErrorS(err, "failed to deploy eraser manager")
			return ctx, err
		}
		return ctx, nil
	}
}
// DeployOtelCollector applies the OTEL collector manifest into the given
// namespace and waits for the otel-collector deployment to become available.
func DeployOtelCollector(namespace string) env.Func {
	return func(ctx context.Context, cfg *envconf.Config) (context.Context, error) {
		applyArgs := []string{"-f", OTELCollectorConfigPath}
		if err := KubectlApply(cfg.KubeconfigFile(), namespace, applyArgs); err != nil {
			return ctx, err
		}
		client, err := cfg.NewClient()
		if err != nil {
			klog.ErrorS(err, "Failed to create new Client")
			return ctx, err
		}
		// Block until the collector deployment reports Available.
		collectorDeployment := appsv1.Deployment{
			ObjectMeta: metav1.ObjectMeta{Name: "otel-collector", Namespace: namespace},
		}
		available := conditions.New(client.Resources()).
			DeploymentConditionMatch(&collectorDeployment, appsv1.DeploymentAvailable, corev1.ConditionTrue)
		if err := wait.For(available, wait.WithTimeout(Timeout)); err != nil {
			klog.ErrorS(err, "failed to deploy otelcollector")
			return ctx, err
		}
		return ctx, nil
	}
}
// GetPodLogs copies /var/log/containers and /var/log/pods from each kind
// node into TestLogDir/<test-name>/<node-name> for post-mortem debugging.
// Failures are logged and skipped; the function always returns nil so it can
// be used in teardown paths without affecting test results.
func GetPodLogs(t *testing.T) error {
	// The top-level test name (ignoring subtests) is loop-invariant;
	// previously it was recomputed on every iteration.
	testName := strings.Split(t.Name(), "/")[0]
	for _, nodeName := range []string{"eraser-e2e-test-control-plane", "eraser-e2e-test-worker", "eraser-e2e-test-worker2"} {
		path := filepath.Join(TestLogDir, testName, nodeName)
		if err := os.MkdirAll(path, 0o750); err != nil {
			t.Logf("error: %s", err)
			continue
		}
		// Copy both log directories; skip the second if the first fails,
		// matching the original `continue` behavior.
		for _, logDir := range []string{"/var/log/containers", "/var/log/pods"} {
			t.Logf(`docker cp %s:%s %s`, nodeName, logDir, path)
			cmd := exec.Command("docker", "cp", nodeName+":"+logDir, path) //nolint:gosec
			if output, err := cmd.CombinedOutput(); err != nil {
				t.Logf("error: %s\n%s", err, string(output))
				break
			}
		}
	}
	return nil
}
// MakeDeploy returns an env.Func that runs `make deploy` in the project
// root, appending the entries of envVars as KEY=VALUE make variables.
func MakeDeploy(envVars map[string]string) env.Func {
	// Renamed the parameter from `env`, which shadowed the imported env
	// package inside the closure body.
	return func(ctx context.Context, _ *envconf.Config) (context.Context, error) {
		makeArgs := []string{"deploy"}
		for key, value := range envVars {
			makeArgs = append(makeArgs, fmt.Sprintf("%s=%s", key, value))
		}
		cmd := exec.Command("make", makeArgs...)
		cmd.Dir = ProjectAbsDir
		out, err := cmd.CombinedOutput()
		if err != nil {
			fmt.Fprint(os.Stderr, string(out))
			return ctx, err
		}
		klog.Info(string(out))
		return ctx, nil
	}
}
// DeployEraserManifest applies the named manifest file from DeployPath into
// the given namespace.
func DeployEraserManifest(namespace, fileName string) env.Func {
	return func(ctx context.Context, cfg *envconf.Config) (context.Context, error) {
		manifest := filepath.Join(DeployPath, fileName)
		err := DeployEraserConfig(cfg.KubeconfigFile(), namespace, manifest)
		return ctx, err
	}
}
// CreateExclusionList creates an exclusion-list ConfigMap (labeled
// eraser.sh/exclude.list=true) whose excluded.json data holds the given
// list, in the target namespace, and waits until the ConfigMap is visible
// through the API.
func CreateExclusionList(namespace string, list string) env.Func {
	return func(ctx context.Context, cfg *envconf.Config) (context.Context, error) {
		c, err := cfg.NewClient()
		if err != nil {
			return ctx, err
		}
		// create excluded configmap and add docker.io/library/alpine
		excluded := corev1.ConfigMap{
			ObjectMeta: metav1.ObjectMeta{
				GenerateName: "excluded",
				Namespace:    namespace,
				Labels:       map[string]string{"eraser.sh/exclude.list": "true"},
			},
			Data: map[string]string{"excluded.json": list},
		}
		// Consistency fix: use the client created above instead of mixing
		// it with cfg.Client() for the Create call.
		if err := c.Resources().Create(ctx, &excluded); err != nil {
			return ctx, err
		}
		// Create populates excluded.Name from GenerateName; poll until the
		// ConfigMap can be read back under that name.
		cMap := corev1.ConfigMap{}
		err = wait.For(func() (bool, error) {
			err := c.Resources().Get(ctx, excluded.Name, namespace, &cMap)
			if IsNotFound(err) {
				return false, nil
			}
			if err != nil {
				return false, err
			}
			return cMap.Name == excluded.Name, nil
		}, wait.WithTimeout(Timeout))
		if err != nil {
			return ctx, err
		}
		return ctx, nil
	}
}
================================================
FILE: test/e2e/util/utils_test.go
================================================
package util
import (
"testing"
"k8s.io/klog/v2"
)
// TestParseRepoTag exercises parseRepoTag with tagged references, digest
// references, and malformed inputs, checking both the parsed RepoTag and
// whether an error is expected.
func TestParseRepoTag(t *testing.T) {
	cases := []struct {
		input     string
		expected  RepoTag
		expectErr bool
	}{
		{
			input: "ghcr.io/repo/one/two:three",
			expected: RepoTag{
				Repo: "ghcr.io/repo/one/two",
				Tag:  "three",
			},
			expectErr: false,
		},
		{
			input: "ghcr.io/one:two",
			expected: RepoTag{
				Repo: "ghcr.io/one",
				Tag:  "two",
			},
			expectErr: false,
		},
		{
			input: "eraser:e2e-test",
			expected: RepoTag{
				Repo: "eraser",
				Tag:  "e2e-test",
			},
			expectErr: false,
		},
		{
			input: "eraser@sha256:4dca0fd5f424a31b03ab807cbae77eb32bf2d089eed1cee154b3afed458de0dc",
			expected: RepoTag{
				Repo: "eraser",
				Tag:  "sha256:4dca0fd5f424a31b03ab807cbae77eb32bf2d089eed1cee154b3afed458de0dc",
			},
			expectErr: false,
		},
		{
			input: "eraser:sha256:4dca0fd5f424a31b03ab807cbae77eb32bf2d089eed1cee154b3afed458de0dc",
			expected: RepoTag{
				Repo: "eraser",
				Tag:  "sha256:4dca0fd5f424a31b03ab807cbae77eb32bf2d089eed1cee154b3afed458de0dc",
			},
			expectErr: false,
		},
		{
			input: "eraser@sha256:4dca0fd5f4:4a31b03ab807cbae77eb32bf2d089eed1cee154b3afed458de0dc",
			expected: RepoTag{
				Repo: "",
				Tag:  "",
			},
			expectErr: true,
		},
		{
			input: "docker.io/nginx@sha256:4dca0fd5f424a31b03ab807cbae77eb32bf2d089eed1cee154b3afed458de0dc",
			expected: RepoTag{
				Repo: "docker.io/nginx",
				Tag:  "sha256:4dca0fd5f424a31b03ab807cbae77eb32bf2d089eed1cee154b3afed458de0dc",
			},
			expectErr: false,
		},
		{
			input: "docker.io/library/nginx@sha256:4dca0fd5f424a31b03ab807cbae77eb32bf2d089eed1cee154b3afed458de0dc",
			expected: RepoTag{
				Repo: "docker.io/library/nginx",
				Tag:  "sha256:4dca0fd5f424a31b03ab807cbae77eb32bf2d089eed1cee154b3afed458de0dc",
			},
			expectErr: false,
		},
		{
			input: "docker.io/nginx@sha256:4dca0fd5f4",
			expected: RepoTag{
				Repo: "",
				Tag:  "",
			},
			expectErr: true,
		},
		{
			input: "docker.io/nginx@sha256:gggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggggg",
			expected: RepoTag{
				Repo: "",
				Tag:  "",
			},
			expectErr: true,
		},
		{
			input: "docker.io/library/nginx@sha123:4dca0fd5f424a31b03ab807cbae77eb32bf2d089eed1cee154b3afed458de0dc",
			expected: RepoTag{
				Repo: "",
				Tag:  "",
			},
			expectErr: true,
		},
		{
			input: "",
			expected: RepoTag{
				Repo: "",
				Tag:  "",
			},
			expectErr: false,
		},
		{
			input: ":",
			expected: RepoTag{
				Repo: "",
				Tag:  "",
			},
			expectErr: true,
		},
		{
			input: "/",
			expected: RepoTag{
				Repo: "",
				Tag:  "",
			},
			expectErr: true,
		},
	}
	for _, c := range cases {
		result, err := parseRepoTag(c.input)
		if err != nil {
			if c.expectErr {
				continue
			}
			klog.Errorf("error from parsing function: %#v\ninput: %s\nexpected: %#v\ngot: %#v", err, c.input, c.expected, result)
			t.FailNow()
		}
		if c.expectErr {
			klog.Errorf("expected error parsing reference `%s`, but did not receive one", c.input)
			t.Fail()
			// Bug fix: skip the RepoTag comparison for error cases — the
			// expected value is the zero RepoTag, so comparing it against
			// the successfully-parsed result only produced a second,
			// redundant failure message.
			continue
		}
		if result.Repo != c.expected.Repo || result.Tag != c.expected.Tag {
			klog.Errorf("wrong result\ninput: %s\nexpected: %#v\ngot: %#v", c.input, c.expected, result)
			t.Fail()
		}
	}
}
================================================
FILE: third_party/open-policy-agent/gatekeeper/helmify/LICENSE
================================================
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "[]"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright [yyyy] [name of copyright owner]
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
================================================
FILE: third_party/open-policy-agent/gatekeeper/helmify/README.md
================================================
# gatekeeper/helmify
Forked from https://github.com/open-policy-agent/gatekeeper (v3.5.0-rc.1).
The helmify tool auto-generates the Helm chart from the rendered manifests, avoiding drift between the two.
The original code can be found at https://github.com/open-policy-agent/gatekeeper/tree/master/cmd/build/helmify.
================================================
FILE: third_party/open-policy-agent/gatekeeper/helmify/kustomization.yaml
================================================
namespace: "{{ .Release.Namespace }}"
commonLabels:
app.kubernetes.io/name: '{{ template "eraser.name" . }}'
helm.sh/chart: '{{ template "eraser.name" . }}'
app.kubernetes.io/managed-by: '{{ .Release.Service }}'
app.kubernetes.io/instance: "{{ .Release.Name }}"
bases:
- "../../../../config/default"
patchesStrategicMerge:
- kustomize-for-helm.yaml
patchesJson6902:
# these are defined in the chart values rather than hard-coded
- target:
kind: Deployment
name: eraser-controller-manager
patch: |-
- op: remove
path: /spec/template/spec/containers/0/resources/limits
- op: remove
path: /spec/template/spec/containers/0/resources/requests
- op: remove
path: /spec/template/spec/nodeSelector/kubernetes.io~1os
================================================
FILE: third_party/open-policy-agent/gatekeeper/helmify/kustomize-for-helm.yaml
================================================
apiVersion: apps/v1
kind: Deployment
metadata:
name: eraser-controller-manager
namespace: eraser-system
spec:
template:
metadata:
labels:
HELMSUBST_DEPLOYMENT_CONTROLLER_MANAGER_ADDITIONALPODLABELS: ""
spec:
HELMSUBST_DEPLOYMENT_CONTROLLER_MANAGER_PULL_SECRETS: ""
volumes:
- name: eraser-manager-config
configMap:
name: eraser-manager-config
containers:
- name: manager
args:
- --config=/config/controller_manager_config.yaml
- HELMSUBST_DEPLOYMENT_CONTROLLER_MANAGER_ADDITIONAL_ARGS
command:
- /manager
image: "{{ .Values.deploy.image.repo }}:{{ .Values.deploy.image.tag | default .Chart.AppVersion }}"
imagePullPolicy: "{{ .Values.deploy.image.pullPolicy }}"
livenessProbe:
httpGet:
path: /healthz
port: 8081
initialDelaySeconds: 15
periodSeconds: 20
readinessProbe:
httpGet:
path: /readyz
port: 8081
initialDelaySeconds: 5
periodSeconds: 10
resources:
HELMSUBST_DEPLOYMENT_CONTROLLER_MANAGER_CONTAINER_RESOURCES: ""
securityContext:
allowPrivilegeEscalation: false
volumeMounts:
- name: eraser-manager-config
mountPath: /config
nodeSelector:
HELMSUBST_DEPLOYMENT_CONTROLLER_MANAGER_NODESELECTOR: ""
tolerations:
HELMSUBST_DEPLOYMENT_CONTROLLER_MANAGER_TOLERATIONS: ""
affinity:
HELMSUBST_DEPLOYMENT_CONTROLLER_MANAGER_AFFINITY: ""
priorityClassName: "{{ .Values.deploy.priorityClassName }}"
================================================
FILE: third_party/open-policy-agent/gatekeeper/helmify/main.go
================================================
package main
import (
"bufio"
"flag"
"fmt"
"log"
"os"
"path"
"regexp"
"strings"
)
// outputDir is the root directory in which the generated Helm chart is written.
var outputDir = flag.String("output-dir", "manifest_staging/charts/eraser", "The root directory in which to write the Helm chart")

// kindRegex captures the value of a top-level `kind:` field in a YAML document.
var kindRegex = regexp.MustCompile(`(?m)^kind:[\s]+([\S]+)[\s]*$`)

// use exactly two spaces to be sure we are capturing metadata.name.
var nameRegex = regexp.MustCompile(`(?m)^ name:[\s]+([\S]+)[\s]*$`)
// extractKind returns the value of the `kind:` field in the YAML document s,
// stripped of surrounding quotes, or an error when no kind field is present.
func extractKind(s string) (string, error) {
	m := kindRegex.FindStringSubmatch(s)
	if len(m) != 2 {
		return "", fmt.Errorf("%s does not have a kind", s)
	}
	kind := strings.Trim(m[1], `"'`)
	return kind, nil
}
// extractName returns the metadata.name value of the YAML document s,
// stripped of surrounding quotes, or an error when no name field is present.
func extractName(s string) (string, error) {
	m := nameRegex.FindStringSubmatch(s)
	if len(m) != 2 {
		return "", fmt.Errorf("%s does not have a name", s)
	}
	name := strings.Trim(m[1], `"'`)
	return name, nil
}
// kindSet groups rendered YAML documents by their Kubernetes kind.
type kindSet struct {
	// byKind maps a kind (e.g. "Deployment") to the raw YAML documents of that kind.
	byKind map[string][]string
}
// Add extracts the kind of obj and appends obj to the bucket for that kind.
// It returns an error if obj has no kind field.
func (ks *kindSet) Add(obj string) error {
	kind, err := extractKind(obj)
	if err != nil {
		return err
	}
	// append on a missing key works because append accepts a nil slice,
	// so the explicit ok-check dance was unnecessary.
	ks.byKind[kind] = append(ks.byKind[kind], obj)
	return nil
}
// Write renders every collected object into
// <outputDir>/templates/<name>-<kind>.yaml. Namespace objects are skipped
// because the namespace is supplied by the Helm release.
func (ks *kindSet) Write() error {
	for kind, objs := range ks.byKind {
		if kind == "Namespace" {
			continue
		}
		for _, obj := range objs {
			// The `nameExtractor := extractName` indirection from the
			// upstream gatekeeper fork was dropped: every kind here uses
			// extractName directly.
			name, err := extractName(obj)
			if err != nil {
				return err
			}
			fileName := fmt.Sprintf("%s-%s.yaml", strings.ToLower(name), strings.ToLower(kind))
			destFile := path.Join(*outputDir, "templates", fileName)
			fmt.Printf("Writing %s\n", destFile)
			if err := os.WriteFile(destFile, []byte(obj), 0o600); err != nil {
				return err
			}
		}
	}
	return nil
}
// doReplacements substitutes every HELMSUBST placeholder in obj with its
// Helm-template replacement from the replacements table.
func doReplacements(obj string) string {
	// Renamed the loop variables from `old`/`new`; `new` shadowed the
	// builtin of the same name.
	for placeholder, replacement := range replacements {
		obj = strings.ReplaceAll(obj, placeholder, replacement)
	}
	return obj
}
// copyStaticFiles recursively copies the directory tree rooted at
// root/subdirs... into the corresponding location under outputDir, creating
// directories as it descends.
func copyStaticFiles(root string, subdirs ...string) error {
	srcDir := path.Join(append([]string{root}, subdirs...)...)
	entries, err := os.ReadDir(srcDir)
	if err != nil {
		return err
	}
	for _, entry := range entries {
		childSubDirs := append(append([]string{}, subdirs...), entry.Name())
		destination := path.Join(append([]string{*outputDir}, childSubDirs...)...)
		if entry.IsDir() {
			fmt.Printf("Making %s\n", destination)
			if err := os.Mkdir(destination, 0o750); err != nil {
				return err
			}
			if err := copyStaticFiles(root, childSubDirs...); err != nil {
				return err
			}
			continue
		}
		contents, err := os.ReadFile(path.Join(srcDir, entry.Name())) // #nosec G304
		if err != nil {
			return err
		}
		fmt.Printf("Writing %s\n", destination)
		if err := os.WriteFile(destination, contents, 0o600); err != nil {
			return err
		}
	}
	return nil
}
// main reads a multi-document YAML manifest from stdin, applies the
// HELMSUBST replacements, groups the documents by kind, copies the chart's
// static files, and writes each document out as a chart template.
func main() {
	flag.Parse()
	scanner := bufio.NewScanner(os.Stdin)
	kinds := kindSet{byKind: make(map[string][]string)}
	b := strings.Builder{}
	// notate flushes the current document buffer into the kind set.
	notate := func() {
		obj := doReplacements(b.String())
		b.Reset()
		if err := kinds.Add(obj); err != nil {
			// Bug fix: log obj, not b.String() — the builder was just
			// reset, so b.String() here was always empty.
			log.Fatalf("Error adding object: %s, %s", err, obj)
		}
	}
	for scanner.Scan() {
		// "---" separates YAML documents; everything else accumulates.
		if strings.HasPrefix(scanner.Text(), "---") {
			if b.Len() > 0 {
				notate()
			}
		} else {
			b.WriteString(scanner.Text())
			b.WriteString("\n")
		}
	}
	// Report scanner failures (e.g. a line exceeding the buffer) instead of
	// silently truncating the input.
	if err := scanner.Err(); err != nil {
		log.Fatal(err)
	}
	if b.Len() > 0 {
		notate()
	}
	if err := copyStaticFiles("third_party/open-policy-agent/gatekeeper/helmify/static"); err != nil {
		log.Fatal(err)
	}
	if err := kinds.Write(); err != nil {
		log.Fatal(err)
	}
}
================================================
FILE: third_party/open-policy-agent/gatekeeper/helmify/replacements.go
================================================
package main
// replacements maps HELMSUBST_* placeholder strings, injected into the static
// manifest by the helmify kustomization, to the Helm template expressions
// that are substituted in their place when the chart is generated.
var replacements = map[string]string{
`HELMSUBST_DEPLOYMENT_CONTROLLER_MANAGER_CONTAINER_RESOURCES: ""`: `{{- toYaml .Values.deploy.resources | nindent 10 }}`,
`HELMSUBST_DEPLOYMENT_CONTROLLER_MANAGER_NODESELECTOR: ""`: `{{- toYaml .Values.deploy.nodeSelector | nindent 8 }}`,
`HELMSUBST_DEPLOYMENT_CONTROLLER_MANAGER_TOLERATIONS: ""`: `{{- toYaml .Values.deploy.tolerations | nindent 8 }}`,
`HELMSUBST_DEPLOYMENT_CONTROLLER_MANAGER_AFFINITY: ""`: `{{- toYaml .Values.deploy.affinity | nindent 8 }}`,
`- HELMSUBST_DEPLOYMENT_CONTROLLER_MANAGER_ADDITIONAL_ARGS`: `{{- if .Values.deploy.additionalArgs }}{{- range .Values.deploy.additionalArgs }}{{ nindent 8 "- " }}{{ . }}{{- end -}}{{ end }}`,
`HELMSUBST_CONTROLLER_MANAGER_CONFIG_YAML`: `{{- toYaml .Values.runtimeConfig | nindent 4 }}`,
`HELMSUBST_DEPLOYMENT_CONTROLLER_MANAGER_ADDITIONALPODLABELS: ""`: `{{- if .Values.deploy.additionalPodLabels }}{{- toYaml .Values.deploy.additionalPodLabels | nindent 8 }}{{end}}`,
`HELMSUBST_DEPLOYMENT_CONTROLLER_MANAGER_PULL_SECRETS: ""`: `{{- if .Values.runtimeConfig.manager.pullSecrets }}
imagePullSecrets:
{{- range .Values.runtimeConfig.manager.pullSecrets }}
- name: {{ . }}
{{- end }}
{{- end }}`,
}
================================================
FILE: third_party/open-policy-agent/gatekeeper/helmify/static/.helmignore
================================================
# Patterns to ignore when building packages.
# This supports shell glob matching, relative path matching, and
# negation (prefixed with !). Only one pattern per line.
.DS_Store
# Common VCS dirs
.git/
.gitignore
.bzr/
.bzrignore
.hg/
.hgignore
.svn/
# Common backup files
*.swp
*.bak
*.tmp
*.orig
*~
# Various IDEs
.project
.idea/
*.tmproj
.vscode/
================================================
FILE: third_party/open-policy-agent/gatekeeper/helmify/static/Chart.yaml
================================================
apiVersion: v2
name: eraser
description: A Helm chart for Eraser
type: application
version: 1.5.0-beta.0
appVersion: v1.5.0-beta.0
home: https://github.com/eraser-dev/eraser
sources:
- https://github.com/eraser-dev/eraser.git
================================================
FILE: third_party/open-policy-agent/gatekeeper/helmify/static/README.md
================================================
# Eraser Helm Chart
## Contributing Changes
This Helm chart is autogenerated from the Eraser static manifest. The generator code lives under `third_party/open-policy-agent/gatekeeper/helmify`. To make modifications to this template, please edit `kustomization.yaml`, `kustomize-for-helm.yaml`, and `replacements.go` under that directory, and then run `make manifests`. Your changes will show up in the `manifest_staging` directory and will be promoted to the root `charts` directory the next time an Eraser release is cut.
## Get Repo Info
```console
helm repo add eraser https://eraser-dev.github.io/eraser/charts
helm repo update
```
_See [helm repo](https://helm.sh/docs/helm/helm_repo/) for command documentation._
## Install Chart
```console
# Helm install with eraser-system namespace already created
$ helm install -n eraser-system [RELEASE_NAME] eraser/eraser
# Helm install and create namespace
$ helm install -n eraser-system [RELEASE_NAME] eraser/eraser --create-namespace
```
_See [parameters](#parameters) below._
_See [helm install](https://helm.sh/docs/helm/helm_install/) for command documentation._
## Parameters
| Parameter | Description | Default |
| ----------------------------------------------- | ---------------------------------------------------------------------------------------------------- | ------------------------------ |
| runtimeConfig.health | Settings for the health server. | `{}` |
| runtimeConfig.metrics | Settings for the metrics server. | `{}` |
| runtimeConfig.webhook | Settings for the webhook server. | `{}` |
| runtimeConfig.leaderElection | Settings for leader election. | `{}` |
| runtimeConfig.manager.runtime | The container runtime to use. | `containerd` |
| runtimeConfig.manager.otlpEndpoint | The OTLP endpoint to send metrics to. | `""` |
| runtimeConfig.manager.logLevel | The logging level for the manager. | `info` |
| runtimeConfig.manager.scheduling | Settings for scheduling. | `{}` |
| runtimeConfig.manager.profile | Settings for the profiler. | `{}` |
| runtimeConfig.manager.imageJob.successRatio | The minimum ratio of successful image jobs required for the overall job to be considered successful. | `1.0` |
| runtimeConfig.manager.imageJob.cleanup | Settings for image job cleanup. | `{}` |
| runtimeConfig.manager.pullSecrets | Image pull secrets for collector/scanner/eraser. | `[]` |
| runtimeConfig.manager.priorityClassName | Priority class name for collector/scanner/eraser. | `""` |
| runtimeConfig.manager.additionalPodLabels | Additional labels for all pods that the controller creates at runtime. | `{}` |
| runtimeConfig.manager.nodeFilter | Filter for nodes. | `{}` |
| runtimeConfig.components.collector | Settings for the collector component. | `{ enabled: true }` |
| runtimeConfig.components.scanner | Settings for the scanner component. | `{ enabled: true }` |
| runtimeConfig.components.eraser | Settings for the eraser component. | `{}` |
| deploy.image.repo | Repository for the image. | `ghcr.io/eraser-dev/eraser-manager` |
| deploy.image.pullPolicy | Policy for pulling the image. | `IfNotPresent` |
| deploy.image.tag | Overrides the default image tag. | `""` |
| deploy.additionalArgs | Additional arguments to pass to the command. | `[]` |
| deploy.priorityClassName | Priority class name. | `""` |
| deploy.additionalPodLabels | Additional labels for the controller pod. | `{}` |
| deploy.securityContext.allowPrivilegeEscalation | Whether to allow privilege escalation. | `false` |
| deploy.resources.limits.memory | Memory limit for the resources. | `30Mi` |
| deploy.resources.requests.cpu | CPU request for the resources. | `100m` |
| deploy.resources.requests.memory | Memory request for the resources. | `20Mi` |
| deploy.nodeSelector | Node Selector for manager. | kubernetes.io/os: linux |
| deploy.tolerations | Tolerations for the manager. | [] |
| deploy.affinity | Affinity for the manager. | {} |
| nameOverride | Override name if needed. | "" |
================================================
FILE: third_party/open-policy-agent/gatekeeper/helmify/static/templates/_helpers.tpl
================================================
{{/*
Return the name of the chart: .Values.nameOverride if set, otherwise .Chart.Name.
*/}}
{{- define "eraser.name" -}}
{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" }}
{{- end -}}
================================================
FILE: third_party/open-policy-agent/gatekeeper/helmify/static/templates/configmap.yaml
================================================
apiVersion: v1
kind: ConfigMap
metadata:
name: eraser-manager-config
namespace: "{{ .Release.Namespace }}"
data:
controller_manager_config.yaml: |
{{- toYaml .Values.runtimeConfig | nindent 4 }}
================================================
FILE: third_party/open-policy-agent/gatekeeper/helmify/static/values.yaml
================================================
runtimeConfig:
apiVersion: eraser.sh/v1alpha3
kind: EraserConfig
health: {}
# healthProbeBindAddress: :8081
metrics: {}
# bindAddress: 127.0.0.1:8080
webhook: {}
# port: 9443
leaderElection: {}
# leaderElect: true
# resourceName: e29e094a.k8s.io
manager:
runtime:
name: containerd
address: unix:///run/containerd/containerd.sock
otlpEndpoint: ""
logLevel: info
scheduling: {}
# repeatInterval: ""
# beginImmediately: true
profile: {}
# enabled: false
# port: 0
imageJob:
successRatio: 1.0
cleanup: {}
# delayOnSuccess: ""
# delayOnFailure: ""
pullSecrets: [] # image pull secrets for collector/scanner/eraser
priorityClassName: "" # priority class name for collector/scanner/eraser
additionalPodLabels: {}
nodeFilter:
type: exclude # must be either exclude|include
selectors:
- eraser.sh/cleanup.filter
- kubernetes.io/os=windows
components:
collector:
enabled: true
image:
# repo: ""
tag: "v1.5.0-beta.0"
request: {}
# mem: ""
# cpu: ""
limit: {}
# mem: ""
# cpu: ""
scanner:
enabled: true
image:
# repo: ""
tag: "v1.5.0-beta.0"
request: {}
# mem: ""
# cpu: ""
limit: {}
# mem: ""
# cpu: ""
config: "" # |
# cacheDir: /var/lib/trivy
# dbRepo: ghcr.io/aquasecurity/trivy-db
# deleteFailedImages: true
# deleteEOLImages: true
# vulnerabilities:
# ignoreUnfixed: false
# types:
# - os
# - library
# securityChecks:
# - vuln
# severities:
# - CRITICAL
# - HIGH
# - MEDIUM
# - LOW
# ignoredStatuses:
# timeout:
# total: 23h
# perImage: 1h
remover:
image:
# repo: ""
tag: "v1.5.0-beta.0"
request: {}
# mem: ""
# cpu: ""
limit: {}
# mem: ""
# cpu: ""
deploy:
image:
repo: ghcr.io/eraser-dev/eraser-manager
pullPolicy: IfNotPresent
# Overrides the image tag whose default is the chart appVersion.
tag: "v1.5.0-beta.0"
additionalArgs: []
priorityClassName: ""
additionalPodLabels: {}
securityContext:
allowPrivilegeEscalation: false
resources:
limits:
memory: 30Mi
requests:
cpu: 100m
memory: 20Mi
nodeSelector:
kubernetes.io/os: linux
tolerations: []
affinity: {}
nameOverride: ""
================================================
FILE: version/version.go
================================================
// Package version provides build version and information for the eraser application.
package version
import (
"fmt"
"runtime"
)
var (
	// BuildVersion is the application version set at build time
	// (presumably injected via -ldflags -X; confirm in the Makefile).
	BuildVersion string
	// DefaultRepo is the default repo for images.
	DefaultRepo = "ghcr.io/eraser-dev"
	// buildTime is the date for the binary build.
	buildTime string
	// vcsCommit is the commit hash for the binary build.
	vcsCommit string
)
// GetUserAgent returns a user agent string of the form
// "eraser/<component>/<BuildVersion> (<GOOS>/<GOARCH>) <vcsCommit>/<buildTime>".
func GetUserAgent(component string) string {
	return fmt.Sprintf("eraser/%s/%s (%s/%s) %s/%s", component, BuildVersion, runtime.GOOS, runtime.GOARCH, vcsCommit, buildTime)
}
================================================
FILE: version/version_test.go
================================================
package version
import (
"fmt"
"runtime"
"strings"
"testing"
)
// TestGetUserAgent verifies that GetUserAgent assembles the user agent
// string from the package-level build metadata variables.
func TestGetUserAgent(t *testing.T) {
	buildTime = "Now"
	BuildVersion = "version"
	vcsCommit = "hash"

	const component = "manager"
	want := fmt.Sprintf("eraser/%s/%s (%s/%s) %s/%s", component, BuildVersion, runtime.GOOS, runtime.GOARCH, vcsCommit, buildTime)
	got := GetUserAgent(component)

	// Case-insensitive comparison, matching the original assertion.
	if !strings.EqualFold(want, got) {
		t.Fatalf("expected: %s, got: %s", want, got)
	}
}