Repository: controlplaneio/netassert
Branch: master
Commit: bcb1d6125b85
Files: 118
Total size: 508.2 KB
Directory structure:
gitextract_igc5nwum/
├── .dockerignore
├── .editorconfig
├── .github/
│ ├── ISSUE_TEMPLATE/
│ │ ├── bug_report.md
│ │ ├── feature_request.md
│ │ └── question.md
│ ├── PULL_REQUEST_TEMPLATE/
│ │ └── pull_request_template.md
│ └── workflows/
│ ├── build.yaml
│ └── release.yaml
├── .gitignore
├── .goreleaser.yaml
├── .hadolint.yaml
├── .yamllint.yaml
├── CHANGELOG.md
├── CODE_OF_CONDUCT.md
├── CONTRIBUTING.md
├── Dockerfile
├── LICENSE
├── README.md
├── SECURITY.md
├── cmd/
│ └── netassert/
│ └── cli/
│ ├── common.go
│ ├── gen_result.go
│ ├── main.go
│ ├── ping.go
│ ├── root.go
│ ├── run.go
│ ├── validate.go
│ └── version.go
├── download.sh
├── e2e/
│ ├── README.md
│ ├── clusters/
│ │ ├── aws-eks-terraform-module/
│ │ │ ├── eks.tf
│ │ │ ├── outputs.tf
│ │ │ ├── variables.tf
│ │ │ └── vpc.tf
│ │ ├── eks-with-calico-cni/
│ │ │ ├── calico-3.26.4.yaml
│ │ │ └── terraform/
│ │ │ ├── main.tf
│ │ │ └── vars.tf
│ │ ├── eks-with-vpc-cni/
│ │ │ └── terraform/
│ │ │ ├── main.tf
│ │ │ └── vars.tf
│ │ ├── gke-dataplanev2/
│ │ │ ├── main.tf
│ │ │ └── variables.tf
│ │ ├── gke-vpc/
│ │ │ ├── main.tf
│ │ │ └── variables.tf
│ │ └── kind/
│ │ └── kind-config.yaml
│ ├── e2e_test.go
│ ├── helpers/
│ │ ├── common.go
│ │ ├── eks.go
│ │ ├── gke.go
│ │ └── kind.go
│ └── manifests/
│ ├── networkpolicies.yaml
│ ├── test-cases.yaml
│ └── workload.yaml
├── fluxcd-demo/
│ ├── README.md
│ ├── fluxcd-helmconfig.yaml
│ ├── helm/
│ │ ├── Chart.yaml
│ │ ├── templates/
│ │ │ ├── _helpers.tpl
│ │ │ ├── deployment.yaml
│ │ │ ├── pod1-pod2.yaml
│ │ │ ├── post-deploy-tests.yaml
│ │ │ └── statefulset.yaml
│ │ └── values.yaml
│ └── kind-cluster.yaml
├── go.mod
├── go.sum
├── helm/
│ ├── Chart.yaml
│ ├── README.md
│ ├── templates/
│ │ ├── NOTES.txt
│ │ ├── _helpers.tpl
│ │ ├── clusterrole.yaml
│ │ ├── clusterrolebinding.yaml
│ │ ├── configmap.yaml
│ │ ├── job.yaml
│ │ └── serviceaccount.yaml
│ └── values.yaml
├── internal/
│ ├── data/
│ │ ├── read.go
│ │ ├── read_test.go
│ │ ├── tap.go
│ │ ├── tap_test.go
│ │ ├── testdata/
│ │ │ ├── dir-without-yaml-files/
│ │ │ │ └── .gitkeep
│ │ │ ├── invalid/
│ │ │ │ ├── duplicated-names.yaml
│ │ │ │ ├── empty-resources.yaml
│ │ │ │ ├── host-as-dst-udp.yaml
│ │ │ │ ├── host-as-source.yaml
│ │ │ │ ├── missing-fields.yaml
│ │ │ │ ├── multiple-dst-blocks.yaml
│ │ │ │ ├── not-a-list.yaml
│ │ │ │ └── wrong-test-values.yaml
│ │ │ ├── invalid-duplicated-names/
│ │ │ │ ├── input1.yaml
│ │ │ │ └── input2.yaml
│ │ │ └── valid/
│ │ │ ├── empty.yaml
│ │ │ └── multi.yaml
│ │ ├── types.go
│ │ └── types_test.go
│ ├── engine/
│ │ ├── engine.go
│ │ ├── engine_daemonset_test.go
│ │ ├── engine_deployment_test.go
│ │ ├── engine_mocks_test.go
│ │ ├── engine_pod_test.go
│ │ ├── engine_statefulset_test.go
│ │ ├── interface.go
│ │ ├── run_tcp.go
│ │ ├── run_tcp_test.go
│ │ └── run_udp.go
│ ├── kubeops/
│ │ ├── client.go
│ │ ├── container_test.go
│ │ ├── containers.go
│ │ ├── daemonset.go
│ │ ├── daemonset_test.go
│ │ ├── deployment.go
│ │ ├── deployment_test.go
│ │ ├── pod.go
│ │ ├── pod_test.go
│ │ ├── statefulset.go
│ │ ├── statefulset_test.go
│ │ └── string_gen.go
│ └── logger/
│ └── hclog.go
├── justfile
└── rbac/
├── cluster-role.yaml
└── cluster-rolebinding.yaml
================================================
FILE CONTENTS
================================================
================================================
FILE: .dockerignore
================================================
Dockerfile*
Jenkinsfile*
**/.terraform
.git/
.idea/
*.iml
.gcloudignore
================================================
FILE: .editorconfig
================================================
# Top-most EditorConfig file; editors stop searching parent directories.
root = true
# Defaults for every file in the repository.
[*]
end_of_line = lf
indent_style = space
indent_size = 2
insert_final_newline = true
max_line_length = 120
trim_trailing_whitespace = true
# Python follows PEP 8's 4-space indent.
[*.py]
indent_size = 4
# Makefiles require hard tabs in recipe lines.
[{Makefile,makefile,**.mk}]
indent_style = tab
# Shell scripts; keys below editor defaults are shfmt formatter options
# (each inline note maps to the equivalent shfmt flag).
[*.sh]
indent_style = space
indent_size = 2
shell_variant = bash # like -ln=posix
binary_next_line = true # like -bn
switch_case_indent = true # like -ci
space_redirects = true # like -sr
keep_padding = false # like -kp
================================================
FILE: .github/ISSUE_TEMPLATE/bug_report.md
================================================
---
name: Bug report
about: Create a report to help us improve
title: ''
labels: bug
assignees: ''
---
**Describe the bug**
A clear and concise description of what the bug is.
**To Reproduce**
Steps to reproduce the behaviour:
1. Go to '...'
2. Click on '....'
3. Scroll down to '....'
4. See error
**Expected behaviour**
A clear and concise description of what you expected to happen.
**Screenshots**
If applicable, add screenshots to help explain your problem.
**Additional context**
Add any other context about the problem here.
================================================
FILE: .github/ISSUE_TEMPLATE/feature_request.md
================================================
---
name: Feature request
about: Suggest an idea for this project
title: ''
labels: ''
assignees: ''
---
**Is your feature request related to a problem? Please describe.**
A clear and concise description of what the problem is.
For example: I'm always frustrated when [...]
**Describe the solution you'd like**
A clear and concise description of what you want to happen.
**Describe alternatives you've considered**
A clear and concise description of any alternative solutions or features you've considered.
**Additional context**
Add any other context or screenshots about the feature request here.
================================================
FILE: .github/ISSUE_TEMPLATE/question.md
================================================
---
name: Question
about: Post a question about the project
title: ''
labels: question
assignees: ''
---
**Your question**
A clear and concise question.
**Additional context**
Add any other context about your question here.
================================================
FILE: .github/PULL_REQUEST_TEMPLATE/pull_request_template.md
================================================
---
name: Pull Request
about: A pull request
title: ''
labels: ''
assignees: ''
---
[pull_requests]: https://github.com/controlplaneio/netassert/pulls?q=is%3Apr+is%3Aopen+sort%3Aupdated-desc
**All Submissions.**
- [ ] Have you followed the guidelines in our [Contributing document](../../CONTRIBUTING.md)?
- [ ] Have you checked to ensure there aren't other open [Pull Requests][pull_requests] for the same update/change?
**Code Submissions.**
- [ ] Does your submission pass linting, tests, and security analysis?
**Changes to Core Features.**
- [ ] Have you added an explanation of what your changes do and why you'd like us to include them?
- [ ] Have you written new tests for your core changes, as applicable?
================================================
FILE: .github/workflows/build.yaml
================================================
# CI workflow: lint, unit/e2e test, build, and scan the container image.
# Runs on every branch push (tags excluded; tags trigger release.yaml)
# and on pull requests targeting main/master.
name: Lint and Build
on:
  push:
    tags-ignore:
      - '*'
    branches:
      - '*'
  pull_request:
    branches: ['main', 'master']
jobs:
  lint:
    runs-on: ubuntu-latest
    steps:
      - name: Checkout repository
        uses: actions/checkout@f43a0e5ff2bd294095638e18286ca9a3d1956744 # v3
      - name: Run golangci-lint
        uses: reviewdog/action-golangci-lint@f9bba13753278f6a73b27a56a3ffb1bfda90ed71 # v2
        with:
          go_version: "1.25.4"
          fail_level: "none"
  build:
    runs-on: ubuntu-latest
    needs: lint
    steps:
      - name: Checkout source code
        uses: actions/checkout@f43a0e5ff2bd294095638e18286ca9a3d1956744 # v3
      - name: Setup Go
        uses: actions/setup-go@be3c94b385c4f180051c996d336f57a34c397495 # v3
        with:
          go-version: '1.25.4'
      - name: Install dependencies
        run: go get ./...
      - name: Test
        run: go test -v ./... --race
      - name: E2E Test
        env:
          # Quoted so the gate value is passed as the literal string "yes"
          # rather than being parsed as a YAML 1.1 boolean.
          KIND_E2E_TESTS: "yes"
        run: go test -timeout 20m -v ./e2e/...
      - name: Build
        run: go build -v ./...
      # NOTE(review): a redundant "Build Container" step that re-ran
      # `go build -v ./...` was removed; the container is built below.
      - name: Build an image from Dockerfile
        run: |
          docker build -t controlplane/netassert:${{ github.sha }} .
      - name: Run Trivy vulnerability scanner
        uses: aquasecurity/trivy-action@master
        with:
          image-ref: 'controlplane/netassert:${{ github.sha }}'
          format: 'table'
          ignore-unfixed: true
          exit-code: '1'
          vuln-type: 'os,library'
          severity: 'CRITICAL,HIGH,MEDIUM'
================================================
FILE: .github/workflows/release.yaml
================================================
# Release workflow: triggered by semver tags (and -testingN pre-release tags).
# Three independent jobs: goreleaser (binaries + SBOMs), docker (multi-arch
# image build/push/sign), and helm (chart packaging and push to GHCR).
name: release
on:
  push:
    tags:
      - "v[0-9]+.[0-9]+.[0-9]+"
      - "v[0-9]+.[0-9]+.[0-9]+-testing[0-9]+"
permissions:
  contents: write
  packages: write
  id-token: write
  attestations: write
env:
  GH_REGISTRY: ghcr.io
  IMAGE_NAME: ${{ github.repository }}
  RELEASE_VERSION: ${{ github.ref_name }}
  SCANNER_IMG_VERSION: v1.0.11
  SNIFFER_IMG_VERSION: v1.1.9
jobs:
  goreleaser:
    runs-on: ubuntu-latest
    steps:
      - name: Checkout
        uses: actions/checkout@93cb6efe18208431cddfb8368fd83d5badbf9bfd # v5
      - name: Set up Go
        uses: actions/setup-go@be3c94b385c4f180051c996d336f57a34c397495 # v3
        with:
          go-version: '1.25.4'
      # syft is needed by goreleaser's sbom generation
      - uses: anchore/sbom-action/download-syft@f8bdd1d8ac5e901a77a92f111440fdb1b593736b # v0.20.6
      - name: Run GoReleaser
        uses: goreleaser/goreleaser-action@5fdedb94abba051217030cc86d4523cf3f02243d # v4
        with:
          distribution: goreleaser
          args: release --clean
        env:
          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
  docker:
    runs-on: ubuntu-latest
    steps:
      - name: Checkout repository
        uses: actions/checkout@93cb6efe18208431cddfb8368fd83d5badbf9bfd # v5
      # - name: Extract metadata (tags, labels) for Docker
      #   id: meta
      #   uses: docker/metadata-action@9ec57ed1fcdbf14dcef7dfbe97b2010124a938b7
      #   with:
      #     images: ${{ env.GH_REGISTRY }}/${{ env.IMAGE_NAME }}
      - name: Set up QEMU
        uses: docker/setup-qemu-action@c7c53464625b32c7a7e944ae62b3e17d2b600130 # v3
      - name: Set up Docker Buildx
        uses: docker/setup-buildx-action@8d2750c68a42422c14e847fe6c8ac0403b4cbd6f # v3
      - name: Install cosign
        uses: sigstore/cosign-installer@398d4b0eeef1380460a10c8013a76f728fb906ac # v3
      - name: Log in to the GitHub Container registry
        uses: docker/login-action@65b78e6e13532edd9afa3aa52ac7964289d1a9c1
        with:
          registry: ${{ env.GH_REGISTRY }}
          username: ${{ github.actor }}
          password: ${{ secrets.GITHUB_TOKEN }}
      - name: Login to Docker Hub
        uses: docker/login-action@465a07811f14bebb1938fbed4728c6a1ff8901fc # v2
        with:
          username: ${{ secrets.DOCKERHUB_USERNAME }}
          password: ${{ secrets.DOCKERHUB_TOKEN }}
      - name: Build and push
        id: buildpush
        uses: docker/build-push-action@263435318d21b8e681c14492fe198d362a7d2c83 # v6
        with:
          platforms: linux/amd64,linux/arm64
          sbom: true
          provenance: mode=max
          push: true
          tags: |
            docker.io/controlplane/netassert:${{ env.RELEASE_VERSION }}
            docker.io/controlplane/netassert:latest
            ${{ env.GH_REGISTRY }}/${{ env.IMAGE_NAME }}:${{ env.RELEASE_VERSION }}
            ${{ env.GH_REGISTRY }}/${{ env.IMAGE_NAME }}:latest
          build-args: |
            VERSION=${{ env.RELEASE_VERSION }}
            SCANNER_IMG_VERSION=${{ env.SCANNER_IMG_VERSION }}
            SNIFFER_IMG_VERSION=${{ env.SNIFFER_IMG_VERSION }}
      # Sign the pushed image digest in both registries with keyless cosign.
      - name: Sign artifact
        run: |
          cosign sign --yes \
            "${{ env.GH_REGISTRY }}/${{ env.IMAGE_NAME }}@${{ steps.buildpush.outputs.digest }}"
          cosign sign --yes \
            "docker.io/controlplane/netassert@${{ steps.buildpush.outputs.digest }}"
  helm:
    runs-on: ubuntu-latest
    steps:
      - name: Checkout repository
        uses: actions/checkout@93cb6efe18208431cddfb8368fd83d5badbf9bfd # v5
      - name: Set up Helm
        uses: azure/setup-helm@1a275c3b69536ee54be43f2070a358922e12c8d4 # v4
      - name: Setup yq
        uses: mikefarah/yq@065b200af9851db0d5132f50bc10b1406ea5c0a8 # v4
      - name: Log in to GitHub Container Registry
        run: |
          echo "${{ secrets.GITHUB_TOKEN }}" | helm registry login ghcr.io -u ${{ github.actor }} --password-stdin
      # Strip the leading "v" for the chart version/appVersion (Helm expects
      # plain semver) while the image tag keeps the full tag name.
      - name: Prepare and package Helm chart
        run: |
          CLEAN_VERSION=$(echo "$RELEASE_VERSION" | sed 's/^v//')
          echo "Using chart version and appVersion: $CLEAN_VERSION"
          yq -i ".image.tag = \"${RELEASE_VERSION}\"" ./helm/values.yaml
          yq -i ".version = \"${CLEAN_VERSION}\"" ./helm/Chart.yaml
          yq -i ".appVersion = \"${CLEAN_VERSION}\"" ./helm/Chart.yaml
          helm package ./helm -d .
      - name: Push Helm chart to GHCR
        run: |
          CLEAN_VERSION=$(echo "$RELEASE_VERSION" | sed 's/^v//')
          helm push "./netassert-${CLEAN_VERSION}.tgz" oci://ghcr.io/${{ github.repository_owner }}/charts
================================================
FILE: .gitignore
================================================
# Secrets #
###########
*.pem
*.key
*_rsa
# Compiled source #
###################
*.com
*.class
*.dll
*.exe
*.o
*.so
*.pyc
# Packages #
############
# it's better to unpack these files and commit the raw source
# git has its own built in compression methods
*.7z
*.dmg
*.gz
*.iso
*.jar
*.rar
*.tar
*.zip
# Logs and databases #
######################
*.log
*.sqlite
pip-log.txt
# OS generated files #
######################
.DS_Store?
ehthumbs.db
Icon?
Thumbs.db
# IDE generated files #
#######################
.idea/
*.iml
atlassian-ide-plugin.xml
# Test Files #
##############
test/log
.coverage
.tox
nosetests.xml
# Package Managed Files #
#########################
bower_components/
vendor/
composer.lock
node_modules/
.npm/
venv/
.venv/
.venv2/
.venv3/
# temporary files #
###################
*.*swp
nohup.out
*.tmp
# Virtual machines #
####################
.vagrant/
# Pythonics #
#############
*.py[cod]
# Packages
*.egg
*.egg-info
dist
build
eggs
parts
var
sdist
develop-eggs
.installed.cfg
lib
lib64
# Translations
*.mo
# Mr Developer
.mr.developer.cfg
.project
.pydevproject
# Complexity
output/*.html
output/*/index.html
# Sphinx
docs/_build
.scratch.md
conf/.config/keybase/
# Pipenv
Pipfile*
# backup files
*.backup
*.notworking
internal/types-not-used/
demo.yaml
.idea
cmd/netassert/cli/netassert
cmd/netassert/cli/results.tap
internal/logger/*.old
cmd/netassert/cli/cli
# Terraform
.terraform
*.tfstate
*.tfstate.*
crash.log
crash.*.log
*.tfvars
*.tfvars.json
override.tf
override.tf.json
*_override.tf
*_override.tf.json
.terraformrc
terraform.rc
*.lock.hcl*
# Kubeconfig
*.kubeconfig
# CLI
/cmd/netassert/cli/*.sh
abc
netassert-*-*-kubeconfig
bin
results.tap
================================================
FILE: .goreleaser.yaml
================================================
# GoReleaser configuration: static cross-platform builds of the netassert CLI,
# tar.gz/zip archives, sha256 checksums, and SBOMs for archives and source.
builds:
  - id: netassert
    env:
      - CGO_ENABLED=0  # static binaries, no cgo
    ldflags:
      - -s
      - -w
      # Embed version metadata into the binary (read by `netassert version`).
      - -X main.version={{.Tag}}
      - -X main.gitHash={{.FullCommit}}
      - -X main.buildDate={{.Date}}
    goos:
      - linux
      - darwin
      - windows
    goarch:
      - amd64
      - arm
      - arm64
    goarm:
      - 6
      - 7
    main: ./cmd/netassert/cli/
    binary: netassert
archives:
  - id: netassert
    name_template: "{{ .ProjectName }}_{{ .Tag }}_{{ .Os }}_{{ .Arch }}{{ if .Arm }}v{{ .Arm }}{{ end }}"
    format: tar.gz
    format_overrides:
      - goos: windows
        format: zip  # Windows users expect zip archives
    files:
      - LICENSE
    wrap_in_directory: false
checksum:
  algorithm: sha256
  name_template: 'checksums-sha256.txt'
changelog:
  sort: asc
sboms:
  - id: archive
    artifacts: archive
  - id: source
    artifacts: source
================================================
FILE: .hadolint.yaml
================================================
---
# hadolint (Dockerfile linter) rule exclusions.
ignored:
  - DL3018 # Pin versions in apk add.
  - DL3022 # COPY --from alias
================================================
FILE: .yamllint.yaml
================================================
---
# yamllint configuration: default ruleset with relaxed comment spacing,
# a 120-character line limit, and truthy checks disabled for keys
# (so GitHub Actions' `on:` key does not trigger warnings).
extends: default
ignore: |
  test/bin/
  test/asset/
rules:
  comments:
    min-spaces-from-content: 1
  line-length:
    max: 120
  truthy:
    check-keys: false
================================================
FILE: CHANGELOG.md
================================================
# Changelog
All notable changes to this project will be documented in this file.
## Table of Contents
- [2.0.3](#203)
- [2.0.2](#202)
- [2.0.1](#201)
- [2.0.0](#200)
- [0.1.0](#010)
---
## `2.0.3`
- test with latest version of Kubernetes and update to Go 1.21
- update e2e tests with latest version of EKS and GKE and Calico CNI
## `2.0.2`
- integrate e2e tests with network policies
- fix a bug in udp testing
## `2.0.1`
- fix release naming
## `2.0.0`
- complete rewrite of the tool in Go, with unit and integration tests
- leverages the ephemeral container support in Kubernetes > v1.25
- test case(s) are written in YAML
- support for Pods, StatefulSets, DaemonSets and Deployments which are directly referred through their names in the test suites
- artifacts are available for download
## `0.1.0`
- initial release
- no artifacts available
================================================
FILE: CODE_OF_CONDUCT.md
================================================
# Contributor Covenant Code of Conduct
## Our Pledge
In the interest of fostering an open and welcoming environment, we as
contributors and maintainers pledge to making participation in our project and
our community a harassment-free experience for everyone, regardless of age, body
size, disability, ethnicity, gender identity and expression, level of experience,
education, socio-economic status, nationality, personal appearance, race,
religion, or sexual identity and orientation.
## Our Standards
Examples of behavior that contributes to creating a positive environment
include:
* Using welcoming and inclusive language
* Being respectful of differing viewpoints and experiences
* Gracefully accepting constructive criticism
* Focusing on what is best for the community
* Showing empathy towards other community members
Examples of unacceptable behavior by participants include:
* The use of sexualized language or imagery and unwelcome sexual attention or
advances
* Trolling, insulting/derogatory comments, and personal or political attacks
* Public or private harassment
* Publishing others' private information, such as a physical or electronic
address, without explicit permission
* Other conduct which could reasonably be considered inappropriate in a
professional setting
## Our Responsibilities
Project maintainers are responsible for clarifying the standards of acceptable
behavior and are expected to take appropriate and fair corrective action in
response to any instances of unacceptable behavior.
Project maintainers have the right and responsibility to remove, edit, or
reject comments, commits, code, wiki edits, issues, and other contributions
that are not aligned to this Code of Conduct, or to ban temporarily or
permanently any contributor for other behaviors that they deem inappropriate,
threatening, offensive, or harmful.
## Scope
This Code of Conduct applies both within project spaces and in public spaces
when an individual is representing the project or its community. Examples of
representing a project or community include using an official project e-mail
address, posting via an official social media account, or acting as an appointed
representative at an online or offline event. Representation of a project may be
further defined and clarified by project maintainers.
## Enforcement
Instances of abusive, harassing, or otherwise unacceptable behavior
may be reported by contacting Andrew Martin andy(at)control-plane.io.
All complaints will be reviewed and investigated and will result in a response that is deemed
necessary and appropriate to the circumstances. The project team is
obligated to maintain confidentiality with regard to the reporter of
an incident. Further details of specific enforcement policies may be
posted separately.
Project maintainers who do not follow or enforce the Code of Conduct in good
faith may face temporary or permanent repercussions as determined by other
members of the project's leadership.
## Attribution
This Code of Conduct is adapted from the [Contributor Covenant](https://www.contributor-covenant.org), version 1.4,
available at https://www.contributor-covenant.org/version/1/4/code-of-conduct.html
================================================
FILE: CONTRIBUTING.md
================================================
# Contributing to NetAssert
:+1::tada: First off, thanks for taking the time to contribute! :tada::+1:
`NetAssert` is Apache 2.0 licensed and accepts contributions via GitHub pull requests.
The following is a set of guidelines for contributing to `NetAssert`. We generally have stricter rules as it's a security
tool but don't let that discourage you from creating your PR, it can be incrementally fixed to fit the rules. Also feel
free to propose changes to this document in a pull request.
## Table Of Contents
- [Contributing to NetAssert](#contributing-to-netassert)
- [Table Of Contents](#table-of-contents)
- [Code of Conduct](#code-of-conduct)
- [I Don't Want To Read This Whole Thing I Just Have a Question!!!](#i-dont-want-to-read-this-whole-thing-i-just-have-a-question)
- [What Should I Know Before I Get Started?](#what-should-i-know-before-i-get-started)
- [How Can I Contribute?](#how-can-i-contribute)
- [Reporting Bugs](#reporting-bugs)
- [Before Submitting a Bug Report](#before-submitting-a-bug-report)
- [How Do I Submit a (Good) Bug Report?](#how-do-i-submit-a-good-bug-report)
- [Suggesting Enhancements](#suggesting-enhancements)
- [Before Submitting an Enhancement Suggestion](#before-submitting-an-enhancement-suggestion)
- [How Do I Submit A (Good) Enhancement Suggestion?](#how-do-i-submit-a-good-enhancement-suggestion)
- [Your First Code Contribution](#your-first-code-contribution)
- [Development](#development)
- [Pull Requests](#pull-requests)
- [Style Guides](#style-guides)
- [Git Commit Messages](#git-commit-messages)
- [General Style Guide](#general-style-guide)
- [GoLang Style Guide](#golang-style-guide)
- [Documentation Style Guide](#documentation-style-guide)
---
## Code of Conduct
This project and everyone participating in it are governed by the [Code of Conduct](CODE_OF_CONDUCT.md). By participating, you
are expected to uphold this code. Please report unacceptable behaviour to [andy@control-plane.io](mailto:andy@control-plane.io).
## I Don't Want To Read This Whole Thing I Just Have a Question!!!
We have an official message board with a detailed FAQ and where the community chimes in with helpful advice if you have questions.
We also have an issue template for questions [here](https://github.com/controlplaneio/netassert/issues/new).
## What Should I Know Before I Get Started?
NetAssert has three components:
- [NetAssert](https://github.com/controlplaneio/netassert): This is responsible for orchestrating the tests and is also known as `netassert-engine`
- [NetAssertv2-packet-sniffer](https://github.com/controlplaneio/netassertv2-packet-sniffer): This is the sniffer component that is utilised during a UDP test and is injected to the destination/target Pod as an ephemeral container
- [NetAssertv2-l4-client](https://github.com/controlplaneio/netassertv2-l4-client): This is the scanner component that is injected as the scanner ephemeral container onto the source Pod and is utilised during both TCP and UDP tests
## How Can I Contribute?
### Reporting Bugs
This section guides you through submitting a bug report for `NetAssert`. Following these guidelines helps maintainers and the
community understand your report, reproduce the behaviour, and find related reports.
Before creating bug reports, please check [this list](#before-submitting-a-bug-report) as you might find out that you
don't need to create one. When you are creating a bug report, please [include as many details as possible](#how-do-i-submit-a-good-bug-report).
Fill out the issue template for bugs, the information it asks for helps us resolve issues faster.
> **Note:** If you find a **Closed** issue that seems like it is the same thing that you're experiencing, open a new issue
> and include a link to the original issue in the body of your new one.
#### Before Submitting a Bug Report
- **Perform a [cursory search](https://github.com/search?q=+is:issue+user:controlplaneio)** to see if the problem has already
been reported. If it has **and the issue is still open**, add a comment to the existing issue instead of opening a new
one
#### How Do I Submit a (Good) Bug Report?
Bugs are tracked as [GitHub issues](https://guides.github.com/features/issues/). Create an issue on that repository and
provide the following information by filling in the issue template [here](https://github.com/controlplaneio/netassert/issues/new).
Explain the problem and include additional details to help maintainers reproduce the problem:
- **Use a clear and descriptive title** for the issue to identify the problem
- **Describe the exact steps which reproduce the problem** in as many details as possible. For example, start by explaining how you invoked netassert and with which test file
- **Provide specific examples to demonstrate the steps**. Include links to files or GitHub projects, or copy/pasteable
snippets, which you use in those examples. If you're providing snippets in the issue, use [Markdown code blocks](https://help.github.com/articles/markdown-basics/#multiple-lines)
- **Describe the behaviour you observed after following the steps** and point out what exactly is the problem with that behaviour
- **Explain which behaviour you expected to see instead and why.**
Provide more context by answering these questions:
- **Did the problem start happening recently** (e.g. after updating to a new version of netassert) or was this always a problem?
- If the problem started happening recently, **can you reproduce the problem in an older version of netassert?** What's the
most recent version in which the problem doesn't happen? You can download older versions of netassert from
[the releases page](https://github.com/controlplaneio/netassert/releases)
- **Can you reliably reproduce the issue?** If not, provide details about how often the problem happens and under which conditions
it normally happens
- If the problem is related to scanning files, **does the problem happen for all files and projects or only some?** Is there
anything else special about the files you are using? Please include them in your report, censor any sensitive information
but ensure the issue still exists with the censored file
### Suggesting Enhancements
This section guides you through submitting an enhancement suggestion for netassert, including completely new features and minor
improvements to existing functionality. Following these guidelines helps maintainers and the community understand your suggestion
and find related suggestions.
Before creating enhancement suggestions, please check [this list](#before-submitting-an-enhancement-suggestion) as you might
find out that you don't need to create one. When you are creating an enhancement suggestion, please
[include as many details as possible](#how-do-i-submit-a-good-enhancement-suggestion). Fill in the feature request
template, including the steps that you imagine you would take if the feature you're requesting existed.
#### Before Submitting an Enhancement Suggestion
- **Perform a [cursory search](https://github.com/search?q=+is:issue+user:controlplaneio)** to see if the enhancement has
already been suggested. If it has, add a comment to the existing issue instead of opening a new one
#### How Do I Submit A (Good) Enhancement Suggestion?
Enhancement suggestions are tracked as [GitHub issues](https://guides.github.com/features/issues/). Create an issue on this
repository and provide the following information:
- **Use a clear and descriptive title** for the issue to identify the suggestion
- **Provide a step-by-step description of the suggested enhancement** in as many details as possible
- **Provide specific examples to demonstrate the steps**. Include copy/pasteable snippets which you use in those examples,
as [Markdown code blocks](https://help.github.com/articles/markdown-basics/#multiple-lines)
- **Describe the current behaviour** and **explain which behaviour you expected to see instead** and why
- **Explain why this enhancement would be useful** to most netassert users and isn't something that can or should be implemented
as a separate community project
- **List some other tools where this enhancement exists.**
- **Specify which version of netassert you're using.** You can get the exact version by running `netassert version` in your terminal
- **Specify the name and version of the OS you're using.**
### Your First Code Contribution
Unsure where to begin contributing to `netassert`? You can start by looking through these `Good First Issue` and `Help Wanted`
issues:
- [Good First Issue issues][good_first_issue] - issues which should only require a few lines of code, and a test or two
- [Help wanted issues][help_wanted] - issues which should be a bit more involved than `Good First Issue` issues
Both issue lists are sorted by total number of comments. While not perfect, number of comments is a reasonable proxy for
impact a given change will have.
#### Development
Download the latest version of [just](https://github.com/casey/just/releases). To build the project you can use `just build`. The resulting binary will be in `cmd/netassert/cli/netassert`. To run `unit` tests you can use `just test`. There is a separate `README.md` in the `e2e` folder that lives in the root of this project that details `end-to-end` testing.
### Pull Requests
The process described here has several goals:
- Maintain the quality of `netassert`
- Fix problems that are important to users
- Engage the community in working toward the best possible netassert
- Enable a sustainable system for netassert's maintainers to review contributions
Please follow these steps to have your contribution considered by the maintainers:
1. Follow all instructions in the template
2. Follow the [style guides](#style-guides)
3. After you submit your pull request, verify that all [status checks](https://help.github.com/articles/about-status-checks/)
are passing
What if the status checks are failing?
If a status check is failing, and you believe that the failure is unrelated to your change, please leave a comment on
the pull request explaining why you believe the failure is unrelated. A maintainer will re-run the status check for
you. If we conclude that the failure was a false positive, then we will open an issue to track that problem with our
status check suite.
While the prerequisites above must be satisfied prior to having your pull request reviewed, the reviewer(s) may ask you to
complete additional tests, or other changes before your pull request can be ultimately accepted.
## Style Guides
### Git Commit Messages
- It's strongly preferred that you [GPG sign][commit_signing] your commits if you can
- Follow [Conventional Commits](https://www.conventionalcommits.org)
- Use the present tense ("add feature" not "added feature")
- Use the imperative mood ("move cursor to..." not "moves cursor to...")
- Limit the first line to 72 characters or less
- Reference issues and pull requests liberally after the first line
### General Style Guide
Look at installing an `.editorconfig` plugin or configure your editor to match the `.editorconfig` file in the root of the
repository.
### GoLang Style Guide
All Go code is linted with [golangci-lint](https://golangci-lint.run/).
For formatting rely on `gofmt` to handle styling.
### Documentation Style Guide
All markdown code is linted with [markdownlint-cli](https://github.com/igorshubovych/markdownlint-cli).
[good_first_issue]:https://github.com/controlplaneio/netassert/issues?q=is%3Aissue+is%3Aopen+sort%3Aupdated-desc+label%3A%22good+first+issue%22+sort%3Acomments-desc
[help_wanted]: https://github.com/controlplaneio/netassert/issues?q=is%3Aissue+is%3Aopen+sort%3Aupdated-desc+label%3A%22help+wanted%22
[commit_signing]: https://docs.github.com/en/free-pro-team@latest/github/authenticating-to-github/managing-commit-signature-verification
================================================
FILE: Dockerfile
================================================
FROM golang:1.25-alpine AS builder

# Build-time metadata injected into the binary via -ldflags.
# NOTE: ARG variables must be declared in the stage that uses them; without
# the SCANNER_IMG_VERSION / SNIFFER_IMG_VERSION declarations below, the
# corresponding ${...} expansions in the go build line are always empty,
# even when --build-arg is passed.
ARG VERSION
ARG SCANNER_IMG_VERSION
ARG SNIFFER_IMG_VERSION

COPY . /build
WORKDIR /build

# CGO disabled for a fully static binary suitable for the distroless base.
RUN go mod download && \
    CGO_ENABLED=0 GO111MODULE=on go build -ldflags="-X 'main.appName=NetAssert' -X 'main.version=${VERSION}' -X 'main.scannerImgVersion=${SCANNER_IMG_VERSION}' -X 'main.snifferImgVersion=${SNIFFER_IMG_VERSION}'" -v -o /netassertv2 cmd/netassert/cli/*.go && \
    ls -ltr /netassertv2

# Minimal, non-root runtime image.
FROM gcr.io/distroless/base:nonroot

COPY --from=builder /netassertv2 /usr/bin/netassertv2

ENTRYPOINT [ "/usr/bin/netassertv2" ]
================================================
FILE: LICENSE
================================================
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "{}"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright 2017 control-plane.io
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
================================================
FILE: README.md
================================================
# Netassert
[![Testing Workflow][testing_workflow_badge]][testing_workflow_badge]
[![Release Workflow][release_workflow_badge]][release_workflow_badge]
`NetAssert` is a command line tool that enables you to check the network connectivity between Kubernetes objects such as Pods, Deployments, DaemonSets, and StatefulSets, as well as test their connectivity to remote hosts or IP addresses. `NetAssert` v2 is a rewrite of original `NetAssert` tool in Go that utilises the ephemeral container support in Kubernetes to verify network connectivity. `NetAssert` test(s) are defined in YAML format. `NetAssert` **currently supports TCP and UDP protocols**:
- To perform a TCP test, only a [`scanner`](https://github.com/controlplaneio/netassertv2-l4-client) container is used. This container requires no privileges nor any Linux capabilities.
- To run a UDP test, a [`sniffer`](https://github.com/controlplaneio/netassertv2-packet-sniffer) ephemeral container is injected into the target Pod which requires `cap_net_raw` capabilities to read data from the network interface. During UDP testing, `NetAssert` runs both the `scanner` and `sniffer` container images, which are injected as `ephemeral` containers into running Pods.
The [`sniffer`](https://github.com/controlplaneio/netassertv2-packet-sniffer) and [`scanner`](https://github.com/controlplaneio/netassertv2-l4-client) container images can be downloaded from:
- `docker.io/controlplane/netassertv2-l4-client:latest`
- Used for both TCP and UDP testing and acts as a Layer 4 (TCP/UDP) client
- Requires no privileges nor any Linux capabilities.
- `docker.io/controlplane/netassertv2-packet-sniffer:latest`
  - Used for UDP testing only, injected at the destination to capture packets and search for a specific string in the payload
  - Requires `cap_net_raw` capabilities to read data from the network interface
`NetAssert` utilises the above containers during test and configures them using *environment variables*. The list of environment variables that are used can be found [here](https://github.com/controlplaneio/netassertv2-packet-sniffer) and [here](https://github.com/controlplaneio/netassertv2-l4-client). It is possible to override the `sniffer` and `scanner` images from command line during a run, so one can also bring their own container image(s) as long as they support the same environment variables.
## Installation
- Please download the latest stable version of `NetAssert` from [releases](https://github.com/controlplaneio/netassert/releases) page. The binary is available for Linux, MacOS and Windows platforms.
- If you are on Unix/Linux, you can also use the [download.sh](./download.sh) script to download the latest version of `NetAssert` into the current path:
```bash
curl -sL https://raw.githubusercontent.com/controlplaneio/netassert/master/download.sh | bash
```
## Test specification
`NetAssert` v2 tests are written in YAML format. Each test is a YAML document which supports the following mappings:
- A YAML document is a list of `NetAssert` tests. Each test has the following keys:
- **name**: a scalar representing the name of the connection
- **type**: a scalar representing the type of connection, only "k8s" is supported at this time
- **protocol**: a scalar representing the protocol used for the connection, which must be "tcp" or "udp"
- **targetPort**: an integer scalar representing the target port used by the connection
- **timeoutSeconds**: an integer scalar representing the timeout for the connection in seconds
- **attempts**: an integer scalar representing the number of connection attempts for the test
- **exitCode**: an integer scalar representing the expected exit code from the ephemeral/debug container(s)
- **src**: a mapping representing the source Kubernetes resource, which has the following keys:
- **k8sResource**: a mapping representing a Kubernetes resource with the following keys:
- **kind**: a scalar representing the kind of the Kubernetes resource, which can be `deployment`, `statefulset`, `daemonset` or `pod`
- **name**: a scalar representing the name of the Kubernetes resource
- **namespace**: a scalar representing the namespace of the Kubernetes resource
- **dst**: a mapping representing the destination Kubernetes resource or host, **which can have one of the following keys** i.e both `k8sResource` and `host` **are not supported at the same time** :
- **k8sResource**: a mapping representing a Kubernetes resource with the following keys:
- **kind**: a scalar representing the kind of the Kubernetes resource, which can be `deployment`, `statefulset`, `daemonset` or `pod`
- **name**: a scalar representing the name of the Kubernetes resource
- **namespace**: a scalar representing the namespace of the Kubernetes resource. (Note: Only allowed when protocol is "tcp")
- **host**: a mapping representing a host/node with the following key:
- **name**: a scalar representing the name or IP address of the host/node. (Note: Only allowed when protocol is "tcp" or "udp", but not both at the same time)
This is an example of a test that can be consumed by `NetAssert` utility
```yaml
---
- name: busybox-deploy-to-echoserver-deploy
type: k8s
protocol: tcp
targetPort: 8080
timeoutSeconds: 67
attempts: 3
exitCode: 0
src:
k8sResource:
kind: deployment
name: busybox
namespace: busybox
dst:
k8sResource:
kind: deployment
name: echoserver
namespace: echoserver
#######
#######
- name: busybox-deploy-to-core-dns
type: k8s
protocol: udp
targetPort: 53
timeoutSeconds: 67
attempts: 3
exitCode: 0
src:
k8sResource:
kind: deployment
name: busybox
namespace: busybox
dst:
k8sResource:
kind: deployment
name: coredns
namespace: kube-system
######
######
- name: busybox-deploy-to-web-statefulset
type: k8s
protocol: tcp
targetPort: 80
timeoutSeconds: 67
attempts: 3
exitCode: 0
src:
k8sResource: # this is type endpoint
kind: deployment
name: busybox
namespace: busybox
dst:
k8sResource: ## this is type endpoint
kind: statefulset
name: web
namespace: web
###
###
- name: fluentd-daemonset-to-web-statefulset
type: k8s
protocol: tcp
targetPort: 80
timeoutSeconds: 67
attempts: 3
exitCode: 0
src:
k8sResource: # this is type endpoint
kind: daemonset
name: fluentd
namespace: fluentd
dst:
k8sResource: ## this is type endpoint
kind: statefulset
name: web
namespace: web
###
####
- name: busybox-deploy-to-control-plane-dot-io
type: k8s
protocol: tcp
targetPort: 80
timeoutSeconds: 67
attempts: 3
exitCode: 0
src:
k8sResource: # type endpoint
kind: deployment
name: busybox
namespace: busybox
dst:
host: # type host or node or machine
name: control-plane.io
###
###
- name: test-from-pod1-to-pod2
type: k8s
protocol: tcp
targetPort: 80
timeoutSeconds: 67
attempts: 3
exitCode: 0
src:
k8sResource: ##
kind: pod
name: pod1
namespace: pod1
dst:
k8sResource:
kind: pod
name: pod2
namespace: pod2
###
###
- name: busybox-deploy-to-fake-host
type: k8s
protocol: tcp
targetPort: 333
timeoutSeconds: 67
attempts: 3
exitCode: 1
src:
k8sResource: # type endpoint
kind: deployment
name: busybox
namespace: busybox
dst:
host: # type host or node or machine
name: 0.0.0.0
...
```
## Components
`NetAssert` has three main components:
- [NetAssert](https://github.com/controlplaneio/netassert): This is responsible for orchestrating the tests and is also known as `Netassert-Engine` or simply the `Engine`
- [NetAssertv2-packet-sniffer](https://github.com/controlplaneio/netassertv2-packet-sniffer): This is the sniffer component that is utilised during a UDP test and is injected to the destination/target Pod as an ephemeral container
- [NetAssertv2-l4-client](https://github.com/controlplaneio/netassertv2-l4-client): This is the scanner component that is injected as the scanner ephemeral container onto the source Pod and is utilised during both TCP and UDP tests
## Detailed steps/flow of tests
All the tests are read from an YAML file or a directory (step **1**) and the results are written following the [TAP format](https://testanything.org/) (step **5** for UDP and step **4** for TCP). The tests are performed in two different manners depending on whether a TCP or UDP connection is used
### UDP test
- Validate the test spec and ensure that the `src` and `dst` fields are correct: for udp tests both of them must be of type `k8sResource`
- Find a running Pod called `dstPod` in the object defined by the `dst.k8sResource` field. Ensure that the Pod is in running state and has an IP address allocated by the CNI
- Find a running Pod called `srcPod` in the object defined by the `src.k8sResource` field. Ensure that the Pod is in running state and has an IP address allocated by the CNI
- Generate a random UUID, which will be used by both ephemeral containers
- Inject the `netassert-l4-client` as an ephemeral container in the `srcPod` (step **2**) and set the port and protocol according to the test specifications. Provide also the target host equal to the previously found dstPod IP address, and the random UUID that was generated in the previous step as the message to be sent over the udp connection. At the same time, inject the `netassertv2-packet-sniffer` (step **3**) as an ephemeral container in the `dstPod` using the protocol, search string, number of matches and timeout defined in the test specifications. The search_string environment variable is equal to the UUID that was generated in the previous step which is expected to be found in the data sent by the scanner when the connections are successful.
- Poll the status of the ephemeral containers (step **4**)
- Ensure that the `netassertv2-packet-sniffer` ephemeral sniffer container’s exit status matches the one defined in the test specification
- Ensure that the `netassert-l4-client`, exits with exit status of zero. This should always be the case as UDP is not a connection oriented protocol.
### TCP test
- Validate the test spec and ensure that the `src` field is of type `k8sResource`
- Find a running Pod called `srcPod` in the object defined by the `src.k8sResource` field. Ensure that the Pod is in running state and has an IPAddress
- Check if `dst` has `k8sResource` defined as a child object. If so then find a running Pod defined by the `dst.K8sResource`
- Inject the `netassert-l4-client` as an ephemeral container in the `srcPod` (step **2**). Configure the `netassert-l4-client` similarly to the udp case. If the `dst` field is set to `host` then use the host `name` field as the scanner target host
- Poll the status of the ephemeral containers (step **3**)
- Ensure that the exit code of that container matches the `exitCode` field defined in the test specification
## Development
- You will need Go version 1.25.x or higher. Download the latest version of [just](https://github.com/casey/just/releases). To build the project you can use `just build`. The resulting binary will be in `cmd/netassert/cli/netassert`. To run `unit` tests you can use `just test`. There is a separate [README.md](./e2e/README.md) that details `end-to-end` testing.
## Quick testing
### Spinning up the environment
- Make sure you have installed [`kind`](https://kind.sigs.k8s.io/) and its prerequisites
- Make sure you have also installed [`just`](https://github.com/casey/just/releases)
- Download the `NetAssert` binary from the [release](https://github.com/controlplaneio/netassert/releases) page:
```bash
❯ VERSION="v2.1.3" # change it to the version you want to install
❯ OS_ARCH=linux_amd64 # change it to your OS/architecture combination (for reference check the NetAssert release page)
❯ curl -L -o netassert.tar.gz https://github.com/controlplaneio/netassert/releases/download/${VERSION}/netassert_${VERSION}_${OS_ARCH}.tar.gz
❯ tar -xzf netassert.tar.gz -C bin/netassert
```
- Alternatively, you can build `NetAssert` from source:
```bash
❯ just build
```
- You will also need a working kubernetes cluster with ephemeral/debug container support and a CNI that supports Network Policies, you can spin one quickly using the `justfile` included in the repo:
```bash
❯ just kind-down ; just kind-up
❯ just calico-apply
```
- wait for all the nodes to become ready:
```bash
❯ kubectl get nodes -w
```
### Running the sample tests
- In order to use the sample tests, you need to create network policies and kubernetes resources:
```bash
❯ just k8s-apply
kubectl apply -f ./e2e/manifests/workload.yaml
namespace/fluentd created
daemonset.apps/fluentd created
namespace/echoserver created
namespace/busybox created
deployment.apps/echoserver created
deployment.apps/busybox created
namespace/pod1 created
namespace/pod2 created
pod/pod2 created
pod/pod1 created
namespace/web created
statefulset.apps/web created
```
```bash
❯ just netpol-apply
kubectl apply -f ./e2e/manifests/networkpolicies.yaml
networkpolicy.networking.k8s.io/web created
```
- Wait for the workload to become ready (note that the workload pods are the ones created after running `just k8s-apply` in a previous step):
```bash
❯ kubectl get pods -A
busybox busybox-6c85d76fdc-r8gtp 1/1 Running 0 76s
echoserver echoserver-64bd7c5dc6-ldwh9 1/1 Running 0 76s
fluentd fluentd-5pp9c 1/1 Running 0 76s
fluentd fluentd-8vvp9 1/1 Running 0 76s
fluentd fluentd-9jblb 1/1 Running 0 76s
fluentd fluentd-jnlql 1/1 Running 0 76s
kube-system calico-kube-controllers-565c89d6df-8mwk9 1/1 Running 0 117s
kube-system calico-node-2sqhw 1/1 Running 0 117s
kube-system calico-node-4sxpn 1/1 Running 0 117s
kube-system calico-node-5gtg7 1/1 Running 0 117s
kube-system calico-node-kxjq8 1/1 Running 0 117s
kube-system coredns-7d764666f9-74xgb 1/1 Running 0 2m29s
kube-system coredns-7d764666f9-jvnr4 1/1 Running 0 2m29s
kube-system etcd-packet-test-control-plane 1/1 Running 0 2m35s
kube-system kube-apiserver-packet-test-control-plane 1/1 Running 0 2m35s
kube-system kube-controller-manager-packet-test-control-plane 1/1 Running 0 2m35s
kube-system kube-proxy-4xjp2 1/1 Running 0 2m27s
kube-system kube-proxy-b28pw 1/1 Running 0 2m29s
kube-system kube-proxy-p9smj 1/1 Running 0 2m27s
kube-system kube-proxy-xb2wq 1/1 Running 0 2m27s
kube-system kube-scheduler-packet-test-control-plane 1/1 Running 0 2m35s
local-path-storage local-path-provisioner-67b8995b4b-jf8lc 1/1 Running 0 2m29s
pod1 pod1 1/1 Running 0 75s
pod2 pod2 1/1 Running 0 76s
web web-0 1/1 Running 0 75s
web web-1 1/1 Running 0 31s
```
- Run the netassert binary pointing it to the test cases:
```bash
❯ bin/netassert run --input-file ./e2e/manifests/test-cases.yaml
❯ cat results.tap
TAP version 14
1..9
ok 1 - busybox-deploy-to-echoserver-deploy
ok 2 - busybox-deploy-to-echoserver-deploy-2
ok 3 - fluentd-deamonset-to-echoserver-deploy
ok 4 - busybox-deploy-to-web-statefulset
ok 5 - web-statefulset-to-busybox-deploy
ok 6 - fluentd-daemonset-to-web-statefulset
ok 7 - busybox-deploy-to-control-plane-dot-io
ok 8 - test-from-pod1-to-pod2
ok 9 - busybox-deploy-to-fake-host
```
- To see the results when a check fails, run:
```bash
❯ just netpol-rm-apply
kubectl delete -f ./e2e/manifests/networkpolicies.yaml
networkpolicy.networking.k8s.io "web" deleted
❯ bin/netassert run --input-file ./e2e/manifests/test-cases.yaml
❯ cat results.tap
TAP version 14
1..9
ok 1 - busybox-deploy-to-echoserver-deploy
ok 2 - busybox-deploy-to-echoserver-deploy-2
ok 3 - fluentd-deamonset-to-echoserver-deploy
ok 4 - busybox-deploy-to-web-statefulset
not ok 5 - web-statefulset-to-busybox-deploy
---
reason: ephemeral container netassertv2-client-aihlpxcys exit code for test web-statefulset-to-busybox-deploy
is 0 instead of 1
...
ok 6 - fluentd-daemonset-to-web-statefulset
ok 7 - busybox-deploy-to-control-plane-dot-io
ok 8 - test-from-pod1-to-pod2
ok 9 - busybox-deploy-to-fake-host
```
## Compatibility
NetAssert is architected for compatibility with Kubernetes versions that offer support for ephemeral containers. We have thoroughly tested NetAssert with Kubernetes versions 1.25 to 1.35, confirming compatibility and performance stability.
For broader validation, our team has also executed comprehensive [end-to-end tests](./e2e/README.md) against various Kubernetes distributions and CNIs which is detailed below:
| Kubernetes Distribution | Supported Version | Container Network Interface (CNI) |
|-------------------------|-------------------|------------------------------------
| Amazon EKS | 1.34 and higher | AWS VPC CNI |
| Amazon EKS | 1.34 and higher | Calico (Version 3.26 or later) |
| Google GKE | 1.33 and higher | Google Cloud Platform VPC CNI |
| Google GKE | 1.33 and higher | Google Cloud Dataplane V2 |
## Checking for ephemeral container support
You can check for ephemeral container support using the following command:
```bash
❯ netassert ping
2023-03-27T11:25:28.421+0100 [INFO] [NetAssert-v2.0.0]: ✅ Successfully pinged /healthz endpoint of the Kubernetes server
2023-03-27T11:25:28.425+0100 [INFO] [NetAssert-v2.0.0]: ✅ Ephemeral containers are supported by the Kubernetes server
```
## Increasing logging verbosity
You can increase the logging level to `debug` by passing `--log-level` argument:
```bash
❯ netassert run --input-file ./e2e/manifests/test-cases.yaml --log-level=debug
```
## RBAC Configuration
This tool can be run according to the Principle of Least Privilege (PoLP) by properly configuring the RBAC.
The list of required permissions can be found in the `netassert` ClusterRole `rbac/cluster-role.yaml`, which could be redefined as a Role for namespacing reasons if needed. This role can then be bound to a "principal" either through a RoleBinding or a ClusterRoleBinding, depending on whether the scope of the role is supposed to be namespaced or not. The ClusterRoleBinding `rbac/cluster-rolebinding.yaml` is an example where the user `netassert-user` is assigned the role `netassert` using a cluster-wide binding called `netassert`
## Limitations
- When performing UDP scanning, the sniffer container [image](https://github.com/controlplaneio/netassertv2-packet-sniffer) needs `cap_net_raw` capability so that it can bind and read packets from the network interface. As a result, admission controllers or other security mechanisms must be modified to allow the `sniffer` image to run with this capability. Currently, the Security context used by the ephemeral sniffer container looks like the following:
```yaml
...
...
securityContext:
allowPrivilegeEscalation: false
capabilities:
add:
- NET_RAW
runAsNonRoot: true
...
...
```
- Although they do not consume any resources, ephemeral containers that are injected as part of the test(s) by `NetAssert` will remain in the Pod specification
- Service meshes are not currently supported
## E2E Tests
- Please check this [README.md](./e2e/README.md)
[testing_workflow_badge]: https://github.com/controlplaneio/netassert/actions/workflows/build.yaml/badge.svg
[release_workflow_badge]: https://github.com/controlplaneio/netassert/actions/workflows/release.yaml/badge.svg
================================================
FILE: SECURITY.md
================================================
# Security Policy
## Our Security Address
Contact: `security@control-plane.io`
Encryption: `https://keybase.io/sublimino/pgp_keys.asc`
Disclosure: `Full`
================================================
FILE: cmd/netassert/cli/common.go
================================================
package main
import (
"errors"
"github.com/controlplaneio/netassert/v2/internal/data"
"github.com/controlplaneio/netassert/v2/internal/kubeops"
"github.com/hashicorp/go-hclog"
)
// loadTestCases reads the NetAssert test cases from either a single file or
// a directory. Exactly one of the two locations must be supplied: providing
// neither, or both, is an error.
func loadTestCases(testCasesFile, testCasesDir string) (data.Tests, error) {
	fileSet := testCasesFile != ""
	dirSet := testCasesDir != ""

	if !fileSet && !dirSet {
		return nil, errors.New("either an input file or an input dir containing the tests must be provided using " +
			"flags (--input-file or --input-dir)")
	}

	if fileSet && dirSet {
		return nil, errors.New("input must be either a file or a directory but not both i.e use one of " +
			"the flags --input-file or --input-dir")
	}

	// exactly one source is set at this point
	if dirSet {
		return data.ReadTestsFromDir(testCasesDir)
	}
	return data.ReadTestsFromFile(testCasesFile)
}
// createService builds a kubeops.Service. When kubeconfigPath is empty the
// default client-construction rules apply; otherwise the supplied kubeconfig
// file is used.
func createService(kubeconfigPath string, l hclog.Logger) (*kubeops.Service, error) {
	if kubeconfigPath == "" {
		// no explicit kubeconfig supplied, fall back to the defaults
		return kubeops.NewDefaultService(l)
	}
	return kubeops.NewServiceFromKubeConfigFile(kubeconfigPath, l)
}
================================================
FILE: cmd/netassert/cli/gen_result.go
================================================
package main
import (
"fmt"
"os"
"github.com/hashicorp/go-hclog"
"github.com/controlplaneio/netassert/v2/internal/data"
)
// genResult logs every test result to lg, writes the aggregate results to a
// TAP file at tapFile (created/truncated), and returns a non-nil error when
// the TAP file cannot be produced or when at least one test case failed.
func genResult(testCases data.Tests, tapFile string, lg hclog.Logger) error {
	failedTestCases := 0
	for _, v := range testCases {
		// increment the no. of test cases
		if v.Pass {
			lg.Info("✅ Test Result", "Name", v.Name, "Pass", v.Pass)
			continue
		}
		lg.Info("❌ Test Result", "Name", v.Name, "Pass", v.Pass, "FailureReason", v.FailureReason)
		failedTestCases++
	}
	tf, err := os.Create(tapFile)
	if err != nil {
		return fmt.Errorf("unable to create tap file %q: %w", tapFile, err)
	}
	if err := testCases.TAPResult(tf); err != nil {
		// bug fix: the original returned here without closing tf, leaking
		// the descriptor; close best-effort — the write error is the one
		// worth reporting
		_ = tf.Close()
		return fmt.Errorf("unable to generate tap results: %w", err)
	}
	if err := tf.Close(); err != nil {
		return fmt.Errorf("unable to close tap file %q: %w", tapFile, err)
	}
	lg.Info("✍ Wrote test result in a TAP File", "fileName", tapFile)
	if failedTestCases > 0 {
		return fmt.Errorf("total %v test cases have failed", failedTestCases)
	}
	return nil
}
================================================
FILE: cmd/netassert/cli/main.go
================================================
package main
import (
"os"
_ "go.uber.org/automaxprocs"
)
// main is the CLI entry point: it dispatches to the cobra root command and
// exits with a non-zero status when command execution fails.
func main() {
	if rootCmd.Execute() != nil {
		os.Exit(1)
	}
}
================================================
FILE: cmd/netassert/cli/ping.go
================================================
package main
import (
"context"
"fmt"
"os"
"time"
"github.com/hashicorp/go-hclog"
"github.com/spf13/cobra"
"github.com/controlplaneio/netassert/v2/internal/kubeops"
"github.com/controlplaneio/netassert/v2/internal/logger"
)
const (
	apiServerHealthEndpoint = `/healthz` // health endpoint for the K8s server
)

// pingCmdConfig holds the flag values for the ping sub-command.
type pingCmdConfig struct {
	KubeConfig  string        // path to a kubeconfig file; "" means use the default loading rules
	PingTimeout time.Duration // overall deadline applied to the ping command's context
}
// pingCmdCfg receives the flag values bound in init().
var pingCmdCfg = pingCmdConfig{}

// pingCmd is the `ping` sub-command: it health-checks the API server and
// verifies ephemeral-container support, exiting non-zero on failure.
var pingCmd = &cobra.Command{
	Use: "ping",
	Short: "pings the K8s API server over HTTP(S) to see if it is alive and also checks if the server has support for " +
		"ephemeral containers.",
	Long: "pings the K8s API server over HTTP(S) to see if it is alive and also checks if the server has support for " +
		"ephemeral/debug containers.",
	Run: func(cmd *cobra.Command, args []string) {
		// bound the whole ping operation by the user-supplied timeout
		ctx, cancel := context.WithTimeout(context.Background(), pingCmdCfg.PingTimeout)
		defer cancel()
		lg := logger.NewHCLogger("info", fmt.Sprintf("%s-%s", appName, version), os.Stdout)
		k8sSvc, err := createService(pingCmdCfg.KubeConfig, lg)
		if err != nil {
			lg.Error("Ping failed, unable to build K8s Client", "error", err)
			os.Exit(1)
		}
		ping(ctx, lg, k8sSvc)
	},
	Version: rootCmd.Version,
}
// ping pings the health endpoint of the K8s API server and then checks that
// the server supports ephemeral containers; on either failure it logs the
// error and exits the process with status 1.
// (Original comment referred to checkEphemeralContainerSupport, which is not
// this function's name.)
func ping(ctx context.Context, lg hclog.Logger, k8sSvc *kubeops.Service) {
	if err := k8sSvc.PingHealthEndpoint(ctx, apiServerHealthEndpoint); err != nil {
		lg.Error("Ping failed", "error", err)
		os.Exit(1)
	}
	lg.Info("✅ Successfully pinged " + apiServerHealthEndpoint + " endpoint of the Kubernetes server")
	if err := k8sSvc.CheckEphemeralContainerSupport(ctx); err != nil {
		lg.Error("❌ Ephemeral containers are not supported by the Kubernetes server",
			"error", err)
		os.Exit(1)
	}
	lg.Info("✅ Ephemeral containers are supported by the Kubernetes server")
}
// init binds the ping sub-command's flags to pingCmdCfg.
func init() {
	pingCmd.Flags().DurationVarP(&pingCmdCfg.PingTimeout, "timeout", "t", 60*time.Second,
		"Timeout for the ping command")
	pingCmd.Flags().StringVarP(&pingCmdCfg.KubeConfig, "kubeconfig", "k", "", "path to kubeconfig file")
}
================================================
FILE: cmd/netassert/cli/root.go
================================================
package main
import (
"fmt"
"github.com/spf13/cobra"
)
// these variables are overwritten at build time using ldflags
var (
	version           = "v2.0.0-dev" // netassert version
	appName           = "NetAssert"  // name of the application
	gitHash           = ""           // the git hash of the build
	buildDate         = ""           // build date, will be injected by the build system
	scannerImgVersion = "latest"     // scanner container image version
	snifferImgVersion = "latest"     // sniffer container image version
)
// rootCmd is the top-level `netassert` command; all sub-commands are
// attached to it in init().
var rootCmd = &cobra.Command{
	Use:   "netassert",
	Short: "NetAssert is a command line utility to test network connectivity between kubernetes objects",
	Long: "NetAssert is a command line utility to test network connectivity between kubernetes objects.\n" +
		"It currently supports Deployment, Pod, Statefulset and Daemonset.\nYou can check the traffic flow between these objects or from these " +
		"objects to a remote host or an IP address.\n\nBuilt by ControlPlane https://control-plane.io",
	Version: fmt.Sprintf("\nBuilt by ControlPlane https://control-plane.io\n"+
		"Version: %s\nCommit Hash: %s\nBuild Date: %s\n",
		version, gitHash, buildDate),
}
// init registers every sub-command on the root command.
func init() {
	// add our subcommands
	rootCmd.AddCommand(runCmd)
	rootCmd.AddCommand(validateCmd)
	rootCmd.AddCommand(versionCmd)
	rootCmd.AddCommand(pingCmd)
}
================================================
FILE: cmd/netassert/cli/run.go
================================================
package main
import (
"context"
"fmt"
"os"
"os/signal"
"syscall"
"time"
"github.com/hashicorp/go-hclog"
"github.com/spf13/cobra"
"github.com/controlplaneio/netassert/v2/internal/engine"
"github.com/controlplaneio/netassert/v2/internal/logger"
)
// runCmdConfig - configuration for the run command, populated from CLI flags.
type runCmdConfig struct {
	TapFile                string // path of the TAP file the results are written to
	SuffixLength           int    // length of the random suffix appended to container names
	SnifferContainerImage  string // image used for the sniffer ephemeral container
	SnifferContainerPrefix string // name prefix of the sniffer container
	ScannerContainerImage  string // image used for the scanner ephemeral container
	ScannerContainerPrefix string // name prefix of the scanner container
	PauseInSeconds         int    // seconds to pause before each test case
	PacketCaptureInterface string // network interface the sniffer captures packets on
	KubeConfig             string // path to a kubeconfig file; "" means use the default loading rules
	TestCasesFile          string // single input file containing netassert tests
	TestCasesDir           string // directory containing netassert test files
	LogLevel               string // log level (info, debug or trace)
}
// Initialize with default values; these also act as the defaults for the
// cobra flags bound in init().
var runCmdCfg = runCmdConfig{
	TapFile:                "results.tap", // name of the default TAP file where the results will be written
	SuffixLength:           9,             // suffix length of the random string to be appended to the container name
	SnifferContainerImage:  fmt.Sprintf("%s:%s", "docker.io/controlplane/netassertv2-packet-sniffer", snifferImgVersion),
	SnifferContainerPrefix: "netassertv2-sniffer",
	ScannerContainerImage:  fmt.Sprintf("%s:%s", "docker.io/controlplane/netassertv2-l4-client", scannerImgVersion),
	ScannerContainerPrefix: "netassertv2-client",
	PauseInSeconds:         1,      // seconds to pause before each test case
	PacketCaptureInterface: `eth0`, // the interface used by the sniffer image to capture traffic
	LogLevel:               "info", // log level
}
// runCmd is the `run` sub-command: it executes the test suite loaded from
// --input-file or --input-dir and exits non-zero when any test fails.
var runCmd = &cobra.Command{
	Use: "run",
	Short: "Run the program with the specified source file or source directory. Only one of the two " +
		"flags (--input-file and --input-dir) can be used at a time. The --input-dir " +
		"flag only reads the first level of the directory and does not recursively scan it.",
	Long: "Run the program with the specified source file or source directory. Only one of the two " +
		"flags (--input-file and --input-dir) can be used at a time. The --input-dir " +
		"flag only reads the first level of the directory and does not recursively scan it.",
	Run: func(cmd *cobra.Command, args []string) {
		lg := logger.NewHCLogger(runCmdCfg.LogLevel, fmt.Sprintf("%s-%s", appName, version), os.Stdout)
		if err := runTests(lg); err != nil {
			lg.Error(" ❌ Failed to successfully run all the tests", "error", err)
			os.Exit(1)
		}
	},
	Version: rootCmd.Version,
}
// runTests - runs the netAssert Test(s): it loads the test cases, verifies
// the cluster is reachable and supports ephemeral containers, executes the
// tests in a goroutine (cancellable via SIGINT/SIGTERM/SIGQUIT) and finally
// writes the results. The returned error is non-nil when loading, cluster
// setup or any test case fails.
func runTests(lg hclog.Logger) error {
	testCases, err := loadTestCases(runCmdCfg.TestCasesFile, runCmdCfg.TestCasesDir)
	if err != nil {
		return fmt.Errorf("unable to load test cases: %w", err)
	}
	k8sSvc, err := createService(runCmdCfg.KubeConfig, lg)
	if err != nil {
		return fmt.Errorf("failed to build K8s client: %w", err)
	}
	ctx := context.Background()
	// cancel the context when the OS asks us to stop
	ctx, cancel := signal.NotifyContext(ctx, os.Interrupt, syscall.SIGTERM, syscall.SIGQUIT)
	defer cancel()
	// ping the kubernetes cluster and check to see if
	// it is alive and that it has support for ephemeral container(s)
	ping(ctx, lg, k8sSvc)
	// initialise our test runner
	testRunner := engine.New(k8sSvc, lg)
	// initialise our done signal
	done := make(chan struct{})
	// run the tests in a goroutine so that we can react to OS signals while
	// they are running
	go func() {
		defer func() {
			// once all our go routines have finished notify the done channel
			done <- struct{}{}
		}()
		// run the tests
		testRunner.RunTests(
			ctx,                              // context to use
			testCases,                        // net assert test cases
			runCmdCfg.SnifferContainerPrefix, // prefix used for the sniffer container name
			runCmdCfg.SnifferContainerImage,  // sniffer container image location
			runCmdCfg.ScannerContainerPrefix, // scanner container prefix used in the container name
			runCmdCfg.ScannerContainerImage,  // scanner container image location
			runCmdCfg.SuffixLength,           // length of random string that will be appended to the snifferContainerPrefix and scannerContainerPrefix
			time.Duration(runCmdCfg.PauseInSeconds)*time.Second, // pause duration between each test
			runCmdCfg.PacketCaptureInterface, // the interface used by the sniffer image to capture traffic
		)
	}()
	// Wait for the tests to finish or for the context to be canceled
	select {
	case <-done:
		// all our tests have finished running
	case <-ctx.Done():
		lg.Info("Received signal from OS", "msg", ctx.Err())
		// context has been cancelled, we wait for our test runner to finish
		<-done
	}
	return genResult(testCases, runCmdCfg.TapFile, lg)
}
// init binds the run sub-command's flags to runCmdCfg; the struct's current
// values double as the flag defaults.
func init() {
	// Bind flags to the runCmd
	runCmd.Flags().StringVarP(&runCmdCfg.TapFile, "tap", "t", runCmdCfg.TapFile, "output tap file containing the tests results")
	runCmd.Flags().IntVarP(&runCmdCfg.SuffixLength, "suffix-length", "s", runCmdCfg.SuffixLength, "length of the random suffix that will appended to the scanner/sniffer containers")
	runCmd.Flags().StringVarP(&runCmdCfg.SnifferContainerImage, "sniffer-image", "i", runCmdCfg.SnifferContainerImage, "container image to be used as sniffer")
	runCmd.Flags().StringVarP(&runCmdCfg.SnifferContainerPrefix, "sniffer-prefix", "p", runCmdCfg.SnifferContainerPrefix, "prefix of the sniffer container")
	runCmd.Flags().StringVarP(&runCmdCfg.ScannerContainerImage, "scanner-image", "c", runCmdCfg.ScannerContainerImage, "container image to be used as scanner")
	runCmd.Flags().StringVarP(&runCmdCfg.ScannerContainerPrefix, "scanner-prefix", "x", runCmdCfg.ScannerContainerPrefix, "prefix of the scanner debug container name")
	runCmd.Flags().IntVarP(&runCmdCfg.PauseInSeconds, "pause-sec", "P", runCmdCfg.PauseInSeconds, "number of seconds to pause before running each test case")
	runCmd.Flags().StringVarP(&runCmdCfg.PacketCaptureInterface, "interface", "n", runCmdCfg.PacketCaptureInterface, "the network interface used by the sniffer container to capture packets")
	runCmd.Flags().StringVarP(&runCmdCfg.TestCasesFile, "input-file", "f", runCmdCfg.TestCasesFile, "input test file that contains a list of netassert tests")
	runCmd.Flags().StringVarP(&runCmdCfg.TestCasesDir, "input-dir", "d", runCmdCfg.TestCasesDir, "input test directory that contains a list of netassert test files")
	runCmd.Flags().StringVarP(&runCmdCfg.KubeConfig, "kubeconfig", "k", runCmdCfg.KubeConfig, "path to kubeconfig file")
	runCmd.Flags().StringVarP(&runCmdCfg.LogLevel, "log-level", "l", "info", "set log level (info, debug or trace)")
}
================================================
FILE: cmd/netassert/cli/validate.go
================================================
package main
import (
"fmt"
"os"
"github.com/spf13/cobra"
)
// validateCmdConfig - config for validate sub-command
type validateCmdConfig struct {
	TestCasesFile string // single input file containing netassert tests
	TestCasesDir  string // directory containing netassert test files
}

var (
	validateCmdCfg validateCmdConfig // config for validate sub-command that will be used in the package

	// validateCmd checks the syntax and semantics of the supplied test
	// file/directory without running any tests.
	validateCmd = &cobra.Command{
		Use: "validate",
		Short: "verify the syntax and semantic correctness of netassert test(s) in a test file or folder. Only one of the " +
			"two flags (--input-file and --input-dir) can be used at a time.",
		Run:     validateTestCases,
		Version: rootCmd.Version,
	}
)
// validateTestCases - validates test cases from file or directory, printing
// the outcome to stdout; the process exits with status 1 when validation
// fails.
func validateTestCases(cmd *cobra.Command, args []string) {
	_, err := loadTestCases(validateCmdCfg.TestCasesFile, validateCmdCfg.TestCasesDir)
	if err != nil {
		// bug fix: the original passed hclog-style key/value arguments to
		// fmt.Println, printing a bare "error" token; format the error into
		// the message instead
		fmt.Printf("❌ Validation of test cases failed, error: %v\n", err)
		os.Exit(1)
	}
	fmt.Println("✅ All test cases are valid syntax-wise and semantically")
}
// init binds the validate sub-command's flags to validateCmdCfg.
func init() {
	validateCmd.Flags().StringVarP(&validateCmdCfg.TestCasesFile, "input-file", "f", "", "input test file that contains a list of netassert tests")
	validateCmd.Flags().StringVarP(&validateCmdCfg.TestCasesDir, "input-dir", "d", "", "input test directory that contains a list of netassert test files")
}
================================================
FILE: cmd/netassert/cli/version.go
================================================
package main
import (
"fmt"
"os"
"github.com/spf13/cobra"
"github.com/controlplaneio/netassert/v2/internal/logger"
)
// versionCmd is the `version` sub-command; it re-uses the root command's
// --version output.
var versionCmd = &cobra.Command{
	Use:          "version",
	Short:        "Prints the version and other details associated with the program",
	SilenceUsage: false,
	Run:          versionDetails,
}
// versionDetails - prints build information to the STDOUT by delegating to
// the root command's --version handling, so the output is identical to
// `netassert --version`; exits with status 1 if that delegation fails.
func versionDetails(cmd *cobra.Command, args []string) {
	root := cmd.Root()
	root.SetArgs([]string{"--version"})
	if err := root.Execute(); err != nil {
		lg := logger.NewHCLogger(runCmdCfg.LogLevel, fmt.Sprintf("%s-%s", appName, version), os.Stdout)
		lg.Error("Failed to get version details", "error", err)
		os.Exit(1)
	}
}
================================================
FILE: download.sh
================================================
#!/bin/bash
#
# Downloads the latest netassert release tarball for the current OS/arch,
# verifies its SHA-256 checksum and unpacks the binary into the current
# working directory.
set -euo pipefail

# fix: do not clobber the shell's special USER and PWD variables
OWNER='controlplaneio'
REPO='netassert'
BINARY='netassert'
DEST_DIR=$(pwd)

# Resolve the latest release tag via the GitHub API.
LATEST=$(curl --silent "https://api.github.com/repos/$OWNER/$REPO/releases/latest" | grep '"tag_name":' | cut -d'"' -f4)
if [[ -z "$LATEST" ]]; then
  echo "Failed to determine the latest release tag"
  exit 1
fi
echo "Found latest release: $LATEST"

OS=$(uname -s | tr '[:upper:]' '[:lower:]')
echo "OS: $OS"

# Map machine names to the arch labels used by the release artefacts
# (generalised: aarch64/arm64 handled in addition to x86_64).
ARCH=$(uname -m)
case "$ARCH" in
  x86_64) ARCH="amd64" ;;
  aarch64 | arm64) ARCH="arm64" ;;
esac
echo "ARCH: $ARCH"

FILE="${BINARY}_${LATEST}_${OS}_${ARCH}.tar.gz"
DOWNLOAD_URL="https://github.com/controlplaneio/${REPO}/releases/download/${LATEST}/${FILE}"
CHECKSUM_URL="https://github.com/controlplaneio/${REPO}/releases/download/${LATEST}/checksums-sha256.txt"

echo "[+] Downloading latest checksums from ${CHECKSUM_URL}"
if ! curl -sfLo "checksums.txt" "$CHECKSUM_URL"; then
  echo "Failed to download checksums"
  exit 1
fi

echo "[+] Downloading latest tarball from ${DOWNLOAD_URL}"
if ! curl -sfLO "$DOWNLOAD_URL"; then
  echo "Failed to download tarball"
  exit 1
fi

echo "[+] Verifying checksums"
if ! sha256sum -c checksums.txt --ignore-missing; then
  echo "[+] Checksum verification failed"
  exit 1
fi
echo "[+] Downloaded file verified successfully"

## unzip the tarball
echo "[+] Unzipping the downloaded tarball in directory ${DEST_DIR}"
if ! tar -xzf "${FILE}"; then
  echo "[+] Failed to unzip the downloaded tarball"
  exit 1
fi
echo "[+] Downloaded file unzipped successfully"

if [[ ! -f "${BINARY}" ]]; then
  echo "[+] ${BINARY} file was not found in the current path"
  exit 1
fi

echo "[+] You can now run netassert from ${DEST_DIR}/${BINARY}"
================================================
FILE: e2e/README.md
================================================
# End-to-End(E2E) Tests
The E2E tests use `terraform` and `terratest` to spin up GKE, EKS and Kind clusters. There are altogether five tests:
- AWS EKS 1.34 with AWS VPC CNI
- Test AWS EKS 1.34 with the default AWS VPC CNI that support Network Policies
- AWS EKS 1.34 with Calico CNI
- Test AWS EKS 1.34 with calico CNI v3.35.0. As part of the test, the AWS CNI is uninstalled and Calico is installed
- GCP GKE 1.33 with GCP VPC CNI
- GKE 1.33 with GCP VPC CNI
- GCP GKE 1.33 GCP Dataplane v2
- GKE 1.33 with data-plane v2 based on Cilium
- Kind k8s 1.35 with Calico CNI
- Kind with Calico CNI
*Each test is skipped if the corresponding environment variable is not set.*
| Test | Environment variable |
|-------------------------------------|----------------------|
| AWS EKS with AWS VPC CNI | EKS_VPC_E2E_TESTS |
| AWS EKS with Calico CNI | EKS_CALICO_E2E_TESTS |
| GCP GKE with GCP VPC CNI | GKE_VPC_E2E_TESTS |
| GCP GKE GCP with DataPlane V2 | GKE_DPV2_E2E_TESTS |
| Kind with Calico CNI | KIND_E2E_TESTS |
## Running tests
- Make sure you have installed `kubectl` and the `AWS CLI v2`
- Make sure you have also installed `gke-gcloud-auth-plugin` for kubectl by following this [link](https://cloud.google.com/blog/products/containers-kubernetes/kubectl-auth-changes-in-gke)
For AWS EKS tests, make sure you have valid AWS credentials:
```bash
❯ aws sso login
❯ export EKS_VPC_E2E_TESTS=yes
❯ export EKS_CALICO_E2E_TESTS=yes
```
For GCP GKE tests, make sure you export the Project name and set it as the default project:
```bash
❯ export GOOGLE_PROJECT=
❯ gcloud config set project
❯ gcloud auth application-default login
❯ export GKE_VPC_E2E_TESTS=yes
❯ export GKE_DPV2_E2E_TESTS=yes
```
Run the tests using the following command:
```bash
# from the root of the project
❯ go test -timeout=91m -v ./e2e/... -count=1
```
Tests can be configured by updating values in [end-to-end test helpers](./helpers/)
## Azure AKS Integration
Currently, end-to-end testing of NetAssert with Azure Kubernetes Service (AKS) is not scheduled. However, we do not foresee any architectural reasons that would prevent successful integration.
### Network Policy Support
There are [three primary approaches](https://learn.microsoft.com/en-us/azure/aks/use-network-policies) for supporting network policies in AKS.
If the requirement is limited to **Linux nodes only** (excluding Windows), the recommended solution is [Azure CNI powered by Cilium](https://learn.microsoft.com/en-us/azure/aks/azure-cni-powered-by-cilium).
### Deployment via Terraform
For deploying a testing cluster, the Container Network Interface (CNI) configuration appears straightforward. It can likely be handled via a single parameter in the `azurerm` provider, specifically the [`network_policy` argument](https://registry.terraform.io/providers/hashicorp/azurerm/latest/docs/resources/kubernetes_cluster#network_policy-1).
*Note: This Terraform configuration has yet to be validated.*
================================================
FILE: e2e/clusters/aws-eks-terraform-module/eks.tf
================================================
# AWS provider pinned to the caller-supplied region.
provider "aws" {
  region = var.region
}

# Availability zones in the chosen region (consumed by the VPC module).
data "aws_availability_zones" "available" {}

# EKS cluster built from the community terraform-aws-modules/eks module.
module "eks" {
  source  = "terraform-aws-modules/eks/aws"
  version = "~> 20.0"
  #version = "~> 19"

  cluster_name    = var.cluster_name
  cluster_version = var.cluster_version

  vpc_id     = module.vpc.vpc_id
  subnet_ids = module.vpc.private_subnets

  cluster_endpoint_public_access           = true
  enable_cluster_creator_admin_permissions = true

  cluster_addons = {
    vpc-cni = {
      # install the CNI add-on before compute nodes join the cluster
      before_compute = true
      most_recent    = true
      configuration_values = jsonencode({
        #resolve_conflicts_on_update = "OVERWRITE"
        # toggles native VPC-CNI network-policy enforcement
        enableNetworkPolicy = var.enable_vpc_network_policies ? "true" : "false"
      })
    }
  }

  eks_managed_node_groups = {
    example = {
      name = "${var.node_group_name}1"
      # Starting on 1.30, AL2023 is the default AMI type for EKS managed node groups
      ami_type       = "AL2023_x86_64_STANDARD"
      instance_types = ["t3.medium"]

      min_size     = 0
      max_size     = 3
      desired_size = var.desired_size
    }
  }

  # Extend node-to-node security group rules
  node_security_group_additional_rules = {
    ingress_self_all = {
      description = "Node to node all ports/protocols"
      protocol    = "-1"
      from_port   = 0
      to_port     = 0
      type        = "ingress"
      self        = true
    }
    egress_all = {
      description      = "Node all egress"
      protocol         = "-1"
      from_port        = 0
      to_port          = 0
      type             = "egress"
      cidr_blocks      = ["0.0.0.0/0"]
      ipv6_cidr_blocks = ["::/0"]
    }
  }
}

# Writes a kubeconfig for the new cluster once the EKS module has finished.
resource "null_resource" "generate_kubeconfig" {
  depends_on = [module.eks]

  provisioner "local-exec" {
    command = "aws eks update-kubeconfig --region ${var.region} --name ${module.eks.cluster_name} --kubeconfig ${var.kubeconfig_file}"
  }
}
================================================
FILE: e2e/clusters/aws-eks-terraform-module/outputs.tf
================================================
# Outputs surfaced after cluster creation (consumed by the e2e test helpers).

output "cluster_endpoint" {
  description = "Endpoint for EKS control plane"
  value       = module.eks.cluster_endpoint
}

output "cluster_security_group_id" {
  description = "Security group ids attached to the cluster control plane"
  value       = module.eks.cluster_security_group_id
}

output "region" {
  description = "AWS region"
  value       = var.region
}

output "cluster_name" {
  description = "Kubernetes Cluster Name"
  value       = module.eks.cluster_name
}
================================================
FILE: e2e/clusters/aws-eks-terraform-module/variables.tf
================================================
# Input variables for the EKS e2e test cluster module.

variable "region" {
  description = "AWS region"
  type        = string
}

variable "cluster_version" {
  description = "The AWS EKS cluster version"
  type        = string
}

variable "cluster_name" {
  type        = string
  description = "name of the cluster and VPC"
}

variable "kubeconfig_file" {
  type        = string
  description = "name of the file that contains the kubeconfig information"
  default     = ".kubeconfig"
}

variable "desired_size" {
  type        = number
  description = "desired size of the worker node pool"
  default     = 0
}

variable "node_group_name" {
  type        = string
  description = "prefix of the node group"
  default     = "group"
}

variable "enable_vpc_network_policies" {
  type        = bool
  description = "enable or disable vpc network policies"
}
================================================
FILE: e2e/clusters/aws-eks-terraform-module/vpc.tf
================================================
# VPC hosting the EKS cluster: three private subnets (nodes) and three public
# subnets (load balancers), sharing a single NAT gateway.
module "vpc" {
  source = "terraform-aws-modules/vpc/aws"

  // set VPC name same as the EKS cluster name
  name    = var.cluster_name
  version = "~> 5.0"

  cidr = "10.0.0.0/16"
  azs  = slice(data.aws_availability_zones.available.names, 0, 3)

  private_subnets = ["10.0.1.0/24", "10.0.2.0/24", "10.0.3.0/24"]
  public_subnets  = ["10.0.4.0/24", "10.0.5.0/24", "10.0.6.0/24"]

  enable_nat_gateway   = true
  single_nat_gateway   = true
  enable_dns_hostnames = true
  enable_dns_support   = true

  # subnet tags Kubernetes uses to discover ELB-capable subnets
  public_subnet_tags = {
    "kubernetes.io/cluster/${var.cluster_name}" = "shared"
    "kubernetes.io/role/elb"                    = 1
  }

  private_subnet_tags = {
    "kubernetes.io/cluster/${var.cluster_name}" = "shared"
    "kubernetes.io/role/internal-elb"           = 1
  }

  tags = {
    owner       = "prefix"
    environment = "test"
  }
}
================================================
FILE: e2e/clusters/eks-with-calico-cni/calico-3.26.4.yaml
================================================
---
# Source: calico/templates/calico-kube-controllers.yaml
# This manifest creates a Pod Disruption Budget for Controller to allow K8s Cluster Autoscaler to evict
apiVersion: policy/v1
kind: PodDisruptionBudget
metadata:
  name: calico-kube-controllers
  namespace: kube-system
  labels:
    k8s-app: calico-kube-controllers
spec:
  maxUnavailable: 1
  selector:
    matchLabels:
      k8s-app: calico-kube-controllers
---
# Source: calico/templates/calico-kube-controllers.yaml
# ServiceAccount referenced by the calico-kube-controllers workload elsewhere
# in this manifest.
apiVersion: v1
kind: ServiceAccount
metadata:
  name: calico-kube-controllers
  namespace: kube-system
---
# Source: calico/templates/calico-node.yaml
# ServiceAccount referenced by the calico-node workload elsewhere in this
# manifest.
apiVersion: v1
kind: ServiceAccount
metadata:
  name: calico-node
  namespace: kube-system
---
# Source: calico/templates/calico-node.yaml
# ServiceAccount for the Calico CNI plugin.
apiVersion: v1
kind: ServiceAccount
metadata:
  name: calico-cni-plugin
  namespace: kube-system
---
# Source: calico/templates/calico-config.yaml
# This ConfigMap is used to configure a self-hosted Calico installation.
kind: ConfigMap
apiVersion: v1
metadata:
  name: calico-config
  namespace: kube-system
data:
  # Typha is disabled.
  typha_service_name: "none"
  # Configure the backend to use.
  calico_backend: "bird"

  # Configure the MTU to use for workload interfaces and tunnels.
  # By default, MTU is auto-detected, and explicitly setting this field should not be required.
  # You can override auto-detection by providing a non-zero value.
  veth_mtu: "0"

  # The CNI network configuration to install on each node. The special
  # values in this config will be automatically populated.
  cni_network_config: |-
    {
      "name": "k8s-pod-network",
      "cniVersion": "0.3.1",
      "plugins": [
        {
          "type": "calico",
          "log_level": "info",
          "log_file_path": "/var/log/calico/cni/cni.log",
          "datastore_type": "kubernetes",
          "nodename": "__KUBERNETES_NODE_NAME__",
          "mtu": __CNI_MTU__,
          "ipam": {
              "type": "calico-ipam"
          },
          "policy": {
              "type": "k8s"
          },
          "kubernetes": {
              "kubeconfig": "__KUBECONFIG_FILEPATH__"
          }
        },
        {
          "type": "portmap",
          "snat": true,
          "capabilities": {"portMappings": true}
        },
        {
          "type": "bandwidth",
          "capabilities": {"bandwidth": true}
        }
      ]
    }
---
# Source: calico/templates/kdd-crds.yaml
# CustomResourceDefinition for the cluster-scoped Calico BGPConfiguration
# resource (vendored upstream manifest — do not hand-edit the schema).
apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
  name: bgpconfigurations.crd.projectcalico.org
spec:
  group: crd.projectcalico.org
  names:
    kind: BGPConfiguration
    listKind: BGPConfigurationList
    plural: bgpconfigurations
    singular: bgpconfiguration
  preserveUnknownFields: false
  scope: Cluster
  versions:
  - name: v1
    schema:
      openAPIV3Schema:
        description: BGPConfiguration contains the configuration for any BGP routing.
        properties:
          apiVersion:
            description: 'APIVersion defines the versioned schema of this representation
              of an object. Servers should convert recognized schemas to the latest
              internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
            type: string
          kind:
            description: 'Kind is a string value representing the REST resource this
              object represents. Servers may infer this from the endpoint the client
              submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
            type: string
          metadata:
            type: object
          spec:
            description: BGPConfigurationSpec contains the values of the BGP configuration.
            properties:
              asNumber:
                description: 'ASNumber is the default AS number used by a node. [Default:
                  64512]'
                format: int32
                type: integer
              bindMode:
                description: BindMode indicates whether to listen for BGP connections
                  on all addresses (None) or only on the node's canonical IP address
                  Node.Spec.BGP.IPvXAddress (NodeIP). Default behaviour is to listen
                  for BGP connections on all addresses.
                type: string
              communities:
                description: Communities is a list of BGP community values and their
                  arbitrary names for tagging routes.
                items:
                  description: Community contains standard or large community value
                    and its name.
                  properties:
                    name:
                      description: Name given to community value.
                      type: string
                    value:
                      description: Value must be of format `aa:nn` or `aa:nn:mm`.
                        For standard community use `aa:nn` format, where `aa` and
                        `nn` are 16 bit number. For large community use `aa:nn:mm`
                        format, where `aa`, `nn` and `mm` are 32 bit number. Where,
                        `aa` is an AS Number, `nn` and `mm` are per-AS identifier.
                      pattern: ^(\d+):(\d+)$|^(\d+):(\d+):(\d+)$
                      type: string
                  type: object
                type: array
              ignoredInterfaces:
                description: IgnoredInterfaces indicates the network interfaces that
                  needs to be excluded when reading device routes.
                items:
                  type: string
                type: array
              listenPort:
                description: ListenPort is the port where BGP protocol should listen.
                  Defaults to 179
                maximum: 65535
                minimum: 1
                type: integer
              logSeverityScreen:
                description: 'LogSeverityScreen is the log severity above which logs
                  are sent to the stdout. [Default: INFO]'
                type: string
              nodeMeshMaxRestartTime:
                description: Time to allow for software restart for node-to-mesh peerings. When
                  specified, this is configured as the graceful restart timeout. When
                  not specified, the BIRD default of 120s is used. This field can
                  only be set on the default BGPConfiguration instance and requires
                  that NodeMesh is enabled
                type: string
              nodeMeshPassword:
                description: Optional BGP password for full node-to-mesh peerings.
                  This field can only be set on the default BGPConfiguration instance
                  and requires that NodeMesh is enabled
                properties:
                  secretKeyRef:
                    description: Selects a key of a secret in the node pod's namespace.
                    properties:
                      key:
                        description: The key of the secret to select from. Must be
                          a valid secret key.
                        type: string
                      name:
                        description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
                          TODO: Add other useful fields. apiVersion, kind, uid?'
                        type: string
                      optional:
                        description: Specify whether the Secret or its key must be
                          defined
                        type: boolean
                    required:
                    - key
                    type: object
                type: object
              nodeToNodeMeshEnabled:
                description: 'NodeToNodeMeshEnabled sets whether full node to node
                  BGP mesh is enabled. [Default: true]'
                type: boolean
              prefixAdvertisements:
                description: PrefixAdvertisements contains per-prefix advertisement
                  configuration.
                items:
                  description: PrefixAdvertisement configures advertisement properties
                    for the specified CIDR.
                  properties:
                    cidr:
                      description: CIDR for which properties should be advertised.
                      type: string
                    communities:
                      description: Communities can be list of either community names
                        already defined in `Specs.Communities` or community value
                        of format `aa:nn` or `aa:nn:mm`. For standard community use
                        `aa:nn` format, where `aa` and `nn` are 16 bit number. For
                        large community use `aa:nn:mm` format, where `aa`, `nn` and
                        `mm` are 32 bit number. Where,`aa` is an AS Number, `nn` and
                        `mm` are per-AS identifier.
                      items:
                        type: string
                      type: array
                  type: object
                type: array
              serviceClusterIPs:
                description: ServiceClusterIPs are the CIDR blocks from which service
                  cluster IPs are allocated. If specified, Calico will advertise these
                  blocks, as well as any cluster IPs within them.
                items:
                  description: ServiceClusterIPBlock represents a single allowed ClusterIP
                    CIDR block.
                  properties:
                    cidr:
                      type: string
                  type: object
                type: array
              serviceExternalIPs:
                description: ServiceExternalIPs are the CIDR blocks for Kubernetes
                  Service External IPs. Kubernetes Service ExternalIPs will only be
                  advertised if they are within one of these blocks.
                items:
                  description: ServiceExternalIPBlock represents a single allowed
                    External IP CIDR block.
                  properties:
                    cidr:
                      type: string
                  type: object
                type: array
              serviceLoadBalancerIPs:
                description: ServiceLoadBalancerIPs are the CIDR blocks for Kubernetes
                  Service LoadBalancer IPs. Kubernetes Service status.LoadBalancer.Ingress
                  IPs will only be advertised if they are within one of these blocks.
                items:
                  description: ServiceLoadBalancerIPBlock represents a single allowed
                    LoadBalancer IP CIDR block.
                  properties:
                    cidr:
                      type: string
                  type: object
                type: array
            type: object
        type: object
    served: true
    storage: true
status:
  acceptedNames:
    kind: ""
    plural: ""
  conditions: []
  storedVersions: []
---
# Source: calico/templates/kdd-crds.yaml
apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
annotations:
controller-gen.kubebuilder.io/version: (devel)
creationTimestamp: null
name: bgpfilters.crd.projectcalico.org
spec:
group: crd.projectcalico.org
names:
kind: BGPFilter
listKind: BGPFilterList
plural: bgpfilters
singular: bgpfilter
scope: Cluster
versions:
- name: v1
schema:
openAPIV3Schema:
properties:
apiVersion:
description: 'APIVersion defines the versioned schema of this representation
of an object. Servers should convert recognized schemas to the latest
internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
type: string
kind:
description: 'Kind is a string value representing the REST resource this
object represents. Servers may infer this from the endpoint the client
submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
type: string
metadata:
type: object
spec:
description: BGPFilterSpec contains the IPv4 and IPv6 filter rules of
the BGP Filter.
properties:
exportV4:
description: The ordered set of IPv4 BGPFilter rules acting on exporting
routes to a peer.
items:
description: BGPFilterRuleV4 defines a BGP filter rule consisting
a single IPv4 CIDR block and a filter action for this CIDR.
properties:
action:
type: string
cidr:
type: string
matchOperator:
type: string
required:
- action
- cidr
- matchOperator
type: object
type: array
exportV6:
description: The ordered set of IPv6 BGPFilter rules acting on exporting
routes to a peer.
items:
description: BGPFilterRuleV6 defines a BGP filter rule consisting
a single IPv6 CIDR block and a filter action for this CIDR.
properties:
action:
type: string
cidr:
type: string
matchOperator:
type: string
required:
- action
- cidr
- matchOperator
type: object
type: array
importV4:
description: The ordered set of IPv4 BGPFilter rules acting on importing
routes from a peer.
items:
description: BGPFilterRuleV4 defines a BGP filter rule consisting
a single IPv4 CIDR block and a filter action for this CIDR.
properties:
action:
type: string
cidr:
type: string
matchOperator:
type: string
required:
- action
- cidr
- matchOperator
type: object
type: array
importV6:
description: The ordered set of IPv6 BGPFilter rules acting on importing
routes from a peer.
items:
description: BGPFilterRuleV6 defines a BGP filter rule consisting
a single IPv6 CIDR block and a filter action for this CIDR.
properties:
action:
type: string
cidr:
type: string
matchOperator:
type: string
required:
- action
- cidr
- matchOperator
type: object
type: array
type: object
type: object
served: true
storage: true
status:
acceptedNames:
kind: ""
plural: ""
conditions: []
storedVersions: []
---
# Source: calico/templates/kdd-crds.yaml
apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
name: bgppeers.crd.projectcalico.org
spec:
group: crd.projectcalico.org
names:
kind: BGPPeer
listKind: BGPPeerList
plural: bgppeers
singular: bgppeer
preserveUnknownFields: false
scope: Cluster
versions:
- name: v1
schema:
openAPIV3Schema:
properties:
apiVersion:
description: 'APIVersion defines the versioned schema of this representation
of an object. Servers should convert recognized schemas to the latest
internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
type: string
kind:
description: 'Kind is a string value representing the REST resource this
object represents. Servers may infer this from the endpoint the client
submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
type: string
metadata:
type: object
spec:
description: BGPPeerSpec contains the specification for a BGPPeer resource.
properties:
asNumber:
description: The AS Number of the peer.
format: int32
type: integer
filters:
description: The ordered set of BGPFilters applied on this BGP peer.
items:
type: string
type: array
keepOriginalNextHop:
description: Option to keep the original nexthop field when routes
are sent to a BGP Peer. Setting "true" configures the selected BGP
Peers node to use the "next hop keep;" instead of "next hop self;" (default)
in the specific branch of the Node on "bird.cfg".
type: boolean
maxRestartTime:
description: Time to allow for software restart. When specified,
this is configured as the graceful restart timeout. When not specified,
the BIRD default of 120s is used.
type: string
node:
description: The node name identifying the Calico node instance that
is targeted by this peer. If this is not set, and no nodeSelector
is specified, then this BGP peer selects all nodes in the cluster.
type: string
nodeSelector:
description: Selector for the nodes that should have this peering. When
this is set, the Node field must be empty.
type: string
numAllowedLocalASNumbers:
description: Maximum number of local AS numbers that are allowed in
the AS path for received routes. This removes BGP loop prevention
and should only be used if absolutely necessary.
format: int32
type: integer
password:
description: Optional BGP password for the peerings generated by this
BGPPeer resource.
properties:
secretKeyRef:
description: Selects a key of a secret in the node pod's namespace.
properties:
key:
description: The key of the secret to select from. Must be
a valid secret key.
type: string
name:
description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
TODO: Add other useful fields. apiVersion, kind, uid?'
type: string
optional:
description: Specify whether the Secret or its key must be
defined
type: boolean
required:
- key
type: object
type: object
peerIP:
description: The IP address of the peer followed by an optional port
number to peer with. If port number is given, format should be `[<IPv6>]:port`
or `<IPv4>:<port>` for IPv4. If optional port number is not set,
and this peer IP and ASNumber belongs to a calico/node with ListenPort
set in BGPConfiguration, then we use that port to peer.
type: string
peerSelector:
description: Selector for the remote nodes to peer with. When this
is set, the PeerIP and ASNumber fields must be empty. For each
peering between the local node and selected remote nodes, we configure
an IPv4 peering if both ends have NodeBGPSpec.IPv4Address specified,
and an IPv6 peering if both ends have NodeBGPSpec.IPv6Address specified. The
remote AS number comes from the remote node's NodeBGPSpec.ASNumber,
or the global default if that is not set.
type: string
reachableBy:
description: Add an exact, i.e. /32, static route toward peer IP in
order to prevent route flapping. ReachableBy contains the address
of the gateway which peer can be reached by.
type: string
sourceAddress:
description: Specifies whether and how to configure a source address
for the peerings generated by this BGPPeer resource. Default value
"UseNodeIP" means to configure the node IP as the source address. "None"
means not to configure a source address.
type: string
ttlSecurity:
description: TTLSecurity enables the generalized TTL security mechanism
(GTSM) which protects against spoofed packets by ignoring received
packets with a smaller than expected TTL value. The provided value
is the number of hops (edges) between the peers.
type: integer
type: object
type: object
served: true
storage: true
status:
acceptedNames:
kind: ""
plural: ""
conditions: []
storedVersions: []
---
# Source: calico/templates/kdd-crds.yaml
apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
name: blockaffinities.crd.projectcalico.org
spec:
group: crd.projectcalico.org
names:
kind: BlockAffinity
listKind: BlockAffinityList
plural: blockaffinities
singular: blockaffinity
preserveUnknownFields: false
scope: Cluster
versions:
- name: v1
schema:
openAPIV3Schema:
properties:
apiVersion:
description: 'APIVersion defines the versioned schema of this representation
of an object. Servers should convert recognized schemas to the latest
internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
type: string
kind:
description: 'Kind is a string value representing the REST resource this
object represents. Servers may infer this from the endpoint the client
submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
type: string
metadata:
type: object
spec:
description: BlockAffinitySpec contains the specification for a BlockAffinity
resource.
properties:
cidr:
type: string
deleted:
description: Deleted indicates that this block affinity is being deleted.
This field is a string for compatibility with older releases that
mistakenly treat this field as a string.
type: string
node:
type: string
state:
type: string
required:
- cidr
- deleted
- node
- state
type: object
type: object
served: true
storage: true
status:
acceptedNames:
kind: ""
plural: ""
conditions: []
storedVersions: []
---
# Source: calico/templates/kdd-crds.yaml
apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
annotations:
controller-gen.kubebuilder.io/version: (devel)
creationTimestamp: null
name: caliconodestatuses.crd.projectcalico.org
spec:
group: crd.projectcalico.org
names:
kind: CalicoNodeStatus
listKind: CalicoNodeStatusList
plural: caliconodestatuses
singular: caliconodestatus
preserveUnknownFields: false
scope: Cluster
versions:
- name: v1
schema:
openAPIV3Schema:
properties:
apiVersion:
description: 'APIVersion defines the versioned schema of this representation
of an object. Servers should convert recognized schemas to the latest
internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
type: string
kind:
description: 'Kind is a string value representing the REST resource this
object represents. Servers may infer this from the endpoint the client
submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
type: string
metadata:
type: object
spec:
description: CalicoNodeStatusSpec contains the specification for a CalicoNodeStatus
resource.
properties:
classes:
description: Classes declares the types of information to monitor
for this calico/node, and allows for selective status reporting
about certain subsets of information.
items:
type: string
type: array
node:
description: The node name identifies the Calico node instance for
node status.
type: string
updatePeriodSeconds:
description: UpdatePeriodSeconds is the period at which CalicoNodeStatus
should be updated. Set to 0 to disable CalicoNodeStatus refresh.
Maximum update period is one day.
format: int32
type: integer
type: object
status:
description: CalicoNodeStatusStatus defines the observed state of CalicoNodeStatus.
No validation needed for status since it is updated by Calico.
properties:
agent:
description: Agent holds agent status on the node.
properties:
birdV4:
description: BIRDV4 represents the latest observed status of bird4.
properties:
lastBootTime:
description: LastBootTime holds the value of lastBootTime
from bird.ctl output.
type: string
lastReconfigurationTime:
description: LastReconfigurationTime holds the value of lastReconfigTime
from bird.ctl output.
type: string
routerID:
description: Router ID used by bird.
type: string
state:
description: The state of the BGP Daemon.
type: string
version:
description: Version of the BGP daemon
type: string
type: object
birdV6:
description: BIRDV6 represents the latest observed status of bird6.
properties:
lastBootTime:
description: LastBootTime holds the value of lastBootTime
from bird.ctl output.
type: string
lastReconfigurationTime:
description: LastReconfigurationTime holds the value of lastReconfigTime
from bird.ctl output.
type: string
routerID:
description: Router ID used by bird.
type: string
state:
description: The state of the BGP Daemon.
type: string
version:
description: Version of the BGP daemon
type: string
type: object
type: object
bgp:
description: BGP holds node BGP status.
properties:
numberEstablishedV4:
description: The total number of IPv4 established bgp sessions.
type: integer
numberEstablishedV6:
description: The total number of IPv6 established bgp sessions.
type: integer
numberNotEstablishedV4:
description: The total number of IPv4 non-established bgp sessions.
type: integer
numberNotEstablishedV6:
description: The total number of IPv6 non-established bgp sessions.
type: integer
peersV4:
description: PeersV4 represents IPv4 BGP peers status on the node.
items:
description: CalicoNodePeer contains the status of BGP peers
on the node.
properties:
peerIP:
description: IP address of the peer whose condition we are
reporting.
type: string
since:
description: Since the state or reason last changed.
type: string
state:
description: State is the BGP session state.
type: string
type:
description: Type indicates whether this peer is configured
via the node-to-node mesh, or via an explicit global or
per-node BGPPeer object.
type: string
type: object
type: array
peersV6:
description: PeersV6 represents IPv6 BGP peers status on the node.
items:
description: CalicoNodePeer contains the status of BGP peers
on the node.
properties:
peerIP:
description: IP address of the peer whose condition we are
reporting.
type: string
since:
description: Since the state or reason last changed.
type: string
state:
description: State is the BGP session state.
type: string
type:
description: Type indicates whether this peer is configured
via the node-to-node mesh, or via an explicit global or
per-node BGPPeer object.
type: string
type: object
type: array
required:
- numberEstablishedV4
- numberEstablishedV6
- numberNotEstablishedV4
- numberNotEstablishedV6
type: object
lastUpdated:
description: LastUpdated is a timestamp representing the server time
when CalicoNodeStatus object last updated. It is represented in
RFC3339 form and is in UTC.
format: date-time
nullable: true
type: string
routes:
description: Routes reports routes known to the Calico BGP daemon
on the node.
properties:
routesV4:
description: RoutesV4 represents IPv4 routes on the node.
items:
description: CalicoNodeRoute contains the status of BGP routes
on the node.
properties:
destination:
description: Destination of the route.
type: string
gateway:
description: Gateway for the destination.
type: string
interface:
description: Interface for the destination
type: string
learnedFrom:
description: LearnedFrom contains information regarding
where this route originated.
properties:
peerIP:
description: If sourceType is NodeMesh or BGPPeer, IP
address of the router that sent us this route.
type: string
sourceType:
description: Type of the source where a route is learned
from.
type: string
type: object
type:
description: Type indicates if the route is being used for
forwarding or not.
type: string
type: object
type: array
routesV6:
description: RoutesV6 represents IPv6 routes on the node.
items:
description: CalicoNodeRoute contains the status of BGP routes
on the node.
properties:
destination:
description: Destination of the route.
type: string
gateway:
description: Gateway for the destination.
type: string
interface:
description: Interface for the destination
type: string
learnedFrom:
description: LearnedFrom contains information regarding
where this route originated.
properties:
peerIP:
description: If sourceType is NodeMesh or BGPPeer, IP
address of the router that sent us this route.
type: string
sourceType:
description: Type of the source where a route is learned
from.
type: string
type: object
type:
description: Type indicates if the route is being used for
forwarding or not.
type: string
type: object
type: array
type: object
type: object
type: object
served: true
storage: true
status:
acceptedNames:
kind: ""
plural: ""
conditions: []
storedVersions: []
---
# Source: calico/templates/kdd-crds.yaml
apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
name: clusterinformations.crd.projectcalico.org
spec:
group: crd.projectcalico.org
names:
kind: ClusterInformation
listKind: ClusterInformationList
plural: clusterinformations
singular: clusterinformation
preserveUnknownFields: false
scope: Cluster
versions:
- name: v1
schema:
openAPIV3Schema:
description: ClusterInformation contains the cluster specific information.
properties:
apiVersion:
description: 'APIVersion defines the versioned schema of this representation
of an object. Servers should convert recognized schemas to the latest
internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
type: string
kind:
description: 'Kind is a string value representing the REST resource this
object represents. Servers may infer this from the endpoint the client
submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
type: string
metadata:
type: object
spec:
description: ClusterInformationSpec contains the values of describing
the cluster.
properties:
calicoVersion:
description: CalicoVersion is the version of Calico that the cluster
is running
type: string
clusterGUID:
description: ClusterGUID is the GUID of the cluster
type: string
clusterType:
description: ClusterType describes the type of the cluster
type: string
datastoreReady:
description: DatastoreReady is used during significant datastore migrations
to signal to components such as Felix that it should wait before
accessing the datastore.
type: boolean
variant:
description: Variant declares which variant of Calico should be active.
type: string
type: object
type: object
served: true
storage: true
status:
acceptedNames:
kind: ""
plural: ""
conditions: []
storedVersions: []
---
# Source: calico/templates/kdd-crds.yaml
apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
name: felixconfigurations.crd.projectcalico.org
spec:
group: crd.projectcalico.org
names:
kind: FelixConfiguration
listKind: FelixConfigurationList
plural: felixconfigurations
singular: felixconfiguration
preserveUnknownFields: false
scope: Cluster
versions:
- name: v1
schema:
openAPIV3Schema:
description: Felix Configuration contains the configuration for Felix.
properties:
apiVersion:
description: 'APIVersion defines the versioned schema of this representation
of an object. Servers should convert recognized schemas to the latest
internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
type: string
kind:
description: 'Kind is a string value representing the REST resource this
object represents. Servers may infer this from the endpoint the client
submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
type: string
metadata:
type: object
spec:
description: FelixConfigurationSpec contains the values of the Felix configuration.
properties:
allowIPIPPacketsFromWorkloads:
description: 'AllowIPIPPacketsFromWorkloads controls whether Felix
will add a rule to drop IPIP encapsulated traffic from workloads
[Default: false]'
type: boolean
allowVXLANPacketsFromWorkloads:
description: 'AllowVXLANPacketsFromWorkloads controls whether Felix
will add a rule to drop VXLAN encapsulated traffic from workloads
[Default: false]'
type: boolean
awsSrcDstCheck:
description: 'Set source-destination-check on AWS EC2 instances. Accepted
value must be one of "DoNothing", "Enable" or "Disable". [Default:
DoNothing]'
enum:
- DoNothing
- Enable
- Disable
type: string
bpfConnectTimeLoadBalancingEnabled:
description: 'BPFConnectTimeLoadBalancingEnabled when in BPF mode,
controls whether Felix installs the connection-time load balancer. The
connect-time load balancer is required for the host to be able to
reach Kubernetes services and it improves the performance of pod-to-service
connections. The only reason to disable it is for debugging purposes. [Default:
true]'
type: boolean
bpfDSROptoutCIDRs:
description: BPFDSROptoutCIDRs is a list of CIDRs which are excluded
from DSR. That is, clients in those CIDRs will access nodeports
as if BPFExternalServiceMode was set to Tunnel.
items:
type: string
type: array
bpfDataIfacePattern:
description: BPFDataIfacePattern is a regular expression that controls
which interfaces Felix should attach BPF programs to in order to
catch traffic to/from the network. This needs to match the interfaces
that Calico workload traffic flows over as well as any interfaces
that handle incoming traffic to nodeports and services from outside
the cluster. It should not match the workload interfaces (usually
named cali...).
type: string
bpfDisableUnprivileged:
description: 'BPFDisableUnprivileged, if enabled, Felix sets the kernel.unprivileged_bpf_disabled
sysctl to disable unprivileged use of BPF. This ensures that unprivileged
users cannot access Calico''s BPF maps and cannot insert their own
BPF programs to interfere with Calico''s. [Default: true]'
type: boolean
bpfEnabled:
description: 'BPFEnabled, if enabled Felix will use the BPF dataplane.
[Default: false]'
type: boolean
bpfEnforceRPF:
description: 'BPFEnforceRPF enforce strict RPF on all host interfaces
with BPF programs regardless of what is the per-interfaces or global
setting. Possible values are Disabled, Strict or Loose. [Default:
Loose]'
type: string
bpfExtToServiceConnmark:
description: 'BPFExtToServiceConnmark in BPF mode, control a 32bit
mark that is set on connections from an external client to a local
service. This mark allows us to control how packets of that connection
are routed within the host and how is routing interpreted by RPF
check. [Default: 0]'
type: integer
bpfExternalServiceMode:
description: 'BPFExternalServiceMode in BPF mode, controls how connections
from outside the cluster to services (node ports and cluster IPs)
are forwarded to remote workloads. If set to "Tunnel" then both
request and response traffic is tunneled to the remote node. If
set to "DSR", the request traffic is tunneled but the response traffic
is sent directly from the remote node. In "DSR" mode, the remote
node appears to use the IP of the ingress node; this requires a
permissive L2 network. [Default: Tunnel]'
type: string
bpfHostConntrackBypass:
description: 'BPFHostConntrackBypass Controls whether to bypass Linux
conntrack in BPF mode for workloads and services. [Default: true
- bypass Linux conntrack]'
type: boolean
bpfKubeProxyEndpointSlicesEnabled:
description: BPFKubeProxyEndpointSlicesEnabled in BPF mode, controls
whether Felix's embedded kube-proxy accepts EndpointSlices or not.
type: boolean
bpfKubeProxyIptablesCleanupEnabled:
description: 'BPFKubeProxyIptablesCleanupEnabled, if enabled in BPF
mode, Felix will proactively clean up the upstream Kubernetes kube-proxy''s
iptables chains. Should only be enabled if kube-proxy is not running. [Default:
true]'
type: boolean
bpfKubeProxyMinSyncPeriod:
description: 'BPFKubeProxyMinSyncPeriod, in BPF mode, controls the
minimum time between updates to the dataplane for Felix''s embedded
kube-proxy. Lower values give reduced set-up latency. Higher values
reduce Felix CPU usage by batching up more work. [Default: 1s]'
type: string
bpfL3IfacePattern:
description: BPFL3IfacePattern is a regular expression that allows
to list tunnel devices like wireguard or vxlan (i.e., L3 devices)
in addition to BPFDataIfacePattern. That is, tunnel interfaces not
created by Calico, that Calico workload traffic flows over as well
as any interfaces that handle incoming traffic to nodeports and
services from outside the cluster.
type: string
bpfLogLevel:
description: 'BPFLogLevel controls the log level of the BPF programs
when in BPF dataplane mode. One of "Off", "Info", or "Debug". The
logs are emitted to the BPF trace pipe, accessible with the command
`tc exec bpf debug`. [Default: Off].'
type: string
bpfMapSizeConntrack:
description: 'BPFMapSizeConntrack sets the size for the conntrack
map. This map must be large enough to hold an entry for each active
connection. Warning: changing the size of the conntrack map can
cause disruption.'
type: integer
bpfMapSizeIPSets:
description: BPFMapSizeIPSets sets the size for ipsets map. The IP
sets map must be large enough to hold an entry for each endpoint
matched by every selector in the source/destination matches in network
policy. Selectors such as "all()" can result in large numbers of
entries (one entry per endpoint in that case).
type: integer
bpfMapSizeIfState:
description: BPFMapSizeIfState sets the size for ifstate map. The
ifstate map must be large enough to hold an entry for each device
(host + workloads) on a host.
type: integer
bpfMapSizeNATAffinity:
type: integer
bpfMapSizeNATBackend:
description: BPFMapSizeNATBackend sets the size for nat back end map.
This is the total number of endpoints. This is mostly more than
the size of the number of services.
type: integer
bpfMapSizeNATFrontend:
description: BPFMapSizeNATFrontend sets the size for nat front end
map. FrontendMap should be large enough to hold an entry for each
nodeport, external IP and each port in each service.
type: integer
bpfMapSizeRoute:
description: BPFMapSizeRoute sets the size for the routes map. The
routes map should be large enough to hold one entry per workload
and a handful of entries per host (enough to cover its own IPs and
tunnel IPs).
type: integer
bpfPSNATPorts:
anyOf:
- type: integer
- type: string
description: 'BPFPSNATPorts sets the range from which we randomly
pick a port if there is a source port collision. This should be
within the ephemeral range as defined by RFC 6056 (1024–65535) and
preferably outside the ephemeral ranges used by common operating
systems. Linux uses 32768–60999, while others mostly use the IANA
defined range 49152–65535. It is not necessarily a problem if this
range overlaps with the operating systems. Both ends of the range
are inclusive. [Default: 20000:29999]'
pattern: ^.*
x-kubernetes-int-or-string: true
bpfPolicyDebugEnabled:
description: BPFPolicyDebugEnabled when true, Felix records detailed
information about the BPF policy programs, which can be examined
with the calico-bpf command-line tool.
type: boolean
chainInsertMode:
description: 'ChainInsertMode controls whether Felix hooks the kernel''s
top-level iptables chains by inserting a rule at the top of the
chain or by appending a rule at the bottom. insert is the safe default
since it prevents Calico''s rules from being bypassed. If you switch
to append mode, be sure that the other rules in the chains signal
acceptance by falling through to the Calico rules, otherwise the
Calico policy will be bypassed. [Default: insert]'
type: string
dataplaneDriver:
description: DataplaneDriver filename of the external dataplane driver
to use. Only used if UseInternalDataplaneDriver is set to false.
type: string
dataplaneWatchdogTimeout:
description: "DataplaneWatchdogTimeout is the readiness/liveness timeout
used for Felix's (internal) dataplane driver. Increase this value
if you experience spurious non-ready or non-live events when Felix
is under heavy load. Decrease the value to get felix to report non-live
or non-ready more quickly. [Default: 90s] \n Deprecated: replaced
by the generic HealthTimeoutOverrides."
type: string
debugDisableLogDropping:
type: boolean
debugMemoryProfilePath:
type: string
debugSimulateCalcGraphHangAfter:
type: string
debugSimulateDataplaneHangAfter:
type: string
defaultEndpointToHostAction:
description: 'DefaultEndpointToHostAction controls what happens to
traffic that goes from a workload endpoint to the host itself (after
the traffic hits the endpoint egress policy). By default Calico
blocks traffic from workload endpoints to the host itself with an
iptables "DROP" action. If you want to allow some or all traffic
from endpoint to host, set this parameter to RETURN or ACCEPT. Use
RETURN if you have your own rules in the iptables "INPUT" chain;
Calico will insert its rules at the top of that chain, then "RETURN"
packets to the "INPUT" chain once it has completed processing workload
endpoint egress policy. Use ACCEPT to unconditionally accept packets
from workloads after processing workload endpoint egress policy.
[Default: Drop]'
type: string
deviceRouteProtocol:
description: This defines the route protocol added to programmed device
routes, by default this will be RTPROT_BOOT when left blank.
type: integer
deviceRouteSourceAddress:
description: This is the IPv4 source address to use on programmed
device routes. By default the source address is left blank, leaving
the kernel to choose the source address used.
type: string
deviceRouteSourceAddressIPv6:
description: This is the IPv6 source address to use on programmed
device routes. By default the source address is left blank, leaving
the kernel to choose the source address used.
type: string
disableConntrackInvalidCheck:
type: boolean
endpointReportingDelay:
type: string
endpointReportingEnabled:
type: boolean
externalNodesList:
description: ExternalNodesCIDRList is a list of CIDR's of external-non-calico-nodes
which may source tunnel traffic and have the tunneled traffic be
accepted at calico nodes.
items:
type: string
type: array
failsafeInboundHostPorts:
description: 'FailsafeInboundHostPorts is a list of UDP/TCP ports
and CIDRs that Felix will allow incoming traffic to host endpoints
on irrespective of the security policy. This is useful to avoid
accidentally cutting off a host with incorrect configuration. For
back-compatibility, if the protocol is not specified, it defaults
to "tcp". If a CIDR is not specified, it will allow traffic from
all addresses. To disable all inbound host ports, use the value
none. The default value allows ssh access and DHCP. [Default: tcp:22,
udp:68, tcp:179, tcp:2379, tcp:2380, tcp:6443, tcp:6666, tcp:6667]'
items:
description: ProtoPort is combination of protocol, port, and CIDR.
Protocol and port must be specified.
properties:
net:
type: string
port:
type: integer
protocol:
type: string
required:
- port
- protocol
type: object
type: array
failsafeOutboundHostPorts:
description: 'FailsafeOutboundHostPorts is a list of UDP/TCP ports
and CIDRs that Felix will allow outgoing traffic from host endpoints
to irrespective of the security policy. This is useful to avoid
accidentally cutting off a host with incorrect configuration. For
back-compatibility, if the protocol is not specified, it defaults
to "tcp". If a CIDR is not specified, it will allow traffic from
all addresses. To disable all outbound host ports, use the value
none. The default value opens etcd''s standard ports to ensure that
Felix does not get cut off from etcd as well as allowing DHCP and
DNS. [Default: tcp:179, tcp:2379, tcp:2380, tcp:6443, tcp:6666,
tcp:6667, udp:53, udp:67]'
items:
description: ProtoPort is combination of protocol, port, and CIDR.
Protocol and port must be specified.
properties:
net:
type: string
port:
type: integer
protocol:
type: string
required:
- port
- protocol
type: object
type: array
featureDetectOverride:
description: FeatureDetectOverride is used to override feature detection
based on auto-detected platform capabilities. Values are specified
in a comma separated list with no spaces, example; "SNATFullyRandom=true,MASQFullyRandom=false,RestoreSupportsLock=". "true"
or "false" will force the feature, empty or omitted values are auto-detected.
type: string
featureGates:
description: FeatureGates is used to enable or disable tech-preview
Calico features. Values are specified in a comma separated list
with no spaces, example; "BPFConnectTimeLoadBalancingWorkaround=enabled,XyZ=false".
This is used to enable features that are not fully production ready.
type: string
floatingIPs:
description: FloatingIPs configures whether or not Felix will program
non-OpenStack floating IP addresses. (OpenStack-derived floating
IPs are always programmed, regardless of this setting.)
enum:
- Enabled
- Disabled
type: string
genericXDPEnabled:
description: 'GenericXDPEnabled enables Generic XDP so network cards
that don''t support XDP offload or driver modes can use XDP. This
is not recommended since it doesn''t provide better performance
than iptables. [Default: false]'
type: boolean
healthEnabled:
type: boolean
healthHost:
type: string
healthPort:
type: integer
healthTimeoutOverrides:
description: HealthTimeoutOverrides allows the internal watchdog timeouts
of individual subcomponents to be overridden. This is useful for
working around "false positive" liveness timeouts that can occur
in particularly stressful workloads or if CPU is constrained. For
a list of active subcomponents, see Felix's logs.
items:
properties:
name:
type: string
timeout:
type: string
required:
- name
- timeout
type: object
type: array
interfaceExclude:
description: 'InterfaceExclude is a comma-separated list of interfaces
that Felix should exclude when monitoring for host endpoints. The
default value ensures that Felix ignores Kubernetes'' IPVS dummy
interface, which is used internally by kube-proxy. If you want to
exclude multiple interface names using a single value, the list
supports regular expressions. For regular expressions you must wrap
the value with ''/''. For example having values ''/^kube/,veth1''
will exclude all interfaces that begin with ''kube'' and also the
interface ''veth1''. [Default: kube-ipvs0]'
type: string
interfacePrefix:
description: 'InterfacePrefix is the interface name prefix that identifies
workload endpoints and so distinguishes them from host endpoint
interfaces. Note: in environments other than bare metal, the orchestrators
configure this appropriately. For example our Kubernetes and Docker
integrations set the ''cali'' value, and our OpenStack integration
sets the ''tap'' value. [Default: cali]'
type: string
interfaceRefreshInterval:
description: InterfaceRefreshInterval is the period at which Felix
rescans local interfaces to verify their state. The rescan can be
disabled by setting the interval to 0.
type: string
ipipEnabled:
description: 'IPIPEnabled overrides whether Felix should configure
an IPIP interface on the host. Optional as Felix determines this
based on the existing IP pools. [Default: nil (unset)]'
type: boolean
ipipMTU:
description: 'IPIPMTU is the MTU to set on the tunnel device. See
Configuring MTU [Default: 1440]'
type: integer
ipsetsRefreshInterval:
description: 'IpsetsRefreshInterval is the period at which Felix re-checks
all iptables state to ensure that no other process has accidentally
broken Calico''s rules. Set to 0 to disable iptables refresh. [Default:
90s]'
type: string
iptablesBackend:
description: IptablesBackend specifies which backend of iptables will
be used. The default is Auto.
type: string
iptablesFilterAllowAction:
type: string
iptablesFilterDenyAction:
description: IptablesFilterDenyAction controls what happens to traffic
that is denied by network policy. By default Calico blocks traffic
with an iptables "DROP" action. If you want to use "REJECT" action
instead you can configure it in here.
type: string
iptablesLockFilePath:
description: 'IptablesLockFilePath is the location of the iptables
lock file. You may need to change this if the lock file is not in
its standard location (for example if you have mapped it into Felix''s
container at a different path). [Default: /run/xtables.lock]'
type: string
iptablesLockProbeInterval:
description: 'IptablesLockProbeInterval is the time that Felix will
wait between attempts to acquire the iptables lock if it is not
available. Lower values make Felix more responsive when the lock
is contended, but use more CPU. [Default: 50ms]'
type: string
iptablesLockTimeout:
description: 'IptablesLockTimeout is the time that Felix will wait
for the iptables lock, or 0, to disable. To use this feature, Felix
must share the iptables lock file with all other processes that
also take the lock. When running Felix inside a container, this
requires the /run directory of the host to be mounted into the calico/node
or calico/felix container. [Default: 0s disabled]'
type: string
iptablesMangleAllowAction:
type: string
iptablesMarkMask:
description: 'IptablesMarkMask is the mask that Felix selects its
IPTables Mark bits from. Should be a 32 bit hexadecimal number with
at least 8 bits set, none of which clash with any other mark bits
in use on the system. [Default: 0xff000000]'
format: int32
type: integer
iptablesNATOutgoingInterfaceFilter:
type: string
iptablesPostWriteCheckInterval:
description: 'IptablesPostWriteCheckInterval is the period after Felix
has done a write to the dataplane that it schedules an extra read
back in order to check the write was not clobbered by another process.
This should only occur if another application on the system doesn''t
respect the iptables lock. [Default: 1s]'
type: string
iptablesRefreshInterval:
description: 'IptablesRefreshInterval is the period at which Felix
re-checks the IP sets in the dataplane to ensure that no other process
has accidentally broken Calico''s rules. Set to 0 to disable IP
sets refresh. Note: the default for this value is lower than the
other refresh intervals as a workaround for a Linux kernel bug that
was fixed in kernel version 4.11. If you are using v4.11 or greater
you may want to set this to a higher value to reduce Felix CPU
usage. [Default: 10s]'
type: string
ipv6Support:
description: IPv6Support controls whether Felix enables support for
IPv6 (if supported by the in-use dataplane).
type: boolean
kubeNodePortRanges:
description: 'KubeNodePortRanges holds list of port ranges used for
service node ports. Only used if felix detects kube-proxy running
in ipvs mode. Felix uses these ranges to separate host and workload
traffic. [Default: 30000:32767].'
items:
anyOf:
- type: integer
- type: string
pattern: ^.*
x-kubernetes-int-or-string: true
type: array
logDebugFilenameRegex:
description: LogDebugFilenameRegex controls which source code files
have their Debug log output included in the logs. Only logs from
files with names that match the given regular expression are included. The
filter only applies to Debug level logs.
type: string
logFilePath:
description: 'LogFilePath is the full path to the Felix log. Set to
none to disable file logging. [Default: /var/log/calico/felix.log]'
type: string
logPrefix:
description: 'LogPrefix is the log prefix that Felix uses when rendering
LOG rules. [Default: calico-packet]'
type: string
logSeverityFile:
description: 'LogSeverityFile is the log severity above which logs
are sent to the log file. [Default: Info]'
type: string
logSeverityScreen:
description: 'LogSeverityScreen is the log severity above which logs
are sent to the stdout. [Default: Info]'
type: string
logSeveritySys:
description: 'LogSeveritySys is the log severity above which logs
are sent to the syslog. Set to None for no logging to syslog. [Default:
Info]'
type: string
maxIpsetSize:
type: integer
metadataAddr:
description: 'MetadataAddr is the IP address or domain name of the
server that can answer VM queries for cloud-init metadata. In OpenStack,
this corresponds to the machine running nova-api (or in Ubuntu,
nova-api-metadata). A value of none (case insensitive) means that
Felix should not set up any NAT rule for the metadata path. [Default:
127.0.0.1]'
type: string
metadataPort:
description: 'MetadataPort is the port of the metadata server. This,
combined with global.MetadataAddr (if not ''None''), is used to
set up a NAT rule, from 169.254.169.254:80 to MetadataAddr:MetadataPort.
In most cases this should not need to be changed [Default: 8775].'
type: integer
mtuIfacePattern:
description: MTUIfacePattern is a regular expression that controls
which interfaces Felix should scan in order to calculate the host's
MTU. This should not match workload interfaces (usually named cali...).
type: string
natOutgoingAddress:
description: NATOutgoingAddress specifies an address to use when performing
source NAT for traffic in a natOutgoing pool that is leaving the
network. By default the address used is an address on the interface
the traffic is leaving on (ie it uses the iptables MASQUERADE target)
type: string
natPortRange:
anyOf:
- type: integer
- type: string
description: NATPortRange specifies the range of ports that is used
for port mapping when doing outgoing NAT. When unset the default
behavior of the network stack is used.
pattern: ^.*
x-kubernetes-int-or-string: true
netlinkTimeout:
type: string
openstackRegion:
description: 'OpenstackRegion is the name of the region that a particular
Felix belongs to. In a multi-region Calico/OpenStack deployment,
this must be configured somehow for each Felix (here in the datamodel,
or in felix.cfg or the environment on each compute node), and must
match the [calico] openstack_region value configured in neutron.conf
on each node. [Default: Empty]'
type: string
policySyncPathPrefix:
description: 'PolicySyncPathPrefix is used by Felix to communicate
policy changes to external services, like Application layer policy.
[Default: Empty]'
type: string
prometheusGoMetricsEnabled:
description: 'PrometheusGoMetricsEnabled disables Go runtime metrics
collection, which the Prometheus client does by default, when set
to false. This reduces the number of metrics reported, reducing
Prometheus load. [Default: true]'
type: boolean
prometheusMetricsEnabled:
description: 'PrometheusMetricsEnabled enables the Prometheus metrics
server in Felix if set to true. [Default: false]'
type: boolean
prometheusMetricsHost:
description: 'PrometheusMetricsHost is the host that the Prometheus
metrics server should bind to. [Default: empty]'
type: string
prometheusMetricsPort:
description: 'PrometheusMetricsPort is the TCP port that the Prometheus
metrics server should bind to. [Default: 9091]'
type: integer
prometheusProcessMetricsEnabled:
description: 'PrometheusProcessMetricsEnabled disables process metrics
collection, which the Prometheus client does by default, when set
to false. This reduces the number of metrics reported, reducing
Prometheus load. [Default: true]'
type: boolean
prometheusWireGuardMetricsEnabled:
description: 'PrometheusWireGuardMetricsEnabled disables wireguard
metrics collection, which the Prometheus client does by default,
when set to false. This reduces the number of metrics reported,
reducing Prometheus load. [Default: true]'
type: boolean
removeExternalRoutes:
description: Whether or not to remove device routes that have not
been programmed by Felix. Disabling this will allow external applications
to also add device routes. This is enabled by default which means
we will remove externally added routes.
type: boolean
reportingInterval:
description: 'ReportingInterval is the interval at which Felix reports
its status into the datastore or 0 to disable. Must be non-zero
in OpenStack deployments. [Default: 30s]'
type: string
reportingTTL:
description: 'ReportingTTL is the time-to-live setting for process-wide
status reports. [Default: 90s]'
type: string
routeRefreshInterval:
description: 'RouteRefreshInterval is the period at which Felix re-checks
the routes in the dataplane to ensure that no other process has
accidentally broken Calico''s rules. Set to 0 to disable route refresh.
[Default: 90s]'
type: string
routeSource:
description: 'RouteSource configures where Felix gets its routing
information. - WorkloadIPs: use workload endpoints to construct
routes. - CalicoIPAM: the default - use IPAM data to construct routes.'
type: string
routeSyncDisabled:
description: RouteSyncDisabled will disable all operations performed
on the route table. Set to true to run in network-policy mode only.
type: boolean
routeTableRange:
description: Deprecated in favor of RouteTableRanges. Calico programs
additional Linux route tables for various purposes. RouteTableRange
specifies the indices of the route tables that Calico should use.
properties:
max:
type: integer
min:
type: integer
required:
- max
- min
type: object
routeTableRanges:
description: Calico programs additional Linux route tables for various
purposes. RouteTableRanges specifies a set of table index ranges
that Calico should use. Deprecates `RouteTableRange`, overrides `RouteTableRange`.
items:
properties:
max:
type: integer
min:
type: integer
required:
- max
- min
type: object
type: array
serviceLoopPrevention:
description: 'When service IP advertisement is enabled, prevent routing
loops to service IPs that are not in use, by dropping or rejecting
packets that do not get DNAT''d by kube-proxy. Unless set to "Disabled",
in which case such routing loops continue to be allowed. [Default:
Drop]'
type: string
sidecarAccelerationEnabled:
description: 'SidecarAccelerationEnabled enables experimental sidecar
acceleration [Default: false]'
type: boolean
usageReportingEnabled:
description: 'UsageReportingEnabled reports anonymous Calico version
number and cluster size to projectcalico.org. Logs warnings returned
by the usage server. For example, if a significant security vulnerability
has been discovered in the version of Calico being used. [Default:
true]'
type: boolean
usageReportingInitialDelay:
description: 'UsageReportingInitialDelay controls the minimum delay
before Felix makes a report. [Default: 300s]'
type: string
usageReportingInterval:
description: 'UsageReportingInterval controls the interval at which
Felix makes reports. [Default: 86400s]'
type: string
useInternalDataplaneDriver:
description: UseInternalDataplaneDriver, if true, Felix will use its
internal dataplane programming logic. If false, it will launch
an external dataplane driver and communicate with it over protobuf.
type: boolean
vxlanEnabled:
description: 'VXLANEnabled overrides whether Felix should create the
VXLAN tunnel device for IPv4 VXLAN networking. Optional as Felix
determines this based on the existing IP pools. [Default: nil (unset)]'
type: boolean
vxlanMTU:
description: 'VXLANMTU is the MTU to set on the IPv4 VXLAN tunnel
device. See Configuring MTU [Default: 1410]'
type: integer
vxlanMTUV6:
description: 'VXLANMTUV6 is the MTU to set on the IPv6 VXLAN tunnel
device. See Configuring MTU [Default: 1390]'
type: integer
vxlanPort:
type: integer
vxlanVNI:
type: integer
wireguardEnabled:
description: 'WireguardEnabled controls whether Wireguard is enabled
for IPv4 (encapsulating IPv4 traffic over an IPv4 underlay network).
[Default: false]'
type: boolean
wireguardEnabledV6:
description: 'WireguardEnabledV6 controls whether Wireguard is enabled
for IPv6 (encapsulating IPv6 traffic over an IPv6 underlay network).
[Default: false]'
type: boolean
wireguardHostEncryptionEnabled:
description: 'WireguardHostEncryptionEnabled controls whether Wireguard
host-to-host encryption is enabled. [Default: false]'
type: boolean
wireguardInterfaceName:
description: 'WireguardInterfaceName specifies the name to use for
the IPv4 Wireguard interface. [Default: wireguard.cali]'
type: string
wireguardInterfaceNameV6:
description: 'WireguardInterfaceNameV6 specifies the name to use for
the IPv6 Wireguard interface. [Default: wg-v6.cali]'
type: string
wireguardKeepAlive:
description: 'WireguardKeepAlive controls Wireguard PersistentKeepalive
option. Set 0 to disable. [Default: 0]'
type: string
wireguardListeningPort:
description: 'WireguardListeningPort controls the listening port used
by IPv4 Wireguard. [Default: 51820]'
type: integer
wireguardListeningPortV6:
description: 'WireguardListeningPortV6 controls the listening port
used by IPv6 Wireguard. [Default: 51821]'
type: integer
wireguardMTU:
description: 'WireguardMTU controls the MTU on the IPv4 Wireguard
interface. See Configuring MTU [Default: 1440]'
type: integer
wireguardMTUV6:
description: 'WireguardMTUV6 controls the MTU on the IPv6 Wireguard
interface. See Configuring MTU [Default: 1420]'
type: integer
wireguardRoutingRulePriority:
description: 'WireguardRoutingRulePriority controls the priority value
to use for the Wireguard routing rule. [Default: 99]'
type: integer
workloadSourceSpoofing:
description: WorkloadSourceSpoofing controls whether pods can use
the allowedSourcePrefixes annotation to send traffic with a source
IP address that is not theirs. This is disabled by default. When
set to "Any", pods can request any prefix.
type: string
xdpEnabled:
description: 'XDPEnabled enables XDP acceleration for suitable untracked
incoming deny rules. [Default: true]'
type: boolean
xdpRefreshInterval:
description: 'XDPRefreshInterval is the period at which Felix re-checks
all XDP state to ensure that no other process has accidentally broken
Calico''s BPF maps or attached programs. Set to 0 to disable XDP
refresh. [Default: 90s]'
type: string
type: object
type: object
served: true
storage: true
status:
acceptedNames:
kind: ""
plural: ""
conditions: []
storedVersions: []
---
# Source: calico/templates/kdd-crds.yaml
apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
name: globalnetworkpolicies.crd.projectcalico.org
spec:
group: crd.projectcalico.org
names:
kind: GlobalNetworkPolicy
listKind: GlobalNetworkPolicyList
plural: globalnetworkpolicies
singular: globalnetworkpolicy
preserveUnknownFields: false
scope: Cluster
versions:
- name: v1
schema:
openAPIV3Schema:
properties:
apiVersion:
description: 'APIVersion defines the versioned schema of this representation
of an object. Servers should convert recognized schemas to the latest
internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
type: string
kind:
description: 'Kind is a string value representing the REST resource this
object represents. Servers may infer this from the endpoint the client
submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
type: string
metadata:
type: object
spec:
properties:
applyOnForward:
description: ApplyOnForward indicates to apply the rules in this policy
on forward traffic.
type: boolean
doNotTrack:
description: DoNotTrack indicates whether packets matched by the rules
in this policy should go through the data plane's connection tracking,
such as Linux conntrack. If True, the rules in this policy are
applied before any data plane connection tracking, and packets allowed
by this policy are marked as not to be tracked.
type: boolean
egress:
description: The ordered set of egress rules. Each rule contains
a set of packet match criteria and a corresponding action to apply.
items:
description: "A Rule encapsulates a set of match criteria and an
action. Both selector-based security Policy and security Profiles
reference rules - separated out as a list of rules for both ingress
and egress packet matching. \n Each positive match criteria has
a negated version, prefixed with \"Not\". All the match criteria
within a rule must be satisfied for a packet to match. A single
rule can contain the positive and negative version of a match
and both must be satisfied for the rule to match."
properties:
action:
type: string
destination:
description: Destination contains the match criteria that apply
to destination entity.
properties:
namespaceSelector:
description: "NamespaceSelector is an optional field that
contains a selector expression. Only traffic that originates
from (or terminates at) endpoints within the selected
namespaces will be matched. When both NamespaceSelector
and another selector are defined on the same rule, then
only workload endpoints that are matched by both selectors
will be selected by the rule. \n For NetworkPolicy, an
empty NamespaceSelector implies that the Selector is limited
to selecting only workload endpoints in the same namespace
as the NetworkPolicy. \n For NetworkPolicy, `global()`
NamespaceSelector implies that the Selector is limited
to selecting only GlobalNetworkSet or HostEndpoint. \n
For GlobalNetworkPolicy, an empty NamespaceSelector implies
the Selector applies to workload endpoints across all
namespaces."
type: string
nets:
description: Nets is an optional field that restricts the
rule to only apply to traffic that originates from (or
terminates at) IP addresses in any of the given subnets.
items:
type: string
type: array
notNets:
description: NotNets is the negated version of the Nets
field.
items:
type: string
type: array
notPorts:
description: NotPorts is the negated version of the Ports
field. Since only some protocols have ports, if any ports
are specified it requires the Protocol match in the Rule
to be set to "TCP" or "UDP".
items:
anyOf:
- type: integer
- type: string
pattern: ^.*
x-kubernetes-int-or-string: true
type: array
notSelector:
description: NotSelector is the negated version of the Selector
field. See Selector field for subtleties with negated
selectors.
type: string
ports:
description: "Ports is an optional field that restricts
the rule to only apply to traffic that has a source (destination)
port that matches one of these ranges/values. This value
is a list of integers or strings that represent ranges
of ports. \n Since only some protocols have ports, if
any ports are specified it requires the Protocol match
in the Rule to be set to \"TCP\" or \"UDP\"."
items:
anyOf:
- type: integer
- type: string
pattern: ^.*
x-kubernetes-int-or-string: true
type: array
selector:
description: "Selector is an optional field that contains
a selector expression (see Policy for sample syntax).
\ Only traffic that originates from (terminates at) endpoints
matching the selector will be matched. \n Note that: in
addition to the negated version of the Selector (see NotSelector
below), the selector expression syntax itself supports
negation. The two types of negation are subtly different.
One negates the set of matched endpoints, the other negates
the whole match: \n \tSelector = \"!has(my_label)\" matches
packets that are from other Calico-controlled \tendpoints
that do not have the label \"my_label\". \n \tNotSelector
= \"has(my_label)\" matches packets that are not from
Calico-controlled \tendpoints that do have the label \"my_label\".
\n The effect is that the latter will accept packets from
non-Calico sources whereas the former is limited to packets
from Calico-controlled endpoints."
type: string
serviceAccounts:
description: ServiceAccounts is an optional field that restricts
the rule to only apply to traffic that originates from
(or terminates at) a pod running as a matching service
account.
properties:
names:
description: Names is an optional field that restricts
the rule to only apply to traffic that originates
from (or terminates at) a pod running as a service
account whose name is in the list.
items:
type: string
type: array
selector:
description: Selector is an optional field that restricts
the rule to only apply to traffic that originates
from (or terminates at) a pod running as a service
account that matches the given label selector. If
both Names and Selector are specified then they are
AND'ed.
type: string
type: object
services:
description: "Services is an optional field that contains
options for matching Kubernetes Services. If specified,
only traffic that originates from or terminates at endpoints
within the selected service(s) will be matched, and only
to/from each endpoint's port. \n Services cannot be specified
on the same rule as Selector, NotSelector, NamespaceSelector,
Nets, NotNets or ServiceAccounts. \n Ports and NotPorts
can only be specified with Services on ingress rules."
properties:
name:
description: Name specifies the name of a Kubernetes
Service to match.
type: string
namespace:
description: Namespace specifies the namespace of the
given Service. If left empty, the rule will match
within this policy's namespace.
type: string
type: object
type: object
http:
description: HTTP contains match criteria that apply to HTTP
requests.
properties:
methods:
description: Methods is an optional field that restricts
the rule to apply only to HTTP requests that use one of
the listed HTTP Methods (e.g. GET, PUT, etc.) Multiple
methods are OR'd together.
items:
type: string
type: array
paths:
description: 'Paths is an optional field that restricts
the rule to apply to HTTP requests that use one of the
listed HTTP Paths. Multiple paths are OR''d together.
e.g: - exact: /foo - prefix: /bar NOTE: Each entry may
ONLY specify either a `exact` or a `prefix` match. The
validator will check for it.'
items:
description: 'HTTPPath specifies an HTTP path to match.
It may be either of the form: exact: : which matches
the path exactly or prefix: : which matches
the path prefix'
properties:
exact:
type: string
prefix:
type: string
type: object
type: array
type: object
icmp:
description: ICMP is an optional field that restricts the rule
to apply to a specific type and code of ICMP traffic. This
should only be specified if the Protocol field is set to "ICMP"
or "ICMPv6".
properties:
code:
description: Match on a specific ICMP code. If specified,
the Type value must also be specified. This is a technical
limitation imposed by the kernel's iptables firewall,
which Calico uses to enforce the rule.
type: integer
type:
description: Match on a specific ICMP type. For example
a value of 8 refers to ICMP Echo Request (i.e. pings).
type: integer
type: object
ipVersion:
description: IPVersion is an optional field that restricts the
rule to only match a specific IP version.
type: integer
metadata:
description: Metadata contains additional information for this
rule
properties:
annotations:
additionalProperties:
type: string
description: Annotations is a set of key value pairs that
give extra information about the rule
type: object
type: object
notICMP:
description: NotICMP is the negated version of the ICMP field.
properties:
code:
description: Match on a specific ICMP code. If specified,
the Type value must also be specified. This is a technical
limitation imposed by the kernel's iptables firewall,
which Calico uses to enforce the rule.
type: integer
type:
description: Match on a specific ICMP type. For example
a value of 8 refers to ICMP Echo Request (i.e. pings).
type: integer
type: object
notProtocol:
anyOf:
- type: integer
- type: string
description: NotProtocol is the negated version of the Protocol
field.
pattern: ^.*
x-kubernetes-int-or-string: true
protocol:
anyOf:
- type: integer
- type: string
description: "Protocol is an optional field that restricts the
rule to only apply to traffic of a specific IP protocol. Required
if any of the EntityRules contain Ports (because ports only
apply to certain protocols). \n Must be one of these string
values: \"TCP\", \"UDP\", \"ICMP\", \"ICMPv6\", \"SCTP\",
\"UDPLite\" or an integer in the range 1-255."
pattern: ^.*
x-kubernetes-int-or-string: true
source:
description: Source contains the match criteria that apply to
source entity.
properties:
namespaceSelector:
description: "NamespaceSelector is an optional field that
contains a selector expression. Only traffic that originates
from (or terminates at) endpoints within the selected
namespaces will be matched. When both NamespaceSelector
and another selector are defined on the same rule, then
only workload endpoints that are matched by both selectors
will be selected by the rule. \n For NetworkPolicy, an
empty NamespaceSelector implies that the Selector is limited
to selecting only workload endpoints in the same namespace
as the NetworkPolicy. \n For NetworkPolicy, `global()`
NamespaceSelector implies that the Selector is limited
to selecting only GlobalNetworkSet or HostEndpoint. \n
For GlobalNetworkPolicy, an empty NamespaceSelector implies
the Selector applies to workload endpoints across all
namespaces."
type: string
nets:
description: Nets is an optional field that restricts the
rule to only apply to traffic that originates from (or
terminates at) IP addresses in any of the given subnets.
items:
type: string
type: array
notNets:
description: NotNets is the negated version of the Nets
field.
items:
type: string
type: array
notPorts:
description: NotPorts is the negated version of the Ports
field. Since only some protocols have ports, if any ports
are specified it requires the Protocol match in the Rule
to be set to "TCP" or "UDP".
items:
anyOf:
- type: integer
- type: string
pattern: ^.*
x-kubernetes-int-or-string: true
type: array
notSelector:
description: NotSelector is the negated version of the Selector
field. See Selector field for subtleties with negated
selectors.
type: string
ports:
description: "Ports is an optional field that restricts
the rule to only apply to traffic that has a source (destination)
port that matches one of these ranges/values. This value
is a list of integers or strings that represent ranges
of ports. \n Since only some protocols have ports, if
any ports are specified it requires the Protocol match
in the Rule to be set to \"TCP\" or \"UDP\"."
items:
anyOf:
- type: integer
- type: string
pattern: ^.*
x-kubernetes-int-or-string: true
type: array
selector:
description: "Selector is an optional field that contains
a selector expression (see Policy for sample syntax).
\ Only traffic that originates from (terminates at) endpoints
matching the selector will be matched. \n Note that: in
addition to the negated version of the Selector (see NotSelector
below), the selector expression syntax itself supports
negation. The two types of negation are subtly different.
One negates the set of matched endpoints, the other negates
the whole match: \n \tSelector = \"!has(my_label)\" matches
packets that are from other Calico-controlled \tendpoints
that do not have the label \"my_label\". \n \tNotSelector
= \"has(my_label)\" matches packets that are not from
Calico-controlled \tendpoints that do have the label \"my_label\".
\n The effect is that the latter will accept packets from
non-Calico sources whereas the former is limited to packets
from Calico-controlled endpoints."
type: string
serviceAccounts:
description: ServiceAccounts is an optional field that restricts
the rule to only apply to traffic that originates from
(or terminates at) a pod running as a matching service
account.
properties:
names:
description: Names is an optional field that restricts
the rule to only apply to traffic that originates
from (or terminates at) a pod running as a service
account whose name is in the list.
items:
type: string
type: array
selector:
description: Selector is an optional field that restricts
the rule to only apply to traffic that originates
from (or terminates at) a pod running as a service
account that matches the given label selector. If
both Names and Selector are specified then they are
AND'ed.
type: string
type: object
services:
description: "Services is an optional field that contains
options for matching Kubernetes Services. If specified,
only traffic that originates from or terminates at endpoints
within the selected service(s) will be matched, and only
to/from each endpoint's port. \n Services cannot be specified
on the same rule as Selector, NotSelector, NamespaceSelector,
Nets, NotNets or ServiceAccounts. \n Ports and NotPorts
can only be specified with Services on ingress rules."
properties:
name:
description: Name specifies the name of a Kubernetes
Service to match.
type: string
namespace:
description: Namespace specifies the namespace of the
given Service. If left empty, the rule will match
within this policy's namespace.
type: string
type: object
type: object
required:
- action
type: object
type: array
ingress:
description: The ordered set of ingress rules. Each rule contains
a set of packet match criteria and a corresponding action to apply.
items:
description: "A Rule encapsulates a set of match criteria and an
action. Both selector-based security Policy and security Profiles
reference rules - separated out as a list of rules for both ingress
and egress packet matching. \n Each positive match criteria has
a negated version, prefixed with \"Not\". All the match criteria
within a rule must be satisfied for a packet to match. A single
rule can contain the positive and negative version of a match
and both must be satisfied for the rule to match."
properties:
action:
type: string
destination:
description: Destination contains the match criteria that apply
to destination entity.
properties:
namespaceSelector:
description: "NamespaceSelector is an optional field that
contains a selector expression. Only traffic that originates
from (or terminates at) endpoints within the selected
namespaces will be matched. When both NamespaceSelector
and another selector are defined on the same rule, then
only workload endpoints that are matched by both selectors
will be selected by the rule. \n For NetworkPolicy, an
empty NamespaceSelector implies that the Selector is limited
to selecting only workload endpoints in the same namespace
as the NetworkPolicy. \n For NetworkPolicy, `global()`
NamespaceSelector implies that the Selector is limited
to selecting only GlobalNetworkSet or HostEndpoint. \n
For GlobalNetworkPolicy, an empty NamespaceSelector implies
the Selector applies to workload endpoints across all
namespaces."
type: string
nets:
description: Nets is an optional field that restricts the
rule to only apply to traffic that originates from (or
terminates at) IP addresses in any of the given subnets.
items:
type: string
type: array
notNets:
description: NotNets is the negated version of the Nets
field.
items:
type: string
type: array
notPorts:
description: NotPorts is the negated version of the Ports
field. Since only some protocols have ports, if any ports
are specified it requires the Protocol match in the Rule
to be set to "TCP" or "UDP".
items:
anyOf:
- type: integer
- type: string
pattern: ^.*
x-kubernetes-int-or-string: true
type: array
notSelector:
description: NotSelector is the negated version of the Selector
field. See Selector field for subtleties with negated
selectors.
type: string
ports:
description: "Ports is an optional field that restricts
the rule to only apply to traffic that has a source (destination)
port that matches one of these ranges/values. This value
is a list of integers or strings that represent ranges
of ports. \n Since only some protocols have ports, if
any ports are specified it requires the Protocol match
in the Rule to be set to \"TCP\" or \"UDP\"."
items:
anyOf:
- type: integer
- type: string
pattern: ^.*
x-kubernetes-int-or-string: true
type: array
selector:
description: "Selector is an optional field that contains
a selector expression (see Policy for sample syntax).
\ Only traffic that originates from (terminates at) endpoints
matching the selector will be matched. \n Note that: in
addition to the negated version of the Selector (see NotSelector
below), the selector expression syntax itself supports
negation. The two types of negation are subtly different.
One negates the set of matched endpoints, the other negates
the whole match: \n \tSelector = \"!has(my_label)\" matches
packets that are from other Calico-controlled \tendpoints
that do not have the label \"my_label\". \n \tNotSelector
= \"has(my_label)\" matches packets that are not from
Calico-controlled \tendpoints that do have the label \"my_label\".
\n The effect is that the latter will accept packets from
non-Calico sources whereas the former is limited to packets
from Calico-controlled endpoints."
type: string
serviceAccounts:
description: ServiceAccounts is an optional field that restricts
the rule to only apply to traffic that originates from
(or terminates at) a pod running as a matching service
account.
properties:
names:
description: Names is an optional field that restricts
the rule to only apply to traffic that originates
from (or terminates at) a pod running as a service
account whose name is in the list.
items:
type: string
type: array
selector:
description: Selector is an optional field that restricts
the rule to only apply to traffic that originates
from (or terminates at) a pod running as a service
account that matches the given label selector. If
both Names and Selector are specified then they are
AND'ed.
type: string
type: object
services:
description: "Services is an optional field that contains
options for matching Kubernetes Services. If specified,
only traffic that originates from or terminates at endpoints
within the selected service(s) will be matched, and only
to/from each endpoint's port. \n Services cannot be specified
on the same rule as Selector, NotSelector, NamespaceSelector,
Nets, NotNets or ServiceAccounts. \n Ports and NotPorts
can only be specified with Services on ingress rules."
properties:
name:
description: Name specifies the name of a Kubernetes
Service to match.
type: string
namespace:
description: Namespace specifies the namespace of the
given Service. If left empty, the rule will match
within this policy's namespace.
type: string
type: object
type: object
http:
description: HTTP contains match criteria that apply to HTTP
requests.
properties:
methods:
description: Methods is an optional field that restricts
the rule to apply only to HTTP requests that use one of
the listed HTTP Methods (e.g. GET, PUT, etc.) Multiple
methods are OR'd together.
items:
type: string
type: array
paths:
description: 'Paths is an optional field that restricts
the rule to apply to HTTP requests that use one of the
listed HTTP Paths. Multiple paths are OR''d together.
e.g: - exact: /foo - prefix: /bar NOTE: Each entry may
ONLY specify either a `exact` or a `prefix` match. The
validator will check for it.'
items:
description: 'HTTPPath specifies an HTTP path to match.
It may be either of the form: exact: : which matches
the path exactly or prefix: : which matches
the path prefix'
properties:
exact:
type: string
prefix:
type: string
type: object
type: array
type: object
icmp:
description: ICMP is an optional field that restricts the rule
to apply to a specific type and code of ICMP traffic. This
should only be specified if the Protocol field is set to "ICMP"
or "ICMPv6".
properties:
code:
description: Match on a specific ICMP code. If specified,
the Type value must also be specified. This is a technical
limitation imposed by the kernel's iptables firewall,
which Calico uses to enforce the rule.
type: integer
type:
description: Match on a specific ICMP type. For example
a value of 8 refers to ICMP Echo Request (i.e. pings).
type: integer
type: object
ipVersion:
description: IPVersion is an optional field that restricts the
rule to only match a specific IP version.
type: integer
metadata:
description: Metadata contains additional information for this
rule
properties:
annotations:
additionalProperties:
type: string
description: Annotations is a set of key value pairs that
give extra information about the rule
type: object
type: object
notICMP:
description: NotICMP is the negated version of the ICMP field.
properties:
code:
description: Match on a specific ICMP code. If specified,
the Type value must also be specified. This is a technical
limitation imposed by the kernel's iptables firewall,
which Calico uses to enforce the rule.
type: integer
type:
description: Match on a specific ICMP type. For example
a value of 8 refers to ICMP Echo Request (i.e. pings).
type: integer
type: object
notProtocol:
anyOf:
- type: integer
- type: string
description: NotProtocol is the negated version of the Protocol
field.
pattern: ^.*
x-kubernetes-int-or-string: true
protocol:
anyOf:
- type: integer
- type: string
description: "Protocol is an optional field that restricts the
rule to only apply to traffic of a specific IP protocol. Required
if any of the EntityRules contain Ports (because ports only
apply to certain protocols). \n Must be one of these string
values: \"TCP\", \"UDP\", \"ICMP\", \"ICMPv6\", \"SCTP\",
\"UDPLite\" or an integer in the range 1-255."
pattern: ^.*
x-kubernetes-int-or-string: true
source:
description: Source contains the match criteria that apply to
source entity.
properties:
namespaceSelector:
description: "NamespaceSelector is an optional field that
contains a selector expression. Only traffic that originates
from (or terminates at) endpoints within the selected
namespaces will be matched. When both NamespaceSelector
and another selector are defined on the same rule, then
only workload endpoints that are matched by both selectors
will be selected by the rule. \n For NetworkPolicy, an
empty NamespaceSelector implies that the Selector is limited
to selecting only workload endpoints in the same namespace
as the NetworkPolicy. \n For NetworkPolicy, `global()`
NamespaceSelector implies that the Selector is limited
to selecting only GlobalNetworkSet or HostEndpoint. \n
For GlobalNetworkPolicy, an empty NamespaceSelector implies
the Selector applies to workload endpoints across all
namespaces."
type: string
nets:
description: Nets is an optional field that restricts the
rule to only apply to traffic that originates from (or
terminates at) IP addresses in any of the given subnets.
items:
type: string
type: array
notNets:
description: NotNets is the negated version of the Nets
field.
items:
type: string
type: array
notPorts:
description: NotPorts is the negated version of the Ports
field. Since only some protocols have ports, if any ports
are specified it requires the Protocol match in the Rule
to be set to "TCP" or "UDP".
items:
anyOf:
- type: integer
- type: string
pattern: ^.*
x-kubernetes-int-or-string: true
type: array
notSelector:
description: NotSelector is the negated version of the Selector
field. See Selector field for subtleties with negated
selectors.
type: string
ports:
description: "Ports is an optional field that restricts
the rule to only apply to traffic that has a source (destination)
port that matches one of these ranges/values. This value
is a list of integers or strings that represent ranges
of ports. \n Since only some protocols have ports, if
any ports are specified it requires the Protocol match
in the Rule to be set to \"TCP\" or \"UDP\"."
items:
anyOf:
- type: integer
- type: string
pattern: ^.*
x-kubernetes-int-or-string: true
type: array
selector:
description: "Selector is an optional field that contains
a selector expression (see Policy for sample syntax).
\ Only traffic that originates from (terminates at) endpoints
matching the selector will be matched. \n Note that: in
addition to the negated version of the Selector (see NotSelector
below), the selector expression syntax itself supports
negation. The two types of negation are subtly different.
One negates the set of matched endpoints, the other negates
the whole match: \n \tSelector = \"!has(my_label)\" matches
packets that are from other Calico-controlled \tendpoints
that do not have the label \"my_label\". \n \tNotSelector
= \"has(my_label)\" matches packets that are not from
Calico-controlled \tendpoints that do have the label \"my_label\".
\n The effect is that the latter will accept packets from
non-Calico sources whereas the former is limited to packets
from Calico-controlled endpoints."
type: string
serviceAccounts:
description: ServiceAccounts is an optional field that restricts
the rule to only apply to traffic that originates from
(or terminates at) a pod running as a matching service
account.
properties:
names:
description: Names is an optional field that restricts
the rule to only apply to traffic that originates
from (or terminates at) a pod running as a service
account whose name is in the list.
items:
type: string
type: array
selector:
description: Selector is an optional field that restricts
the rule to only apply to traffic that originates
from (or terminates at) a pod running as a service
account that matches the given label selector. If
both Names and Selector are specified then they are
AND'ed.
type: string
type: object
services:
description: "Services is an optional field that contains
options for matching Kubernetes Services. If specified,
only traffic that originates from or terminates at endpoints
within the selected service(s) will be matched, and only
to/from each endpoint's port. \n Services cannot be specified
on the same rule as Selector, NotSelector, NamespaceSelector,
Nets, NotNets or ServiceAccounts. \n Ports and NotPorts
can only be specified with Services on ingress rules."
properties:
name:
description: Name specifies the name of a Kubernetes
Service to match.
type: string
namespace:
description: Namespace specifies the namespace of the
given Service. If left empty, the rule will match
within this policy's namespace.
type: string
type: object
type: object
required:
- action
type: object
type: array
namespaceSelector:
description: NamespaceSelector is an optional field for an expression
used to select a pod based on namespaces.
type: string
order:
description: Order is an optional field that specifies the order in
which the policy is applied. Policies with higher "order" are applied
after those with lower order. If the order is omitted, it may be
considered to be "infinite" - i.e. the policy will be applied last. Policies
with identical order will be applied in alphanumerical order based
on the Policy "Name".
type: number
preDNAT:
description: PreDNAT indicates to apply the rules in this policy before
any DNAT.
type: boolean
selector:
                description: "The selector is an expression used to pick out
the endpoints that the policy should be applied to. \n Selector
expressions follow this syntax: \n \tlabel == \"string_literal\"
\ -> comparison, e.g. my_label == \"foo bar\" \tlabel != \"string_literal\"
\ -> not equal; also matches if label is not present \tlabel in
{ \"a\", \"b\", \"c\", ... } -> true if the value of label X is
one of \"a\", \"b\", \"c\" \tlabel not in { \"a\", \"b\", \"c\",
... } -> true if the value of label X is not one of \"a\", \"b\",
\"c\" \thas(label_name) -> True if that label is present \t! expr
-> negation of expr \texpr && expr -> Short-circuit and \texpr
|| expr -> Short-circuit or \t( expr ) -> parens for grouping \tall()
or the empty selector -> matches all endpoints. \n Label names are
allowed to contain alphanumerics, -, _ and /. String literals are
more permissive but they do not support escape characters. \n Examples
(with made-up labels): \n \ttype == \"webserver\" && deployment
== \"prod\" \ttype in {\"frontend\", \"backend\"} \tdeployment !=
\"dev\" \t! has(label_name)"
type: string
serviceAccountSelector:
description: ServiceAccountSelector is an optional field for an expression
used to select a pod based on service accounts.
type: string
types:
description: "Types indicates whether this policy applies to ingress,
or to egress, or to both. When not explicitly specified (and so
the value on creation is empty or nil), Calico defaults Types according
to what Ingress and Egress rules are present in the policy. The
default is: \n - [ PolicyTypeIngress ], if there are no Egress rules
(including the case where there are also no Ingress rules) \n
- [ PolicyTypeEgress ], if there are Egress rules but no Ingress
rules \n - [ PolicyTypeIngress, PolicyTypeEgress ], if there are
both Ingress and Egress rules. \n When the policy is read back again,
Types will always be one of these values, never empty or nil."
items:
description: PolicyType enumerates the possible values of the PolicySpec
Types field.
type: string
type: array
type: object
type: object
served: true
storage: true
status:
acceptedNames:
kind: ""
plural: ""
conditions: []
storedVersions: []
---
# Source: calico/templates/kdd-crds.yaml
# CustomResourceDefinition for GlobalNetworkSet: a cluster-scoped Calico
# resource holding a named set of IP sub-networks/CIDRs that share labels,
# so that policy rules can refer to the whole set via label selectors
# (see the openAPIV3Schema description below).
apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
  name: globalnetworksets.crd.projectcalico.org
spec:
  group: crd.projectcalico.org
  names:
    kind: GlobalNetworkSet
    listKind: GlobalNetworkSetList
    plural: globalnetworksets
    singular: globalnetworkset
  # Structural schema in use (apiextensions.k8s.io/v1): unknown fields are pruned.
  preserveUnknownFields: false
  scope: Cluster
  versions:
  - name: v1
    schema:
      openAPIV3Schema:
        description: GlobalNetworkSet contains a set of arbitrary IP sub-networks/CIDRs
          that share labels to allow rules to refer to them via selectors. The labels
          of GlobalNetworkSet are not namespaced.
        properties:
          apiVersion:
            description: 'APIVersion defines the versioned schema of this representation
              of an object. Servers should convert recognized schemas to the latest
              internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
            type: string
          kind:
            description: 'Kind is a string value representing the REST resource this
              object represents. Servers may infer this from the endpoint the client
              submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
            type: string
          metadata:
            type: object
          spec:
            description: GlobalNetworkSetSpec contains the specification for a NetworkSet
              resource.
            properties:
              nets:
                description: The list of IP networks that belong to this set.
                items:
                  type: string
                type: array
            type: object
        type: object
    served: true
    storage: true
  status:
    acceptedNames:
      kind: ""
      plural: ""
    conditions: []
    storedVersions: []
---
# Source: calico/templates/kdd-crds.yaml
# CustomResourceDefinition for HostEndpoint: a cluster-scoped Calico resource
# that applies policy to a host's network interface(s). The target host is
# named by "node"; the interface is selected by "interfaceName" and/or
# "expectedIPs" (per the field descriptions below).
apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
  name: hostendpoints.crd.projectcalico.org
spec:
  group: crd.projectcalico.org
  names:
    kind: HostEndpoint
    listKind: HostEndpointList
    plural: hostendpoints
    singular: hostendpoint
  preserveUnknownFields: false
  scope: Cluster
  versions:
  - name: v1
    schema:
      openAPIV3Schema:
        properties:
          apiVersion:
            description: 'APIVersion defines the versioned schema of this representation
              of an object. Servers should convert recognized schemas to the latest
              internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
            type: string
          kind:
            description: 'Kind is a string value representing the REST resource this
              object represents. Servers may infer this from the endpoint the client
              submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
            type: string
          metadata:
            type: object
          spec:
            description: HostEndpointSpec contains the specification for a HostEndpoint
              resource.
            properties:
              expectedIPs:
                description: "The expected IP addresses (IPv4 and IPv6) of the endpoint.
                  If \"InterfaceName\" is not present, Calico will look for an interface
                  matching any of the IPs in the list and apply policy to that. Note:
                  \tWhen using the selector match criteria in an ingress or egress
                  security Policy \tor Profile, Calico converts the selector into
                  a set of IP addresses. For host \tendpoints, the ExpectedIPs field
                  is used for that purpose. (If only the interface \tname is specified,
                  Calico does not learn the IPs of the interface for use in match
                  \tcriteria.)"
                items:
                  type: string
                type: array
              interfaceName:
                description: "Either \"*\", or the name of a specific Linux interface
                  to apply policy to; or empty. \"*\" indicates that this HostEndpoint
                  governs all traffic to, from or through the default network namespace
                  of the host named by the \"Node\" field; entering and leaving that
                  namespace via any interface, including those from/to non-host-networked
                  local workloads. \n If InterfaceName is not \"*\", this HostEndpoint
                  only governs traffic that enters or leaves the host through the
                  specific interface named by InterfaceName, or - when InterfaceName
                  is empty - through the specific interface that has one of the IPs
                  in ExpectedIPs. Therefore, when InterfaceName is empty, at least
                  one expected IP must be specified. Only external interfaces (such
                  as \"eth0\") are supported here; it isn't possible for a HostEndpoint
                  to protect traffic through a specific local workload interface.
                  \n Note: Only some kinds of policy are implemented for \"*\" HostEndpoints;
                  initially just pre-DNAT policy. Please check Calico documentation
                  for the latest position."
                type: string
              node:
                description: The node name identifying the Calico node instance.
                type: string
              ports:
                description: Ports contains the endpoint's named ports, which may
                  be referenced in security policy rules.
                items:
                  properties:
                    name:
                      type: string
                    port:
                      type: integer
                    protocol:
                      anyOf:
                      - type: integer
                      - type: string
                      pattern: ^.*
                      x-kubernetes-int-or-string: true
                  required:
                  - name
                  - port
                  - protocol
                  type: object
                type: array
              profiles:
                description: A list of identifiers of security Profile objects that
                  apply to this endpoint. Each profile is applied in the order that
                  they appear in this list. Profile rules are applied after the selector-based
                  security policy.
                items:
                  type: string
                type: array
            type: object
        type: object
    served: true
    storage: true
  status:
    acceptedNames:
      kind: ""
      plural: ""
    conditions: []
    storedVersions: []
---
# Source: calico/templates/kdd-crds.yaml
# CustomResourceDefinition for IPAMBlock: internal Calico IPAM bookkeeping.
# Each object tracks one CIDR block of address allocations (Allocations /
# Attributes / Unallocated arrays) plus optional host affinity. Fields such
# as "deleted" and "sequenceNumber" are maintained by Calico itself and,
# per their descriptions, are not meant to be set manually.
apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
  name: ipamblocks.crd.projectcalico.org
spec:
  group: crd.projectcalico.org
  names:
    kind: IPAMBlock
    listKind: IPAMBlockList
    plural: ipamblocks
    singular: ipamblock
  preserveUnknownFields: false
  scope: Cluster
  versions:
  - name: v1
    schema:
      openAPIV3Schema:
        properties:
          apiVersion:
            description: 'APIVersion defines the versioned schema of this representation
              of an object. Servers should convert recognized schemas to the latest
              internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
            type: string
          kind:
            description: 'Kind is a string value representing the REST resource this
              object represents. Servers may infer this from the endpoint the client
              submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
            type: string
          metadata:
            type: object
          spec:
            description: IPAMBlockSpec contains the specification for an IPAMBlock
              resource.
            properties:
              affinity:
                description: Affinity of the block, if this block has one. If set,
                  it will be of the form "host:". If not set, this block
                  is not affine to a host.
                type: string
              allocations:
                description: Array of allocations in-use within this block. nil entries
                  mean the allocation is free. For non-nil entries at index i, the
                  index is the ordinal of the allocation within this block and the
                  value is the index of the associated attributes in the Attributes
                  array.
                items:
                  type: integer
                  # TODO: This nullable is manually added in. We should update controller-gen
                  # to handle []*int properly itself.
                  nullable: true
                type: array
              attributes:
                description: Attributes is an array of arbitrary metadata associated
                  with allocations in the block. To find attributes for a given allocation,
                  use the value of the allocation's entry in the Allocations array
                  as the index of the element in this array.
                items:
                  properties:
                    handle_id:
                      type: string
                    secondary:
                      additionalProperties:
                        type: string
                      type: object
                  type: object
                type: array
              cidr:
                description: The block's CIDR.
                type: string
              deleted:
                description: Deleted is an internal boolean used to workaround a limitation
                  in the Kubernetes API whereby deletion will not return a conflict
                  error if the block has been updated. It should not be set manually.
                type: boolean
              sequenceNumber:
                default: 0
                description: We store a sequence number that is updated each time
                  the block is written. Each allocation will also store the sequence
                  number of the block at the time of its creation. When releasing
                  an IP, passing the sequence number associated with the allocation
                  allows us to protect against a race condition and ensure the IP
                  hasn't been released and re-allocated since the release request.
                format: int64
                type: integer
              sequenceNumberForAllocation:
                additionalProperties:
                  format: int64
                  type: integer
                description: Map of allocated ordinal within the block to sequence
                  number of the block at the time of allocation. Kubernetes does not
                  allow numerical keys for maps, so the key is cast to a string.
                type: object
              strictAffinity:
                description: StrictAffinity on the IPAMBlock is deprecated and no
                  longer used by the code. Use IPAMConfig StrictAffinity instead.
                type: boolean
              unallocated:
                description: Unallocated is an ordered list of allocations which are
                  free in the block.
                items:
                  type: integer
                type: array
            required:
            - allocations
            - attributes
            - cidr
            - strictAffinity
            - unallocated
            type: object
        type: object
    served: true
    storage: true
  status:
    acceptedNames:
      kind: ""
      plural: ""
    conditions: []
    storedVersions: []
---
# Source: calico/templates/kdd-crds.yaml
# CustomResourceDefinition for IPAMConfig: cluster-wide Calico IPAM settings
# (autoAllocateBlocks, strictAffinity, and an optional per-host block cap
# via maxBlocksPerHost).
apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
  name: ipamconfigs.crd.projectcalico.org
spec:
  group: crd.projectcalico.org
  names:
    kind: IPAMConfig
    listKind: IPAMConfigList
    plural: ipamconfigs
    singular: ipamconfig
  preserveUnknownFields: false
  scope: Cluster
  versions:
  - name: v1
    schema:
      openAPIV3Schema:
        properties:
          apiVersion:
            description: 'APIVersion defines the versioned schema of this representation
              of an object. Servers should convert recognized schemas to the latest
              internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
            type: string
          kind:
            description: 'Kind is a string value representing the REST resource this
              object represents. Servers may infer this from the endpoint the client
              submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
            type: string
          metadata:
            type: object
          spec:
            description: IPAMConfigSpec contains the specification for an IPAMConfig
              resource.
            properties:
              autoAllocateBlocks:
                type: boolean
              maxBlocksPerHost:
                description: MaxBlocksPerHost, if non-zero, is the max number of blocks
                  that can be affine to each host.
                # Bounded to a non-negative int32 range.
                maximum: 2147483647
                minimum: 0
                type: integer
              strictAffinity:
                type: boolean
            required:
            - autoAllocateBlocks
            - strictAffinity
            type: object
        type: object
    served: true
    storage: true
  status:
    acceptedNames:
      kind: ""
      plural: ""
    conditions: []
    storedVersions: []
---
# Source: calico/templates/kdd-crds.yaml
# CustomResourceDefinition for IPAMHandle: internal Calico IPAM bookkeeping
# that maps a handleID to per-block allocation counts ("block" is a map of
# block identifier to integer).
apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
  name: ipamhandles.crd.projectcalico.org
spec:
  group: crd.projectcalico.org
  names:
    kind: IPAMHandle
    listKind: IPAMHandleList
    plural: ipamhandles
    singular: ipamhandle
  preserveUnknownFields: false
  scope: Cluster
  versions:
  - name: v1
    schema:
      openAPIV3Schema:
        properties:
          apiVersion:
            description: 'APIVersion defines the versioned schema of this representation
              of an object. Servers should convert recognized schemas to the latest
              internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
            type: string
          kind:
            description: 'Kind is a string value representing the REST resource this
              object represents. Servers may infer this from the endpoint the client
              submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
            type: string
          metadata:
            type: object
          spec:
            description: IPAMHandleSpec contains the specification for an IPAMHandle
              resource.
            properties:
              block:
                additionalProperties:
                  type: integer
                type: object
              deleted:
                type: boolean
              handleID:
                type: string
            required:
            - block
            - handleID
            type: object
        type: object
    served: true
    storage: true
  status:
    acceptedNames:
      kind: ""
      plural: ""
    conditions: []
    storedVersions: []
---
# Source: calico/templates/kdd-crds.yaml
# CustomResourceDefinition for IPPool: a cluster-scoped pool of addresses
# (required "cidr") with options for IPIP/VXLAN tunneling, outgoing NAT,
# block size, allowed uses, and node-selector scoping. The "ipip" and
# "nat-outgoing" fields are marked deprecated in their descriptions and
# exist only for APIv1 backwards compatibility.
apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
  name: ippools.crd.projectcalico.org
spec:
  group: crd.projectcalico.org
  names:
    kind: IPPool
    listKind: IPPoolList
    plural: ippools
    singular: ippool
  preserveUnknownFields: false
  scope: Cluster
  versions:
  - name: v1
    schema:
      openAPIV3Schema:
        properties:
          apiVersion:
            description: 'APIVersion defines the versioned schema of this representation
              of an object. Servers should convert recognized schemas to the latest
              internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
            type: string
          kind:
            description: 'Kind is a string value representing the REST resource this
              object represents. Servers may infer this from the endpoint the client
              submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
            type: string
          metadata:
            type: object
          spec:
            description: IPPoolSpec contains the specification for an IPPool resource.
            properties:
              allowedUses:
                description: AllowedUse controls what the IP pool will be used for. If
                  not specified or empty, defaults to ["Tunnel", "Workload"] for back-compatibility
                items:
                  type: string
                type: array
              blockSize:
                description: The block size to use for IP address assignments from
                  this pool. Defaults to 26 for IPv4 and 122 for IPv6.
                type: integer
              cidr:
                description: The pool CIDR.
                type: string
              disableBGPExport:
                description: 'Disable exporting routes from this IP Pool''s CIDR over
                  BGP. [Default: false]'
                type: boolean
              disabled:
                description: When disabled is true, Calico IPAM will not assign addresses
                  from this pool.
                type: boolean
              ipip:
                description: 'Deprecated: this field is only used for APIv1 backwards
                  compatibility. Setting this field is not allowed, this field is
                  for internal use only.'
                properties:
                  enabled:
                    description: When enabled is true, ipip tunneling will be used
                      to deliver packets to destinations within this pool.
                    type: boolean
                  mode:
                    description: The IPIP mode. This can be one of "always" or "cross-subnet". A
                      mode of "always" will also use IPIP tunneling for routing to
                      destination IP addresses within this pool. A mode of "cross-subnet"
                      will only use IPIP tunneling when the destination node is on
                      a different subnet to the originating node. The default value
                      (if not specified) is "always".
                    type: string
                type: object
              ipipMode:
                description: Contains configuration for IPIP tunneling for this pool.
                  If not specified, then this is defaulted to "Never" (i.e. IPIP tunneling
                  is disabled).
                type: string
              nat-outgoing:
                description: 'Deprecated: this field is only used for APIv1 backwards
                  compatibility. Setting this field is not allowed, this field is
                  for internal use only.'
                type: boolean
              natOutgoing:
                description: When natOutgoing is true, packets sent from Calico networked
                  containers in this pool to destinations outside of this pool will
                  be masqueraded.
                type: boolean
              nodeSelector:
                description: Allows IPPool to allocate for a specific node by label
                  selector.
                type: string
              vxlanMode:
                description: Contains configuration for VXLAN tunneling for this pool.
                  If not specified, then this is defaulted to "Never" (i.e. VXLAN
                  tunneling is disabled).
                type: string
            required:
            - cidr
            type: object
        type: object
    served: true
    storage: true
  status:
    acceptedNames:
      kind: ""
      plural: ""
    conditions: []
    storedVersions: []
---
# Source: calico/templates/kdd-crds.yaml
# Cluster-scoped CRD for Calico's IPReservation resource
# (crd.projectcalico.org/v1): reservedCIDRs lists CIDRs/IP addresses that
# Calico IPAM will exclude from new allocations.
# NOTE(review): machine-generated manifest (note the controller-gen version
# annotation and "creationTimestamp: null" below) — presumably vendored;
# prefer re-rendering from upstream over hand-editing.
apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
  annotations:
    controller-gen.kubebuilder.io/version: (devel)
  creationTimestamp: null
  name: ipreservations.crd.projectcalico.org
spec:
  group: crd.projectcalico.org
  names:
    kind: IPReservation
    listKind: IPReservationList
    plural: ipreservations
    singular: ipreservation
  preserveUnknownFields: false
  scope: Cluster
  versions:
  - name: v1
    schema:
      openAPIV3Schema:
        properties:
          apiVersion:
            description: 'APIVersion defines the versioned schema of this representation
              of an object. Servers should convert recognized schemas to the latest
              internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
            type: string
          kind:
            description: 'Kind is a string value representing the REST resource this
              object represents. Servers may infer this from the endpoint the client
              submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
            type: string
          metadata:
            type: object
          spec:
            description: IPReservationSpec contains the specification for an IPReservation
              resource.
            properties:
              reservedCIDRs:
                description: ReservedCIDRs is a list of CIDRs and/or IP addresses
                  that Calico IPAM will exclude from new allocations.
                items:
                  type: string
                type: array
            type: object
        type: object
    served: true
    storage: true
status:
  acceptedNames:
    kind: ""
    plural: ""
  conditions: []
  storedVersions: []
---
# Source: calico/templates/kdd-crds.yaml
# Cluster-scoped CRD for Calico's KubeControllersConfiguration resource
# (crd.projectcalico.org/v1). spec holds tunables for the individual
# kube-controllers (namespace, node, policy, serviceAccount,
# workloadEndpoint); status mirrors the same schema under runningConfig to
# report the effective config after environment-variable overrides.
# NOTE(review): machine-generated manifest (rendered from the Calico chart
# template named in "Source" above) — presumably vendored; prefer
# re-rendering from upstream over hand-editing.
apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
  name: kubecontrollersconfigurations.crd.projectcalico.org
spec:
  group: crd.projectcalico.org
  names:
    kind: KubeControllersConfiguration
    listKind: KubeControllersConfigurationList
    plural: kubecontrollersconfigurations
    singular: kubecontrollersconfiguration
  preserveUnknownFields: false
  scope: Cluster
  versions:
  - name: v1
    schema:
      openAPIV3Schema:
        properties:
          apiVersion:
            description: 'APIVersion defines the versioned schema of this representation
              of an object. Servers should convert recognized schemas to the latest
              internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
            type: string
          kind:
            description: 'Kind is a string value representing the REST resource this
              object represents. Servers may infer this from the endpoint the client
              submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
            type: string
          metadata:
            type: object
          spec:
            description: KubeControllersConfigurationSpec contains the values of the
              Kubernetes controllers configuration.
            properties:
              controllers:
                description: Controllers enables and configures individual Kubernetes
                  controllers
                properties:
                  namespace:
                    description: Namespace enables and configures the namespace controller.
                      Enabled by default, set to nil to disable.
                    properties:
                      reconcilerPeriod:
                        description: 'ReconcilerPeriod is the period to perform reconciliation
                          with the Calico datastore. [Default: 5m]'
                        type: string
                    type: object
                  node:
                    description: Node enables and configures the node controller.
                      Enabled by default, set to nil to disable.
                    properties:
                      hostEndpoint:
                        description: HostEndpoint controls syncing nodes to host endpoints.
                          Disabled by default, set to nil to disable.
                        properties:
                          autoCreate:
                            description: 'AutoCreate enables automatic creation of
                              host endpoints for every node. [Default: Disabled]'
                            type: string
                        type: object
                      leakGracePeriod:
                        description: 'LeakGracePeriod is the period used by the controller
                          to determine if an IP address has been leaked. Set to 0
                          to disable IP garbage collection. [Default: 15m]'
                        type: string
                      reconcilerPeriod:
                        description: 'ReconcilerPeriod is the period to perform reconciliation
                          with the Calico datastore. [Default: 5m]'
                        type: string
                      syncLabels:
                        description: 'SyncLabels controls whether to copy Kubernetes
                          node labels to Calico nodes. [Default: Enabled]'
                        type: string
                    type: object
                  policy:
                    description: Policy enables and configures the policy controller.
                      Enabled by default, set to nil to disable.
                    properties:
                      reconcilerPeriod:
                        description: 'ReconcilerPeriod is the period to perform reconciliation
                          with the Calico datastore. [Default: 5m]'
                        type: string
                    type: object
                  serviceAccount:
                    description: ServiceAccount enables and configures the service
                      account controller. Enabled by default, set to nil to disable.
                    properties:
                      reconcilerPeriod:
                        description: 'ReconcilerPeriod is the period to perform reconciliation
                          with the Calico datastore. [Default: 5m]'
                        type: string
                    type: object
                  workloadEndpoint:
                    description: WorkloadEndpoint enables and configures the workload
                      endpoint controller. Enabled by default, set to nil to disable.
                    properties:
                      reconcilerPeriod:
                        description: 'ReconcilerPeriod is the period to perform reconciliation
                          with the Calico datastore. [Default: 5m]'
                        type: string
                    type: object
                type: object
              debugProfilePort:
                description: DebugProfilePort configures the port to serve memory
                  and cpu profiles on. If not specified, profiling is disabled.
                format: int32
                type: integer
              etcdV3CompactionPeriod:
                description: 'EtcdV3CompactionPeriod is the period between etcdv3
                  compaction requests. Set to 0 to disable. [Default: 10m]'
                type: string
              healthChecks:
                description: 'HealthChecks enables or disables support for health
                  checks [Default: Enabled]'
                type: string
              logSeverityScreen:
                description: 'LogSeverityScreen is the log severity above which logs
                  are sent to the stdout. [Default: Info]'
                type: string
              prometheusMetricsPort:
                description: 'PrometheusMetricsPort is the TCP port that the Prometheus
                  metrics server should bind to. Set to 0 to disable. [Default: 9094]'
                type: integer
            required:
            - controllers
            type: object
          status:
            description: KubeControllersConfigurationStatus represents the status
              of the configuration. It's useful for admins to be able to see the actual
              config that was applied, which can be modified by environment variables
              on the kube-controllers process.
            properties:
              environmentVars:
                additionalProperties:
                  type: string
                description: EnvironmentVars contains the environment variables on
                  the kube-controllers that influenced the RunningConfig.
                type: object
              runningConfig:
                description: RunningConfig contains the effective config that is running
                  in the kube-controllers pod, after merging the API resource with
                  any environment variables.
                properties:
                  controllers:
                    description: Controllers enables and configures individual Kubernetes
                      controllers
                    properties:
                      namespace:
                        description: Namespace enables and configures the namespace
                          controller. Enabled by default, set to nil to disable.
                        properties:
                          reconcilerPeriod:
                            description: 'ReconcilerPeriod is the period to perform
                              reconciliation with the Calico datastore. [Default:
                              5m]'
                            type: string
                        type: object
                      node:
                        description: Node enables and configures the node controller.
                          Enabled by default, set to nil to disable.
                        properties:
                          hostEndpoint:
                            description: HostEndpoint controls syncing nodes to host
                              endpoints. Disabled by default, set to nil to disable.
                            properties:
                              autoCreate:
                                description: 'AutoCreate enables automatic creation
                                  of host endpoints for every node. [Default: Disabled]'
                                type: string
                            type: object
                          leakGracePeriod:
                            description: 'LeakGracePeriod is the period used by the
                              controller to determine if an IP address has been leaked.
                              Set to 0 to disable IP garbage collection. [Default:
                              15m]'
                            type: string
                          reconcilerPeriod:
                            description: 'ReconcilerPeriod is the period to perform
                              reconciliation with the Calico datastore. [Default:
                              5m]'
                            type: string
                          syncLabels:
                            description: 'SyncLabels controls whether to copy Kubernetes
                              node labels to Calico nodes. [Default: Enabled]'
                            type: string
                        type: object
                      policy:
                        description: Policy enables and configures the policy controller.
                          Enabled by default, set to nil to disable.
                        properties:
                          reconcilerPeriod:
                            description: 'ReconcilerPeriod is the period to perform
                              reconciliation with the Calico datastore. [Default:
                              5m]'
                            type: string
                        type: object
                      serviceAccount:
                        description: ServiceAccount enables and configures the service
                          account controller. Enabled by default, set to nil to disable.
                        properties:
                          reconcilerPeriod:
                            description: 'ReconcilerPeriod is the period to perform
                              reconciliation with the Calico datastore. [Default:
                              5m]'
                            type: string
                        type: object
                      workloadEndpoint:
                        description: WorkloadEndpoint enables and configures the workload
                          endpoint controller. Enabled by default, set to nil to disable.
                        properties:
                          reconcilerPeriod:
                            description: 'ReconcilerPeriod is the period to perform
                              reconciliation with the Calico datastore. [Default:
                              5m]'
                            type: string
                        type: object
                    type: object
                  debugProfilePort:
                    description: DebugProfilePort configures the port to serve memory
                      and cpu profiles on. If not specified, profiling is disabled.
                    format: int32
                    type: integer
                  etcdV3CompactionPeriod:
                    description: 'EtcdV3CompactionPeriod is the period between etcdv3
                      compaction requests. Set to 0 to disable. [Default: 10m]'
                    type: string
                  healthChecks:
                    description: 'HealthChecks enables or disables support for health
                      checks [Default: Enabled]'
                    type: string
                  logSeverityScreen:
                    description: 'LogSeverityScreen is the log severity above which
                      logs are sent to the stdout. [Default: Info]'
                    type: string
                  prometheusMetricsPort:
                    description: 'PrometheusMetricsPort is the TCP port that the Prometheus
                      metrics server should bind to. Set to 0 to disable. [Default:
                      9094]'
                    type: integer
                required:
                - controllers
                type: object
            type: object
        type: object
    served: true
    storage: true
status:
  acceptedNames:
    kind: ""
    plural: ""
  conditions: []
  storedVersions: []
---
# Source: calico/templates/kdd-crds.yaml
apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
name: networkpolicies.crd.projectcalico.org
spec:
group: crd.projectcalico.org
names:
kind: NetworkPolicy
listKind: NetworkPolicyList
plural: networkpolicies
singular: networkpolicy
preserveUnknownFields: false
scope: Namespaced
versions:
- name: v1
schema:
openAPIV3Schema:
properties:
apiVersion:
description: 'APIVersion defines the versioned schema of this representation
of an object. Servers should convert recognized schemas to the latest
internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
type: string
kind:
description: 'Kind is a string value representing the REST resource this
object represents. Servers may infer this from the endpoint the client
submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
type: string
metadata:
type: object
spec:
properties:
egress:
description: The ordered set of egress rules. Each rule contains
a set of packet match criteria and a corresponding action to apply.
items:
description: "A Rule encapsulates a set of match criteria and an
action. Both selector-based security Policy and security Profiles
reference rules - separated out as a list of rules for both ingress
and egress packet matching. \n Each positive match criteria has
a negated version, prefixed with \"Not\". All the match criteria
within a rule must be satisfied for a packet to match. A single
rule can contain the positive and negative version of a match
and both must be satisfied for the rule to match."
properties:
action:
type: string
destination:
description: Destination contains the match criteria that apply
to destination entity.
properties:
namespaceSelector:
description: "NamespaceSelector is an optional field that
contains a selector expression. Only traffic that originates
from (or terminates at) endpoints within the selected
namespaces will be matched. When both NamespaceSelector
and another selector are defined on the same rule, then
only workload endpoints that are matched by both selectors
will be selected by the rule. \n For NetworkPolicy, an
empty NamespaceSelector implies that the Selector is limited
to selecting only workload endpoints in the same namespace
as the NetworkPolicy. \n For NetworkPolicy, `global()`
NamespaceSelector implies that the Selector is limited
to selecting only GlobalNetworkSet or HostEndpoint. \n
For GlobalNetworkPolicy, an empty NamespaceSelector implies
the Selector applies to workload endpoints across all
namespaces."
type: string
nets:
description: Nets is an optional field that restricts the
rule to only apply to traffic that originates from (or
terminates at) IP addresses in any of the given subnets.
items:
type: string
type: array
notNets:
description: NotNets is the negated version of the Nets
field.
items:
type: string
type: array
notPorts:
description: NotPorts is the negated version of the Ports
field. Since only some protocols have ports, if any ports
are specified it requires the Protocol match in the Rule
to be set to "TCP" or "UDP".
items:
anyOf:
- type: integer
- type: string
pattern: ^.*
x-kubernetes-int-or-string: true
type: array
notSelector:
description: NotSelector is the negated version of the Selector
field. See Selector field for subtleties with negated
selectors.
type: string
ports:
description: "Ports is an optional field that restricts
the rule to only apply to traffic that has a source (destination)
port that matches one of these ranges/values. This value
is a list of integers or strings that represent ranges
of ports. \n Since only some protocols have ports, if
any ports are specified it requires the Protocol match
in the Rule to be set to \"TCP\" or \"UDP\"."
items:
anyOf:
- type: integer
- type: string
pattern: ^.*
x-kubernetes-int-or-string: true
type: array
selector:
description: "Selector is an optional field that contains
a selector expression (see Policy for sample syntax).
\ Only traffic that originates from (terminates at) endpoints
matching the selector will be matched. \n Note that: in
addition to the negated version of the Selector (see NotSelector
below), the selector expression syntax itself supports
negation. The two types of negation are subtly different.
One negates the set of matched endpoints, the other negates
the whole match: \n \tSelector = \"!has(my_label)\" matches
packets that are from other Calico-controlled \tendpoints
that do not have the label \"my_label\". \n \tNotSelector
= \"has(my_label)\" matches packets that are not from
Calico-controlled \tendpoints that do have the label \"my_label\".
\n The effect is that the latter will accept packets from
non-Calico sources whereas the former is limited to packets
from Calico-controlled endpoints."
type: string
serviceAccounts:
description: ServiceAccounts is an optional field that restricts
the rule to only apply to traffic that originates from
(or terminates at) a pod running as a matching service
account.
properties:
names:
description: Names is an optional field that restricts
the rule to only apply to traffic that originates
from (or terminates at) a pod running as a service
account whose name is in the list.
items:
type: string
type: array
selector:
description: Selector is an optional field that restricts
the rule to only apply to traffic that originates
from (or terminates at) a pod running as a service
account that matches the given label selector. If
both Names and Selector are specified then they are
AND'ed.
type: string
type: object
services:
description: "Services is an optional field that contains
options for matching Kubernetes Services. If specified,
only traffic that originates from or terminates at endpoints
within the selected service(s) will be matched, and only
to/from each endpoint's port. \n Services cannot be specified
on the same rule as Selector, NotSelector, NamespaceSelector,
Nets, NotNets or ServiceAccounts. \n Ports and NotPorts
can only be specified with Services on ingress rules."
properties:
name:
description: Name specifies the name of a Kubernetes
Service to match.
type: string
namespace:
description: Namespace specifies the namespace of the
given Service. If left empty, the rule will match
within this policy's namespace.
type: string
type: object
type: object
http:
description: HTTP contains match criteria that apply to HTTP
requests.
properties:
methods:
description: Methods is an optional field that restricts
the rule to apply only to HTTP requests that use one of
the listed HTTP Methods (e.g. GET, PUT, etc.) Multiple
methods are OR'd together.
items:
type: string
type: array
paths:
description: 'Paths is an optional field that restricts
the rule to apply to HTTP requests that use one of the
listed HTTP Paths. Multiple paths are OR''d together.
e.g: - exact: /foo - prefix: /bar NOTE: Each entry may
ONLY specify either a `exact` or a `prefix` match. The
validator will check for it.'
items:
description: 'HTTPPath specifies an HTTP path to match.
It may be either of the form: exact: : which matches
the path exactly or prefix: : which matches
the path prefix'
properties:
exact:
type: string
prefix:
type: string
type: object
type: array
type: object
icmp:
description: ICMP is an optional field that restricts the rule
to apply to a specific type and code of ICMP traffic. This
should only be specified if the Protocol field is set to "ICMP"
or "ICMPv6".
properties:
code:
description: Match on a specific ICMP code. If specified,
the Type value must also be specified. This is a technical
limitation imposed by the kernel's iptables firewall,
which Calico uses to enforce the rule.
type: integer
type:
description: Match on a specific ICMP type. For example
a value of 8 refers to ICMP Echo Request (i.e. pings).
type: integer
type: object
ipVersion:
description: IPVersion is an optional field that restricts the
rule to only match a specific IP version.
type: integer
metadata:
description: Metadata contains additional information for this
rule
properties:
annotations:
additionalProperties:
type: string
description: Annotations is a set of key value pairs that
give extra information about the rule
type: object
type: object
notICMP:
description: NotICMP is the negated version of the ICMP field.
properties:
code:
description: Match on a specific ICMP code. If specified,
the Type value must also be specified. This is a technical
limitation imposed by the kernel's iptables firewall,
which Calico uses to enforce the rule.
type: integer
type:
description: Match on a specific ICMP type. For example
a value of 8 refers to ICMP Echo Request (i.e. pings).
type: integer
type: object
notProtocol:
anyOf:
- type: integer
- type: string
description: NotProtocol is the negated version of the Protocol
field.
pattern: ^.*
x-kubernetes-int-or-string: true
protocol:
anyOf:
- type: integer
- type: string
description: "Protocol is an optional field that restricts the
rule to only apply to traffic of a specific IP protocol. Required
if any of the EntityRules contain Ports (because ports only
apply to certain protocols). \n Must be one of these string
values: \"TCP\", \"UDP\", \"ICMP\", \"ICMPv6\", \"SCTP\",
\"UDPLite\" or an integer in the range 1-255."
pattern: ^.*
x-kubernetes-int-or-string: true
source:
description: Source contains the match criteria that apply to
source entity.
properties:
namespaceSelector:
description: "NamespaceSelector is an optional field that
contains a selector expression. Only traffic that originates
from (or terminates at) endpoints within the selected
namespaces will be matched. When both NamespaceSelector
and another selector are defined on the same rule, then
only workload endpoints that are matched by both selectors
will be selected by the rule. \n For NetworkPolicy, an
empty NamespaceSelector implies that the Selector is limited
to selecting only workload endpoints in the same namespace
as the NetworkPolicy. \n For NetworkPolicy, `global()`
NamespaceSelector implies that the Selector is limited
to selecting only GlobalNetworkSet or HostEndpoint. \n
For GlobalNetworkPolicy, an empty NamespaceSelector implies
the Selector applies to workload endpoints across all
namespaces."
type: string
nets:
description: Nets is an optional field that restricts the
rule to only apply to traffic that originates from (or
terminates at) IP addresses in any of the given subnets.
items:
type: string
type: array
notNets:
description: NotNets is the negated version of the Nets
field.
items:
type: string
type: array
notPorts:
description: NotPorts is the negated version of the Ports
field. Since only some protocols have ports, if any ports
are specified it requires the Protocol match in the Rule
to be set to "TCP" or "UDP".
items:
anyOf:
- type: integer
- type: string
pattern: ^.*
x-kubernetes-int-or-string: true
type: array
notSelector:
description: NotSelector is the negated version of the Selector
field. See Selector field for subtleties with negated
selectors.
type: string
ports:
description: "Ports is an optional field that restricts
the rule to only apply to traffic that has a source (destination)
port that matches one of these ranges/values. This value
is a list of integers or strings that represent ranges
of ports. \n Since only some protocols have ports, if
any ports are specified it requires the Protocol match
in the Rule to be set to \"TCP\" or \"UDP\"."
items:
anyOf:
- type: integer
- type: string
pattern: ^.*
x-kubernetes-int-or-string: true
type: array
selector:
description: "Selector is an optional field that contains
a selector expression (see Policy for sample syntax).
\ Only traffic that originates from (terminates at) endpoints
matching the selector will be matched. \n Note that: in
addition to the negated version of the Selector (see NotSelector
below), the selector expression syntax itself supports
negation. The two types of negation are subtly different.
One negates the set of matched endpoints, the other negates
the whole match: \n \tSelector = \"!has(my_label)\" matches
packets that are from other Calico-controlled \tendpoints
that do not have the label \"my_label\". \n \tNotSelector
= \"has(my_label)\" matches packets that are not from
Calico-controlled \tendpoints that do have the label \"my_label\".
\n The effect is that the latter will accept packets from
non-Calico sources whereas the former is limited to packets
from Calico-controlled endpoints."
type: string
serviceAccounts:
description: ServiceAccounts is an optional field that restricts
the rule to only apply to traffic that originates from
(or terminates at) a pod running as a matching service
account.
properties:
names:
description: Names is an optional field that restricts
the rule to only apply to traffic that originates
from (or terminates at) a pod running as a service
account whose name is in the list.
items:
type: string
type: array
selector:
description: Selector is an optional field that restricts
the rule to only apply to traffic that originates
from (or terminates at) a pod running as a service
account that matches the given label selector. If
both Names and Selector are specified then they are
AND'ed.
type: string
type: object
services:
description: "Services is an optional field that contains
options for matching Kubernetes Services. If specified,
only traffic that originates from or terminates at endpoints
within the selected service(s) will be matched, and only
to/from each endpoint's port. \n Services cannot be specified
on the same rule as Selector, NotSelector, NamespaceSelector,
Nets, NotNets or ServiceAccounts. \n Ports and NotPorts
can only be specified with Services on ingress rules."
properties:
name:
description: Name specifies the name of a Kubernetes
Service to match.
type: string
namespace:
description: Namespace specifies the namespace of the
given Service. If left empty, the rule will match
within this policy's namespace.
type: string
type: object
type: object
required:
- action
type: object
type: array
ingress:
description: The ordered set of ingress rules. Each rule contains
a set of packet match criteria and a corresponding action to apply.
items:
description: "A Rule encapsulates a set of match criteria and an
action. Both selector-based security Policy and security Profiles
reference rules - separated out as a list of rules for both ingress
and egress packet matching. \n Each positive match criteria has
a negated version, prefixed with \"Not\". All the match criteria
within a rule must be satisfied for a packet to match. A single
rule can contain the positive and negative version of a match
and both must be satisfied for the rule to match."
properties:
action:
type: string
destination:
description: Destination contains the match criteria that apply
to destination entity.
properties:
namespaceSelector:
description: "NamespaceSelector is an optional field that
contains a selector expression. Only traffic that originates
from (or terminates at) endpoints within the selected
namespaces will be matched. When both NamespaceSelector
and another selector are defined on the same rule, then
only workload endpoints that are matched by both selectors
will be selected by the rule. \n For NetworkPolicy, an
empty NamespaceSelector implies that the Selector is limited
to selecting only workload endpoints in the same namespace
as the NetworkPolicy. \n For NetworkPolicy, `global()`
NamespaceSelector implies that the Selector is limited
to selecting only GlobalNetworkSet or HostEndpoint. \n
For GlobalNetworkPolicy, an empty NamespaceSelector implies
the Selector applies to workload endpoints across all
namespaces."
type: string
nets:
description: Nets is an optional field that restricts the
rule to only apply to traffic that originates from (or
terminates at) IP addresses in any of the given subnets.
items:
type: string
type: array
notNets:
description: NotNets is the negated version of the Nets
field.
items:
type: string
type: array
notPorts:
description: NotPorts is the negated version of the Ports
field. Since only some protocols have ports, if any ports
are specified it requires the Protocol match in the Rule
to be set to "TCP" or "UDP".
items:
anyOf:
- type: integer
- type: string
pattern: ^.*
x-kubernetes-int-or-string: true
type: array
notSelector:
description: NotSelector is the negated version of the Selector
field. See Selector field for subtleties with negated
selectors.
type: string
ports:
description: "Ports is an optional field that restricts
the rule to only apply to traffic that has a source (destination)
port that matches one of these ranges/values. This value
is a list of integers or strings that represent ranges
of ports. \n Since only some protocols have ports, if
any ports are specified it requires the Protocol match
in the Rule to be set to \"TCP\" or \"UDP\"."
items:
anyOf:
- type: integer
- type: string
pattern: ^.*
x-kubernetes-int-or-string: true
type: array
selector:
description: "Selector is an optional field that contains
a selector expression (see Policy for sample syntax).
\ Only traffic that originates from (terminates at) endpoints
matching the selector will be matched. \n Note that: in
addition to the negated version of the Selector (see NotSelector
below), the selector expression syntax itself supports
negation. The two types of negation are subtly different.
One negates the set of matched endpoints, the other negates
the whole match: \n \tSelector = \"!has(my_label)\" matches
packets that are from other Calico-controlled \tendpoints
that do not have the label \"my_label\". \n \tNotSelector
= \"has(my_label)\" matches packets that are not from
Calico-controlled \tendpoints that do have the label \"my_label\".
\n The effect is that the latter will accept packets from
non-Calico sources whereas the former is limited to packets
from Calico-controlled endpoints."
type: string
serviceAccounts:
description: ServiceAccounts is an optional field that restricts
the rule to only apply to traffic that originates from
(or terminates at) a pod running as a matching service
account.
properties:
names:
description: Names is an optional field that restricts
the rule to only apply to traffic that originates
from (or terminates at) a pod running as a service
account whose name is in the list.
items:
type: string
type: array
selector:
description: Selector is an optional field that restricts
the rule to only apply to traffic that originates
from (or terminates at) a pod running as a service
account that matches the given label selector. If
both Names and Selector are specified then they are
AND'ed.
type: string
type: object
services:
description: "Services is an optional field that contains
options for matching Kubernetes Services. If specified,
only traffic that originates from or terminates at endpoints
within the selected service(s) will be matched, and only
to/from each endpoint's port. \n Services cannot be specified
on the same rule as Selector, NotSelector, NamespaceSelector,
Nets, NotNets or ServiceAccounts. \n Ports and NotPorts
can only be specified with Services on ingress rules."
properties:
name:
description: Name specifies the name of a Kubernetes
Service to match.
type: string
namespace:
description: Namespace specifies the namespace of the
given Service. If left empty, the rule will match
within this policy's namespace.
type: string
type: object
type: object
http:
description: HTTP contains match criteria that apply to HTTP
requests.
properties:
methods:
description: Methods is an optional field that restricts
the rule to apply only to HTTP requests that use one of
the listed HTTP Methods (e.g. GET, PUT, etc.) Multiple
methods are OR'd together.
items:
type: string
type: array
paths:
description: 'Paths is an optional field that restricts
the rule to apply to HTTP requests that use one of the
listed HTTP Paths. Multiple paths are OR''d together.
e.g: - exact: /foo - prefix: /bar NOTE: Each entry may
ONLY specify either a `exact` or a `prefix` match. The
validator will check for it.'
items:
description: 'HTTPPath specifies an HTTP path to match.
It may be either of the form: exact: : which matches
the path exactly or prefix: : which matches
the path prefix'
properties:
exact:
type: string
prefix:
type: string
type: object
type: array
type: object
icmp:
description: ICMP is an optional field that restricts the rule
to apply to a specific type and code of ICMP traffic. This
should only be specified if the Protocol field is set to "ICMP"
or "ICMPv6".
properties:
code:
description: Match on a specific ICMP code. If specified,
the Type value must also be specified. This is a technical
limitation imposed by the kernel's iptables firewall,
which Calico uses to enforce the rule.
type: integer
type:
description: Match on a specific ICMP type. For example
a value of 8 refers to ICMP Echo Request (i.e. pings).
type: integer
type: object
ipVersion:
description: IPVersion is an optional field that restricts the
rule to only match a specific IP version.
type: integer
metadata:
description: Metadata contains additional information for this
rule
properties:
annotations:
additionalProperties:
type: string
description: Annotations is a set of key value pairs that
give extra information about the rule
type: object
type: object
notICMP:
description: NotICMP is the negated version of the ICMP field.
properties:
code:
description: Match on a specific ICMP code. If specified,
the Type value must also be specified. This is a technical
limitation imposed by the kernel's iptables firewall,
which Calico uses to enforce the rule.
type: integer
type:
description: Match on a specific ICMP type. For example
a value of 8 refers to ICMP Echo Request (i.e. pings).
type: integer
type: object
notProtocol:
anyOf:
- type: integer
- type: string
description: NotProtocol is the negated version of the Protocol
field.
pattern: ^.*
x-kubernetes-int-or-string: true
protocol:
anyOf:
- type: integer
- type: string
description: "Protocol is an optional field that restricts the
rule to only apply to traffic of a specific IP protocol. Required
if any of the EntityRules contain Ports (because ports only
apply to certain protocols). \n Must be one of these string
values: \"TCP\", \"UDP\", \"ICMP\", \"ICMPv6\", \"SCTP\",
\"UDPLite\" or an integer in the range 1-255."
pattern: ^.*
x-kubernetes-int-or-string: true
source:
description: Source contains the match criteria that apply to
source entity.
properties:
namespaceSelector:
description: "NamespaceSelector is an optional field that
contains a selector expression. Only traffic that originates
from (or terminates at) endpoints within the selected
namespaces will be matched. When both NamespaceSelector
and another selector are defined on the same rule, then
only workload endpoints that are matched by both selectors
will be selected by the rule. \n For NetworkPolicy, an
empty NamespaceSelector implies that the Selector is limited
to selecting only workload endpoints in the same namespace
as the NetworkPolicy. \n For NetworkPolicy, `global()`
NamespaceSelector implies that the Selector is limited
to selecting only GlobalNetworkSet or HostEndpoint. \n
For GlobalNetworkPolicy, an empty NamespaceSelector implies
the Selector applies to workload endpoints across all
namespaces."
type: string
nets:
description: Nets is an optional field that restricts the
rule to only apply to traffic that originates from (or
terminates at) IP addresses in any of the given subnets.
items:
type: string
type: array
notNets:
description: NotNets is the negated version of the Nets
field.
items:
type: string
type: array
notPorts:
description: NotPorts is the negated version of the Ports
field. Since only some protocols have ports, if any ports
are specified it requires the Protocol match in the Rule
to be set to "TCP" or "UDP".
items:
anyOf:
- type: integer
- type: string
pattern: ^.*
x-kubernetes-int-or-string: true
type: array
notSelector:
description: NotSelector is the negated version of the Selector
field. See Selector field for subtleties with negated
selectors.
type: string
ports:
description: "Ports is an optional field that restricts
the rule to only apply to traffic that has a source (destination)
port that matches one of these ranges/values. This value
is a list of integers or strings that represent ranges
of ports. \n Since only some protocols have ports, if
any ports are specified it requires the Protocol match
in the Rule to be set to \"TCP\" or \"UDP\"."
items:
anyOf:
- type: integer
- type: string
pattern: ^.*
x-kubernetes-int-or-string: true
type: array
selector:
description: "Selector is an optional field that contains
a selector expression (see Policy for sample syntax).
\ Only traffic that originates from (terminates at) endpoints
matching the selector will be matched. \n Note that: in
addition to the negated version of the Selector (see NotSelector
below), the selector expression syntax itself supports
negation. The two types of negation are subtly different.
One negates the set of matched endpoints, the other negates
the whole match: \n \tSelector = \"!has(my_label)\" matches
packets that are from other Calico-controlled \tendpoints
that do not have the label \"my_label\". \n \tNotSelector
= \"has(my_label)\" matches packets that are not from
Calico-controlled \tendpoints that do have the label \"my_label\".
\n The effect is that the latter will accept packets from
non-Calico sources whereas the former is limited to packets
from Calico-controlled endpoints."
type: string
serviceAccounts:
description: ServiceAccounts is an optional field that restricts
the rule to only apply to traffic that originates from
(or terminates at) a pod running as a matching service
account.
properties:
names:
description: Names is an optional field that restricts
the rule to only apply to traffic that originates
from (or terminates at) a pod running as a service
account whose name is in the list.
items:
type: string
type: array
selector:
description: Selector is an optional field that restricts
the rule to only apply to traffic that originates
from (or terminates at) a pod running as a service
account that matches the given label selector. If
both Names and Selector are specified then they are
AND'ed.
type: string
type: object
services:
description: "Services is an optional field that contains
options for matching Kubernetes Services. If specified,
only traffic that originates from or terminates at endpoints
within the selected service(s) will be matched, and only
to/from each endpoint's port. \n Services cannot be specified
on the same rule as Selector, NotSelector, NamespaceSelector,
Nets, NotNets or ServiceAccounts. \n Ports and NotPorts
can only be specified with Services on ingress rules."
properties:
name:
description: Name specifies the name of a Kubernetes
Service to match.
type: string
namespace:
description: Namespace specifies the namespace of the
given Service. If left empty, the rule will match
within this policy's namespace.
type: string
type: object
type: object
required:
- action
type: object
type: array
order:
description: Order is an optional field that specifies the order in
which the policy is applied. Policies with higher "order" are applied
after those with lower order. If the order is omitted, it may be
considered to be "infinite" - i.e. the policy will be applied last. Policies
with identical order will be applied in alphanumerical order based
on the Policy "Name".
type: number
selector:
description: "The selector is an expression used to pick pick out
the endpoints that the policy should be applied to. \n Selector
expressions follow this syntax: \n \tlabel == \"string_literal\"
\ -> comparison, e.g. my_label == \"foo bar\" \tlabel != \"string_literal\"
\ -> not equal; also matches if label is not present \tlabel in
{ \"a\", \"b\", \"c\", ... } -> true if the value of label X is
one of \"a\", \"b\", \"c\" \tlabel not in { \"a\", \"b\", \"c\",
... } -> true if the value of label X is not one of \"a\", \"b\",
\"c\" \thas(label_name) -> True if that label is present \t! expr
-> negation of expr \texpr && expr -> Short-circuit and \texpr
|| expr -> Short-circuit or \t( expr ) -> parens for grouping \tall()
or the empty selector -> matches all endpoints. \n Label names are
allowed to contain alphanumerics, -, _ and /. String literals are
more permissive but they do not support escape characters. \n Examples
(with made-up labels): \n \ttype == \"webserver\" && deployment
== \"prod\" \ttype in {\"frontend\", \"backend\"} \tdeployment !=
\"dev\" \t! has(label_name)"
type: string
serviceAccountSelector:
description: ServiceAccountSelector is an optional field for an expression
used to select a pod based on service accounts.
type: string
types:
description: "Types indicates whether this policy applies to ingress,
or to egress, or to both. When not explicitly specified (and so
the value on creation is empty or nil), Calico defaults Types according
to what Ingress and Egress are present in the policy. The default
is: \n - [ PolicyTypeIngress ], if there are no Egress rules (including
the case where there are also no Ingress rules) \n - [ PolicyTypeEgress
], if there are Egress rules but no Ingress rules \n - [ PolicyTypeIngress,
PolicyTypeEgress ], if there are both Ingress and Egress rules.
\n When the policy is read back again, Types will always be one
of these values, never empty or nil."
items:
description: PolicyType enumerates the possible values of the PolicySpec
Types field.
type: string
type: array
type: object
type: object
served: true
storage: true
status:
acceptedNames:
kind: ""
plural: ""
conditions: []
storedVersions: []
---
# Source: calico/templates/kdd-crds.yaml
# CustomResourceDefinition for the namespaced Calico NetworkSet resource:
# a named set of IP networks (see spec.nets below), the namespaced
# counterpart of GlobalNetworkSet.
apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
  name: networksets.crd.projectcalico.org
spec:
  group: crd.projectcalico.org
  names:
    kind: NetworkSet
    listKind: NetworkSetList
    plural: networksets
    singular: networkset
  # Fields not declared in the schema below are pruned by the API server.
  preserveUnknownFields: false
  scope: Namespaced
  versions:
  - name: v1
    schema:
      openAPIV3Schema:
        description: NetworkSet is the Namespaced-equivalent of the GlobalNetworkSet.
        properties:
          apiVersion:
            description: 'APIVersion defines the versioned schema of this representation
              of an object. Servers should convert recognized schemas to the latest
              internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
            type: string
          kind:
            description: 'Kind is a string value representing the REST resource this
              object represents. Servers may infer this from the endpoint the client
              submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
            type: string
          metadata:
            type: object
          spec:
            description: NetworkSetSpec contains the specification for a NetworkSet
              resource.
            properties:
              nets:
                description: The list of IP networks that belong to this set.
                items:
                  type: string
                type: array
            type: object
        type: object
    served: true
    storage: true
status:
  acceptedNames:
    kind: ""
    plural: ""
  conditions: []
  storedVersions: []
---
# Source: calico/templates/calico-kube-controllers-rbac.yaml
# Include a clusterrole for the kube-controllers component,
# and bind it to the calico-kube-controllers serviceaccount.
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: calico-kube-controllers
rules:
  # Nodes are watched to monitor for deletions.
  - apiGroups: [""]
    resources:
      - nodes
    verbs:
      - watch
      - list
      - get
  # Pods are watched to check for existence as part of IPAM controller.
  - apiGroups: [""]
    resources:
      - pods
    verbs:
      - get
      - list
      - watch
  # IPAM resources are manipulated in response to node and block updates, as well as periodic triggers.
  # IP reservations are only listed here (read-only to the controller).
  - apiGroups: ["crd.projectcalico.org"]
    resources:
      - ipreservations
    verbs:
      - list
  # Block, affinity and handle objects are fully managed by the IPAM controller.
  - apiGroups: ["crd.projectcalico.org"]
    resources:
      - blockaffinities
      - ipamblocks
      - ipamhandles
    verbs:
      - get
      - list
      - create
      - update
      - delete
      - watch
  # Pools are watched to maintain a mapping of blocks to IP pools.
  - apiGroups: ["crd.projectcalico.org"]
    resources:
      - ippools
    verbs:
      - list
      - watch
  # kube-controllers manages hostendpoints.
  - apiGroups: ["crd.projectcalico.org"]
    resources:
      - hostendpoints
    verbs:
      - get
      - list
      - create
      - update
      - delete
  # Needs access to update clusterinformations.
  - apiGroups: ["crd.projectcalico.org"]
    resources:
      - clusterinformations
    verbs:
      - get
      - list
      - create
      - update
      - watch
  # KubeControllersConfiguration is where it gets its config
  - apiGroups: ["crd.projectcalico.org"]
    resources:
      - kubecontrollersconfigurations
    verbs:
      # read its own config
      - get
      # create a default if none exists
      - create
      # update status
      - update
      # watch for changes
      - watch
---
# Source: calico/templates/calico-node-rbac.yaml
# Include a clusterrole for the calico-node DaemonSet,
# and bind it to the calico-node serviceaccount.
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: calico-node
rules:
  # Used for creating service account tokens to be used by the CNI plugin
  - apiGroups: [""]
    resources:
      - serviceaccounts/token
    resourceNames:
      - calico-cni-plugin
    verbs:
      - create
  # The CNI plugin needs to get pods, nodes, and namespaces.
  - apiGroups: [""]
    resources:
      - pods
      - nodes
      - namespaces
    verbs:
      - get
  # EndpointSlices are used for Service-based network policy rule
  # enforcement.
  - apiGroups: ["discovery.k8s.io"]
    resources:
      - endpointslices
    verbs:
      - watch
      - list
  - apiGroups: [""]
    resources:
      - endpoints
      - services
    verbs:
      # Used to discover service IPs for advertisement.
      - watch
      - list
      # Used to discover Typhas.
      - get
  # Pod CIDR auto-detection on kubeadm needs access to config maps.
  - apiGroups: [""]
    resources:
      - configmaps
    verbs:
      - get
  - apiGroups: [""]
    resources:
      - nodes/status
    verbs:
      # Needed for clearing NodeNetworkUnavailable flag.
      - patch
      # Calico stores some configuration information in node annotations.
      - update
  # Watch for changes to Kubernetes NetworkPolicies.
  - apiGroups: ["networking.k8s.io"]
    resources:
      - networkpolicies
    verbs:
      - watch
      - list
  # Used by Calico for policy information.
  - apiGroups: [""]
    resources:
      - pods
      - namespaces
      - serviceaccounts
    verbs:
      - list
      - watch
  # The CNI plugin patches pods/status.
  - apiGroups: [""]
    resources:
      - pods/status
    verbs:
      - patch
  # Calico monitors various CRDs for config.
  - apiGroups: ["crd.projectcalico.org"]
    resources:
      - globalfelixconfigs
      - felixconfigurations
      - bgppeers
      - bgpfilters
      - globalbgpconfigs
      - bgpconfigurations
      - ippools
      - ipreservations
      - ipamblocks
      - globalnetworkpolicies
      - globalnetworksets
      - networkpolicies
      - networksets
      - clusterinformations
      - hostendpoints
      - blockaffinities
      - caliconodestatuses
    verbs:
      - get
      - list
      - watch
  # Calico must create and update some CRDs on startup.
  - apiGroups: ["crd.projectcalico.org"]
    resources:
      - ippools
      - felixconfigurations
      - clusterinformations
    verbs:
      - create
      - update
  # Calico must update some CRDs.
  - apiGroups: ["crd.projectcalico.org"]
    resources:
      - caliconodestatuses
    verbs:
      - update
  # Calico stores some configuration information on the node.
  - apiGroups: [""]
    resources:
      - nodes
    verbs:
      - get
      - list
      - watch
  # These permissions are only required for upgrade from v2.6, and can
  # be removed after upgrade or on fresh installations.
  - apiGroups: ["crd.projectcalico.org"]
    resources:
      - bgpconfigurations
      - bgppeers
    verbs:
      - create
      - update
  # These permissions are required for Calico CNI to perform IPAM allocations.
  - apiGroups: ["crd.projectcalico.org"]
    resources:
      - blockaffinities
      - ipamblocks
      - ipamhandles
    verbs:
      - get
      - list
      - create
      - update
      - delete
  # The CNI plugin and calico/node need to be able to create a default
  # IPAMConfiguration
  - apiGroups: ["crd.projectcalico.org"]
    resources:
      - ipamconfigs
    verbs:
      - get
      - create
  # Block affinities must also be watchable by confd for route aggregation.
  - apiGroups: ["crd.projectcalico.org"]
    resources:
      - blockaffinities
    verbs:
      - watch
  # The Calico IPAM migration needs to get daemonsets. These permissions can be
  # removed if not upgrading from an installation using host-local IPAM.
  - apiGroups: ["apps"]
    resources:
      - daemonsets
    verbs:
      - get
---
# Source: calico/templates/calico-node-rbac.yaml
# CNI cluster role
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: calico-cni-plugin
rules:
  # The CNI plugin needs to get pods, nodes, and namespaces.
  - apiGroups: [""]
    resources:
      - pods
      - nodes
      - namespaces
    verbs:
      - get
  # The CNI plugin patches pods/status.
  - apiGroups: [""]
    resources:
      - pods/status
    verbs:
      - patch
  # Read/write access to the Calico IPAM-related CRDs.
  - apiGroups: ["crd.projectcalico.org"]
    resources:
      - blockaffinities
      - ipamblocks
      - ipamhandles
      - clusterinformations
      - ippools
      - ipreservations
      - ipamconfigs
    verbs:
      - get
      - list
      - create
      - update
      - delete
---
# Source: calico/templates/calico-kube-controllers-rbac.yaml
# Grant the calico-kube-controllers ClusterRole to its ServiceAccount.
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: calico-kube-controllers
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: calico-kube-controllers
subjects:
- kind: ServiceAccount
  name: calico-kube-controllers
  namespace: kube-system
---
# Source: calico/templates/calico-node-rbac.yaml
# Grant the calico-node ClusterRole to the calico-node ServiceAccount.
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: calico-node
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: calico-node
subjects:
- kind: ServiceAccount
  name: calico-node
  namespace: kube-system
---
# Source: calico/templates/calico-node-rbac.yaml
# Grant the calico-cni-plugin ClusterRole to the calico-cni-plugin ServiceAccount.
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: calico-cni-plugin
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: calico-cni-plugin
subjects:
- kind: ServiceAccount
  name: calico-cni-plugin
  namespace: kube-system
---
# Source: calico/templates/calico-node.yaml
# This manifest installs the calico-node container, as well
# as the CNI plugins and network config on
# each master and worker node in a Kubernetes cluster.
kind: DaemonSet
apiVersion: apps/v1
metadata:
  name: calico-node
  namespace: kube-system
  labels:
    k8s-app: calico-node
spec:
  selector:
    matchLabels:
      k8s-app: calico-node
  # Roll pods one node at a time; at most one node is without calico-node
  # during an update.
  updateStrategy:
    type: RollingUpdate
    rollingUpdate:
      maxUnavailable: 1
  template:
    metadata:
      labels:
        k8s-app: calico-node
    spec:
      nodeSelector:
        kubernetes.io/os: linux
      # Run in the host's network namespace.
      hostNetwork: true
      tolerations:
        # Make sure calico-node gets scheduled on all nodes.
        - effect: NoSchedule
          operator: Exists
        # Mark the pod as a critical add-on for rescheduling.
        - key: CriticalAddonsOnly
          operator: Exists
        - effect: NoExecute
          operator: Exists
      serviceAccountName: calico-node
      # Minimize downtime during a rolling upgrade or deletion; tell Kubernetes to do a "force
      # deletion": https://kubernetes.io/docs/concepts/workloads/pods/pod/#termination-of-pods.
      terminationGracePeriodSeconds: 0
      priorityClassName: system-node-critical
      initContainers:
        # This container performs upgrade from host-local IPAM to calico-ipam.
        # It can be deleted if this is a fresh installation, or if you have already
        # upgraded to use calico-ipam.
        - name: upgrade-ipam
          image: docker.io/calico/cni:v3.26.4
          imagePullPolicy: IfNotPresent
          command: ["/opt/cni/bin/calico-ipam", "-upgrade"]
          envFrom:
            - configMapRef:
                # Allow KUBERNETES_SERVICE_HOST and KUBERNETES_SERVICE_PORT to be overridden for eBPF mode.
                name: kubernetes-services-endpoint
                optional: true
          env:
            - name: KUBERNETES_NODE_NAME
              valueFrom:
                fieldRef:
                  fieldPath: spec.nodeName
            - name: CALICO_NETWORKING_BACKEND
              valueFrom:
                configMapKeyRef:
                  name: calico-config
                  key: calico_backend
          volumeMounts:
            - mountPath: /var/lib/cni/networks
              name: host-local-net-dir
            - mountPath: /host/opt/cni/bin
              name: cni-bin-dir
          securityContext:
            privileged: true
        # This container installs the CNI binaries
        # and CNI network config file on each node.
        - name: install-cni
          image: docker.io/calico/cni:v3.26.4
          imagePullPolicy: IfNotPresent
          command: ["/opt/cni/bin/install"]
          envFrom:
            - configMapRef:
                # Allow KUBERNETES_SERVICE_HOST and KUBERNETES_SERVICE_PORT to be overridden for eBPF mode.
                name: kubernetes-services-endpoint
                optional: true
          env:
            # Name of the CNI config file to create.
            - name: CNI_CONF_NAME
              value: "10-calico.conflist"
            # The CNI network config to install on each node.
            - name: CNI_NETWORK_CONFIG
              valueFrom:
                configMapKeyRef:
                  name: calico-config
                  key: cni_network_config
            # Set the hostname based on the k8s node name.
            - name: KUBERNETES_NODE_NAME
              valueFrom:
                fieldRef:
                  fieldPath: spec.nodeName
            # CNI MTU Config variable
            - name: CNI_MTU
              valueFrom:
                configMapKeyRef:
                  name: calico-config
                  key: veth_mtu
            # Prevents the container from sleeping forever.
            - name: SLEEP
              value: "false"
          volumeMounts:
            - mountPath: /host/opt/cni/bin
              name: cni-bin-dir
            - mountPath: /host/etc/cni/net.d
              name: cni-net-dir
          securityContext:
            privileged: true
        # This init container mounts the necessary filesystems needed by the BPF data plane
        # i.e. bpf at /sys/fs/bpf and cgroup2 at /run/calico/cgroup. Calico-node initialisation is executed
        # in best effort fashion, i.e. no failure for errors, to not disrupt pod creation in iptable mode.
        - name: "mount-bpffs"
          image: docker.io/calico/node:v3.26.4
          imagePullPolicy: IfNotPresent
          command: ["calico-node", "-init", "-best-effort"]
          volumeMounts:
            - mountPath: /sys/fs
              name: sys-fs
              # Bidirectional is required to ensure that the new mount we make at /sys/fs/bpf propagates to the host
              # so that it outlives the init container.
              mountPropagation: Bidirectional
            - mountPath: /var/run/calico
              name: var-run-calico
              # Bidirectional is required to ensure that the new mount we make at /run/calico/cgroup propagates to the host
              # so that it outlives the init container.
              mountPropagation: Bidirectional
            # Mount /proc/ from host which usually is an init program at /nodeproc. It's needed by mountns binary,
            # executed by calico-node, to mount root cgroup2 fs at /run/calico/cgroup to attach CTLB programs correctly.
            - mountPath: /nodeproc
              name: nodeproc
              readOnly: true
          securityContext:
            privileged: true
      containers:
        # Runs calico-node container on each Kubernetes node. This
        # container programs network policy and routes on each
        # host.
        - name: calico-node
          image: docker.io/calico/node:v3.26.4
          imagePullPolicy: IfNotPresent
          envFrom:
            - configMapRef:
                # Allow KUBERNETES_SERVICE_HOST and KUBERNETES_SERVICE_PORT to be overridden for eBPF mode.
                name: kubernetes-services-endpoint
                optional: true
          env:
            # Use Kubernetes API as the backing datastore.
            - name: DATASTORE_TYPE
              value: "kubernetes"
            # Wait for the datastore.
            - name: WAIT_FOR_DATASTORE
              value: "true"
            # Set based on the k8s node name.
            - name: NODENAME
              valueFrom:
                fieldRef:
                  fieldPath: spec.nodeName
            # Choose the backend to use.
            - name: CALICO_NETWORKING_BACKEND
              valueFrom:
                configMapKeyRef:
                  name: calico-config
                  key: calico_backend
            # Cluster type to identify the deployment type
            - name: CLUSTER_TYPE
              value: "k8s,bgp"
            # Auto-detect the BGP IP address.
            - name: IP
              value: "autodetect"
            # Enable IPIP
            - name: CALICO_IPV4POOL_IPIP
              value: "Always"
            # Enable or Disable VXLAN on the default IP pool.
            - name: CALICO_IPV4POOL_VXLAN
              value: "Never"
            # Enable or Disable VXLAN on the default IPv6 IP pool.
            - name: CALICO_IPV6POOL_VXLAN
              value: "Never"
            # Set MTU for tunnel device used if ipip is enabled
            - name: FELIX_IPINIPMTU
              valueFrom:
                configMapKeyRef:
                  name: calico-config
                  key: veth_mtu
            # Set MTU for the VXLAN tunnel device.
            - name: FELIX_VXLANMTU
              valueFrom:
                configMapKeyRef:
                  name: calico-config
                  key: veth_mtu
            # Set MTU for the Wireguard tunnel device.
            - name: FELIX_WIREGUARDMTU
              valueFrom:
                configMapKeyRef:
                  name: calico-config
                  key: veth_mtu
            # The default IPv4 pool to create on startup if none exists. Pod IPs will be
            # chosen from this range. Changing this value after installation will have
            # no effect. This should fall within `--cluster-cidr`.
            # - name: CALICO_IPV4POOL_CIDR
            #   value: "192.168.0.0/16"
            # Disable file logging so `kubectl logs` works.
            - name: CALICO_DISABLE_FILE_LOGGING
              value: "true"
            # Set Felix endpoint to host default action to ACCEPT.
            - name: FELIX_DEFAULTENDPOINTTOHOSTACTION
              value: "ACCEPT"
            # Disable IPv6 on Kubernetes.
            - name: FELIX_IPV6SUPPORT
              value: "false"
            - name: FELIX_HEALTHENABLED
              value: "true"
          securityContext:
            privileged: true
          resources:
            requests:
              cpu: 250m
          lifecycle:
            preStop:
              exec:
                command:
                  - /bin/calico-node
                  - -shutdown
          livenessProbe:
            exec:
              command:
                - /bin/calico-node
                - -felix-live
                - -bird-live
            periodSeconds: 10
            initialDelaySeconds: 10
            failureThreshold: 6
            timeoutSeconds: 10
          readinessProbe:
            exec:
              command:
                - /bin/calico-node
                - -felix-ready
                - -bird-ready
            periodSeconds: 10
            timeoutSeconds: 10
          volumeMounts:
            # For maintaining CNI plugin API credentials.
            - mountPath: /host/etc/cni/net.d
              name: cni-net-dir
              readOnly: false
            - mountPath: /lib/modules
              name: lib-modules
              readOnly: true
            - mountPath: /run/xtables.lock
              name: xtables-lock
              readOnly: false
            - mountPath: /var/run/calico
              name: var-run-calico
              readOnly: false
            - mountPath: /var/lib/calico
              name: var-lib-calico
              readOnly: false
            - name: policysync
              mountPath: /var/run/nodeagent
            # For eBPF mode, we need to be able to mount the BPF filesystem at /sys/fs/bpf so we mount in the
            # parent directory.
            - name: bpffs
              mountPath: /sys/fs/bpf
            - name: cni-log-dir
              mountPath: /var/log/calico/cni
              readOnly: true
      volumes:
        # Used by calico-node.
        - name: lib-modules
          hostPath:
            path: /lib/modules
        - name: var-run-calico
          hostPath:
            path: /var/run/calico
        - name: var-lib-calico
          hostPath:
            path: /var/lib/calico
        - name: xtables-lock
          hostPath:
            path: /run/xtables.lock
            type: FileOrCreate
        - name: sys-fs
          hostPath:
            path: /sys/fs/
            type: DirectoryOrCreate
        - name: bpffs
          hostPath:
            path: /sys/fs/bpf
            type: Directory
        # mount /proc at /nodeproc to be used by mount-bpffs initContainer to mount root cgroup2 fs.
        - name: nodeproc
          hostPath:
            path: /proc
        # Used to install CNI.
        - name: cni-bin-dir
          hostPath:
            path: /opt/cni/bin
        - name: cni-net-dir
          hostPath:
            path: /etc/cni/net.d
        # Used to access CNI logs.
        - name: cni-log-dir
          hostPath:
            path: /var/log/calico/cni
        # Mount in the directory for host-local IPAM allocations. This is
        # used when upgrading from host-local to calico-ipam, and can be removed
        # if not using the upgrade-ipam init container.
        - name: host-local-net-dir
          hostPath:
            path: /var/lib/cni/networks
        # Used to create per-pod Unix Domain Sockets
        - name: policysync
          hostPath:
            type: DirectoryOrCreate
            path: /var/run/nodeagent
---
# Source: calico/templates/calico-kube-controllers.yaml
# See https://github.com/projectcalico/kube-controllers
apiVersion: apps/v1
kind: Deployment
metadata:
  name: calico-kube-controllers
  namespace: kube-system
  labels:
    k8s-app: calico-kube-controllers
spec:
  # The controllers can only have a single active instance.
  replicas: 1
  selector:
    matchLabels:
      k8s-app: calico-kube-controllers
  # Recreate (not RollingUpdate) so the old pod is stopped before a new one
  # starts, keeping a single active instance during updates.
  strategy:
    type: Recreate
  template:
    metadata:
      name: calico-kube-controllers
      namespace: kube-system
      labels:
        k8s-app: calico-kube-controllers
    spec:
      nodeSelector:
        kubernetes.io/os: linux
      tolerations:
        # Mark the pod as a critical add-on for rescheduling.
        - key: CriticalAddonsOnly
          operator: Exists
        # Allow scheduling onto control-plane/master nodes.
        - key: node-role.kubernetes.io/master
          effect: NoSchedule
        - key: node-role.kubernetes.io/control-plane
          effect: NoSchedule
      serviceAccountName: calico-kube-controllers
      priorityClassName: system-cluster-critical
      containers:
        - name: calico-kube-controllers
          image: docker.io/calico/kube-controllers:v3.26.4
          imagePullPolicy: IfNotPresent
          env:
            # Choose which controllers to run.
            - name: ENABLED_CONTROLLERS
              value: node
            - name: DATASTORE_TYPE
              value: kubernetes
          livenessProbe:
            exec:
              command:
                - /usr/bin/check-status
                - -l
            periodSeconds: 10
            initialDelaySeconds: 10
            failureThreshold: 6
            timeoutSeconds: 10
          readinessProbe:
            exec:
              command:
                - /usr/bin/check-status
                - -r
            periodSeconds: 10
================================================
FILE: e2e/clusters/eks-with-calico-cni/terraform/main.tf
================================================
# We spin up an EKS cluster for the Calico-CNI e2e scenario.
# Thin wrapper around the shared EKS module; every input is passed
# through unchanged from this configuration's variables (see vars.tf).
module "eks_cluster_calico_cni" {
  source                      = "../../aws-eks-terraform-module"
  region                      = var.region
  cluster_name                = var.cluster_name
  cluster_version             = var.cluster_version
  kubeconfig_file             = var.kubeconfig_file
  desired_size                = var.desired_size
  node_group_name             = var.node_group_name
  enable_vpc_network_policies = var.enable_vpc_network_policies
}
================================================
FILE: e2e/clusters/eks-with-calico-cni/terraform/vars.tf
================================================
# Inputs for the EKS-with-Calico-CNI e2e cluster; forwarded verbatim to the
# shared aws-eks-terraform-module in main.tf.
variable "region" {
  description = "AWS region"
  type        = string
}

variable "cluster_version" {
  description = "The AWS EKS cluster version"
  type        = string
}

variable "cluster_name" {
  type        = string
  description = "name of the cluster and VPC"
}

variable "kubeconfig_file" {
  type        = string
  description = "name of the file that contains the kubeconfig information"
  default     = ".kubeconfig"
}

variable "desired_size" {
  type        = number
  description = "desired size of the worker node pool"
  default     = 0
}

variable "node_group_name" {
  type        = string
  description = "prefix of the node group"
  default     = "group"
}

variable "enable_vpc_network_policies" {
  type        = bool
  description = "enable or disable vpc network policies"
}
================================================
FILE: e2e/clusters/eks-with-vpc-cni/terraform/main.tf
================================================
# We spin up an EKS cluster for the VPC-CNI e2e scenario.
# Thin wrapper around the shared EKS module; every input is passed
# through unchanged from this configuration's variables (see vars.tf).
module "eks_cluster_vpc_cni" {
  source                      = "../../aws-eks-terraform-module"
  region                      = var.region
  cluster_name                = var.cluster_name
  cluster_version             = var.cluster_version
  kubeconfig_file             = var.kubeconfig_file
  desired_size                = var.desired_size
  node_group_name             = var.node_group_name
  enable_vpc_network_policies = var.enable_vpc_network_policies
}
================================================
FILE: e2e/clusters/eks-with-vpc-cni/terraform/vars.tf
================================================
# Inputs for the EKS-with-VPC-CNI e2e cluster; forwarded verbatim to the
# shared aws-eks-terraform-module in main.tf.
variable "region" {
  description = "AWS region"
  type        = string
}

variable "cluster_version" {
  description = "The AWS EKS cluster version"
  type        = string
}

variable "cluster_name" {
  type        = string
  description = "name of the cluster and VPC"
}

variable "kubeconfig_file" {
  type        = string
  description = "name of the file that contains the kubeconfig information"
  default     = ".kubeconfig"
}

variable "desired_size" {
  type        = number
  description = "desired size of the worker node pool"
  default     = 0
}

variable "node_group_name" {
  type        = string
  description = "prefix of the node group"
  default     = "group"
}

variable "enable_vpc_network_policies" {
  type        = bool
  description = "enable or disable vpc network policies"
}
================================================
FILE: e2e/clusters/gke-dataplanev2/main.tf
================================================
terraform {
  required_providers {
    google = {
      source  = "hashicorp/google"
      version = "~> 4.57.0"
    }
  }
}

provider "google" {
  zone = var.zone
}

# Zonal GKE cluster used by the Dataplane V2 e2e tests.
resource "google_container_cluster" "e2etest" {
  name               = var.cluster_name
  initial_node_count = 4
  # ADVANCED_DATAPATH enables GKE Dataplane V2; null keeps the GKE default
  datapath_provider = var.use_dataplanev2 ? "ADVANCED_DATAPATH" : null

  # VPC-native cluster; the empty block uses default secondary ranges
  ip_allocation_policy {}

  node_config {
    machine_type = "e2-standard-2"
  }

  release_channel {
    channel = var.cluster_version
  }

  # fetch credentials into var.kubeconfig_file once the cluster exists
  # NOTE(review): the provider pins a zone, so self.location is a zone here;
  # `gcloud ... --region ${self.location}` may need `--zone` instead — verify
  provisioner "local-exec" {
    environment = {
      KUBECONFIG = var.kubeconfig_file
    }
    command = "gcloud container clusters get-credentials ${self.name} --region ${self.location}"
  }
}
================================================
FILE: e2e/clusters/gke-dataplanev2/variables.tf
================================================
variable "zone" {
  # GCP zone used by the google provider (clusters are zonal)
  type = string
}

variable "cluster_name" {
  type = string
}

variable "cluster_version" {
  # GKE release channel name, fed into release_channel.channel
  type = string
}

variable "use_dataplanev2" {
  # toggles ADVANCED_DATAPATH (Dataplane V2) on the cluster
  type = bool
}

variable "kubeconfig_file" {
  # path the local-exec provisioner writes the kubeconfig to
  type = string
}
================================================
FILE: e2e/clusters/gke-vpc/main.tf
================================================
terraform {
  required_providers {
    google = {
      source  = "hashicorp/google"
      version = "~> 4.57.0"
    }
  }
}

provider "google" {
  zone = var.zone
}

# Zonal GKE cluster with the legacy NetworkPolicy add-on enabled;
# used by the GKE-VPC e2e tests.
resource "google_container_cluster" "e2etest" {
  name               = var.cluster_name
  initial_node_count = 4

  # both the add-on and the cluster-level flag are set to enforce policies
  addons_config {
    network_policy_config {
      disabled = false
    }
  }
  network_policy {
    enabled = true
  }

  # VPC-native cluster; the empty block uses default secondary ranges
  ip_allocation_policy {}

  node_config {
    machine_type = "e2-standard-2"
  }

  release_channel {
    channel = var.cluster_version
  }

  # fetch credentials into var.kubeconfig_file once the cluster exists
  # NOTE(review): the provider pins a zone, so self.location is a zone here;
  # `gcloud ... --region ${self.location}` may need `--zone` instead — verify
  provisioner "local-exec" {
    environment = {
      KUBECONFIG = var.kubeconfig_file
    }
    command = "gcloud container clusters get-credentials ${self.name} --region ${self.location}"
  }
}
================================================
FILE: e2e/clusters/gke-vpc/variables.tf
================================================
variable "zone" {
  # GCP zone used by the google provider (clusters are zonal)
  type = string
}

variable "cluster_name" {
  type = string
}

variable "cluster_version" {
  # GKE release channel name, fed into release_channel.channel
  type = string
}

# NOTE(review): declared for interface parity with gke-dataplanev2 (the Go
# helper passes use_dataplanev2 to both configs) but not referenced by this
# configuration's main.tf — confirm before removing
variable "use_dataplanev2" {
  type = bool
}

variable "kubeconfig_file" {
  # path the local-exec provisioner writes the kubeconfig to
  type = string
}
================================================
FILE: e2e/clusters/kind/kind-config.yaml
================================================
---
kind: Cluster
apiVersion: kind.x-k8s.io/v1alpha4
# 1 control plane node and 3 workers
nodes:
  # the control plane node config
  - role: control-plane
    image: kindest/node:v1.35.0
  # the three workers
  - role: worker
    image: kindest/node:v1.35.0
  - role: worker
    image: kindest/node:v1.35.0
  - role: worker
    image: kindest/node:v1.35.0
networking:
  # the default CNI (kindnet) is disabled; Calico is applied afterwards by
  # e2e/helpers/kind.go, and the pod subnet matches its manifest default
  disableDefaultCNI: true
  podSubnet: 192.168.0.0/16
================================================
FILE: e2e/e2e_test.go
================================================
package e2e
import (
"bytes"
"context"
"io"
"os"
"slices"
"strings"
"testing"
"time"
"github.com/gruntwork-io/terratest/modules/k8s"
"github.com/hashicorp/go-hclog"
"github.com/stretchr/testify/require"
"k8s.io/apimachinery/pkg/util/yaml"
"github.com/controlplaneio/netassert/v2/e2e/helpers"
"github.com/controlplaneio/netassert/v2/internal/data"
"github.com/controlplaneio/netassert/v2/internal/engine"
"github.com/controlplaneio/netassert/v2/internal/kubeops"
"github.com/controlplaneio/netassert/v2/internal/logger"
)
// Tunables for the NetAssert e2e runs: container images/prefixes used by the
// engine, the test-case file, and where results are written.
const (
	suffixLength           = 9 // suffix length of the random string to be appended to the container name
	snifferContainerImage  = "docker.io/controlplane/netassertv2-packet-sniffer:latest"
	snifferContainerPrefix = "netassertv2-sniffer"
	scannerContainerImage  = "docker.io/controlplane/netassertv2-l4-client:latest"
	scannerContainerPrefix = "netassertv2-client"
	pauseInSeconds         = 5 // time to pause before each test case
	// NOTE(review): "Caputure" is a typo for "Capture"; renaming would also
	// touch its usage in runTests, so it is only flagged here
	packetCaputureInterface = `eth0`
	testCasesFile           = `./manifests/test-cases.yaml`
	resultFile              = "result.log" // where we write the results
)

// Environment variables that gate each provider-specific e2e test; a test is
// skipped unless its variable is set to a non-empty value.
var (
	envVarKind          = `KIND_E2E_TESTS`
	envVarGKEWithVPC    = `GKE_VPC_E2E_TESTS`
	envVarGKEWithDPv2   = `GKE_DPV2_E2E_TESTS`
	envVarEKSWithVPC    = `EKS_VPC_E2E_TESTS`
	envVarEKSWithCalico = `EKS_CALICO_E2E_TESTS`
)

// MinimalK8sObject captures just the fields needed to identify a workload in
// a multi-document manifest: kind, name and namespace.
type MinimalK8sObject struct {
	Kind     string `json:"kind"`
	Metadata struct {
		Name      string `json:"name"`
		Namespace string `json:"namespace"`
	} `json:"metadata"`
}

// denyAllPolicyBody is a namespace-scoped NetworkPolicy that blocks all
// ingress and egress traffic for every pod (empty podSelector).
var denyAllPolicyBody = `
apiVersion: networking.k8s.io/v1
kind: NetworkPolicy
metadata:
  name: default-deny-all
spec:
  podSelector: {}
  policyTypes:
  - Ingress
  - Egress
`
// TestMain is the entry point for the e2e test binary; there is no per-run
// setup or teardown beyond executing the tests themselves.
func TestMain(m *testing.M) {
	os.Exit(m.Run())
}
// TestKind provisions a kind cluster with Calico CNI and runs the full e2e
// flow; gated by the KIND_E2E_TESTS environment variable.
func TestKind(t *testing.T) {
	t.Parallel()
	if os.Getenv(envVarKind) == "" {
		t.Skipf("skipping test associated with Kind as %q environment variable was not set", envVarKind)
	}
	kind := helpers.NewKindCluster(t, "./clusters/kind", "kind-calico", helpers.Calico)
	createTestDestroy(t, kind)
}

// TestGKEWithVPC provisions a GKE cluster with the legacy NetworkPolicy
// add-on and runs the full e2e flow; gated by GKE_VPC_E2E_TESTS.
func TestGKEWithVPC(t *testing.T) {
	t.Parallel()
	if os.Getenv(envVarGKEWithVPC) == "" {
		t.Skipf("skipping test associated with GKE VPC as %q environment variable was not set", envVarGKEWithVPC)
	}
	gke := helpers.NewGKECluster(t, "./clusters/gke-vpc", "vpc", helpers.VPC)
	createTestDestroy(t, gke)
}

// TestGKEWithDataPlaneV2 provisions a GKE cluster with Dataplane V2 and runs
// the full e2e flow; gated by GKE_DPV2_E2E_TESTS.
func TestGKEWithDataPlaneV2(t *testing.T) {
	t.Parallel()
	if os.Getenv(envVarGKEWithDPv2) == "" {
		t.Skipf("skipping test associated with GKE DataPlaneV2 as %q environment variable was not set", envVarGKEWithDPv2)
	}
	gke := helpers.NewGKECluster(t, "./clusters/gke-dataplanev2", "dataplanev2", helpers.DataPlaneV2)
	createTestDestroy(t, gke)
}
// TestEKSWithVPC provisions an EKS cluster with the AWS VPC CNI (network
// policies enabled) and runs the full e2e flow; gated by EKS_VPC_E2E_TESTS.
func TestEKSWithVPC(t *testing.T) {
	t.Parallel()
	// inline env check for consistency with the Kind/GKE tests (the old
	// intermediate `pn` variable served no purpose)
	if os.Getenv(envVarEKSWithVPC) == "" {
		t.Skipf("skipping test associated with EKS VPC CNI as %q environment variable was not set", envVarEKSWithVPC)
	}
	eks := helpers.NewEKSCluster(t, "./clusters/eks-with-vpc-cni/terraform", "vpc", helpers.VPC)
	createTestDestroy(t, eks)
}

// TestEKSWithCalico provisions an EKS cluster, swaps the AWS CNI for Calico,
// and runs the full e2e flow; gated by EKS_CALICO_E2E_TESTS.
func TestEKSWithCalico(t *testing.T) {
	t.Parallel()
	if os.Getenv(envVarEKSWithCalico) == "" {
		t.Skipf("skipping test associated with EKS Calico CNI as environment %q was not set", envVarEKSWithCalico)
	}
	eks := helpers.NewEKSCluster(t, "./clusters/eks-with-calico-cni/terraform", "calico", helpers.Calico)
	createTestDestroy(t, eks)
}
// waitUntilManifestReady parses the multi-document YAML manifest at
// manifestPath and, for every pod-producing workload it declares
// (deployment, daemonset, replicaset, statefulset, pod), blocks until the
// resource reports ready via svc.WaitForPodInResourceReady.
//
// It returns the distinct namespaces the workloads are deployed into
// ("default" when a document omits metadata.namespace), so callers can later
// apply per-namespace policies.
func waitUntilManifestReady(t *testing.T, svc *kubeops.Service, manifestPath string) []string {
	timeout := 20 * time.Minute
	pollTime := 30 * time.Second

	// named `raw` rather than `data` to avoid shadowing the imported
	// internal/data package
	raw, err := os.ReadFile(manifestPath)
	require.NoError(t, err, "Failed to read manifest file")

	decoder := yaml.NewYAMLOrJSONDecoder(bytes.NewReader(raw), 4096)
	namespaces := make([]string, 0)

	for {
		var obj MinimalK8sObject
		err := decoder.Decode(&obj)
		if err == io.EOF {
			break
		}
		require.NoError(t, err, "Failed to parse YAML document")

		// t.Fatalf stops the test, so no continue is needed here (the old
		// `continue` after Fatalf was unreachable dead code and was removed)
		if obj.Kind == "" || obj.Metadata.Name == "" {
			t.Fatalf("Found malformed kubernetes document in YAML")
		}

		// only wait on resource kinds that actually create pods
		kind := strings.ToLower(obj.Kind)
		switch kind {
		case "deployment", "daemonset", "replicaset", "statefulset", "pod":
		default:
			continue
		}

		targetNs := strings.ToLower(obj.Metadata.Namespace)
		if targetNs == "" {
			targetNs = "default"
		}
		if !slices.Contains(namespaces, targetNs) {
			namespaces = append(namespaces, targetNs)
		}

		if err := svc.WaitForPodInResourceReady(obj.Metadata.Name, targetNs, kind, pollTime, timeout); err != nil {
			t.Fatalf("Error while waiting for resource %s to become ready: %s", obj.Metadata.Name, err.Error())
		}
	}

	return namespaces
}
// createTestDestroy provisions the cluster, runs the NetAssert test suite
// twice — once with workloads unrestricted (expecting the exit codes from
// the test-case file) and once behind per-namespace deny-all NetworkPolicies
// (expecting exit code 1 everywhere) — then tears the cluster down via the
// deferred Destroy.
func createTestDestroy(t *testing.T, gc helpers.GenericCluster) {
	defer gc.Destroy(t) // safe to call also when the cluster has not been created
	gc.Create(t)

	ctx := context.Background()
	kubeConfig := gc.KubeConfigGet()

	svc, err := kubeops.NewServiceFromKubeConfigFile(kubeConfig, hclog.NewNullLogger())
	if err != nil {
		t.Fatalf("Failed to build kubernetes client: %s", err)
	}

	if err := svc.PingHealthEndpoint(ctx, "/healthz"); err != nil {
		t.Fatalf("Failed to ping kubernetes server: %s", err)
	}
	t.Log("successfully pinged the k8s server")

	// create a new Kubernetes client
	options := k8s.NewKubectlOptions("", kubeConfig, "")

	// let's wait for all the nodes to be ready
	k8s.WaitUntilAllNodesReady(t, options, 20, 1*time.Minute)

	// we apply all the manifests and wait for their pods to become ready
	k8s.KubectlApply(t, options, "./manifests/workload.yaml")
	namespaces := waitUntilManifestReady(t, svc, "./manifests/workload.yaml")

	netAssertTestCases, err := data.ReadTestsFromFile(testCasesFile)
	if err != nil {
		t.Fatal(err)
	}

	// create the network policies
	k8s.KubectlApply(t, options, "./manifests/networkpolicies.yaml")

	// run the sample tests
	runTests(ctx, t, svc, netAssertTestCases)

	if gc.SkipNetPolTests() {
		return
	}

	// read the tests again for a fresh start
	netAssertTestCases, err = data.ReadTestsFromFile(testCasesFile)
	if err != nil {
		t.Fatal(err)
	}

	// set the expected exit code to 1 since this time the network policies
	// will block the traffic
	for _, tc := range netAssertTestCases {
		tc.ExitCode = 1
	}

	for _, ns := range namespaces {
		nsKubeOptions := k8s.NewKubectlOptions("", kubeConfig, ns)
		k8s.KubectlApplyFromString(t, nsKubeOptions, denyAllPolicyBody)
		// WaitUntilNetworkPolicyAvailable fails the test itself on timeout;
		// the stale `require.NoError(t, err, ...)` that used to follow it
		// re-checked an error already handled above and has been removed
		k8s.WaitUntilNetworkPolicyAvailable(t, nsKubeOptions, "default-deny-all", 10, 5*time.Second)
	}

	// run the tests with network policies blocking everything
	runTests(ctx, t, svc, netAssertTestCases)
}
// runTests executes the given NetAssert test cases against the cluster and
// writes a per-case pass/fail summary to resultFile and stdout. It fails the
// surrounding test if any case did not pass.
func runTests(ctx context.Context, t *testing.T, svc *kubeops.Service, netAssertTestCases data.Tests) {
	lg := logger.NewHCLogger("INFO", "netassertv2-e2e", os.Stdout)
	testRunner := engine.New(svc, lg)

	testRunner.RunTests(
		ctx,                    // context to use
		netAssertTestCases,     // net assert test cases
		snifferContainerPrefix, // prefix used for the sniffer container name
		snifferContainerImage,  // sniffer container image location
		scannerContainerPrefix, // scanner container prefix used in the container name
		scannerContainerImage,  // scanner container image location
		suffixLength,           // length of random string appended to the container prefixes
		time.Duration(pauseInSeconds)*time.Second, // time to pause between each test
		packetCaputureInterface, // the interface used by the sniffer image to capture traffic
	)

	fh, err := os.Create(resultFile)
	if err != nil {
		t.Fatalf("failed to create file %s: %s", resultFile, err)
	}
	defer fh.Close() // the file handle was previously leaked

	// write the results both to the result file and to stdout
	mr := io.MultiWriter(fh, os.Stdout)
	lg = logger.NewHCLogger("INFO", "netassertv2-e2e", mr)

	failedTestCases := 0
	for _, v := range netAssertTestCases {
		if v.Pass {
			lg.Info("✅ Test Result", "Name", v.Name, "Pass", v.Pass)
			continue
		}
		lg.Info("❌ Test Result", "Name", v.Name, "Pass", v.Pass, "FailureReason", v.FailureReason)
		failedTestCases++
	}

	if failedTestCases > 0 {
		// the old message appended `err`, which is always nil at this point;
		// report the number of failing cases instead
		t.Fatalf("%d e2e test case(s) have failed", failedTestCases)
	}
}
================================================
FILE: e2e/helpers/common.go
================================================
package helpers
import "testing"
// NetworkMode identifies the CNI / network-policy implementation a test
// cluster is provisioned with.
type NetworkMode string

// Supported network modes for the e2e clusters.
const (
	VPC         NetworkMode = "vpc"
	DataPlaneV2 NetworkMode = "dataplanev2"
	Calico      NetworkMode = "calico"
)

// GenericCluster is the lifecycle contract every e2e cluster helper (kind,
// GKE, EKS) implements: provision, tear down, expose the kubeconfig path,
// and report whether the NetworkPolicy phase of the suite should be skipped.
type GenericCluster interface {
	Create(t *testing.T)
	Destroy(t *testing.T)
	KubeConfigGet() string
	SkipNetPolTests() bool
}
================================================
FILE: e2e/helpers/eks.go
================================================
package helpers
import (
"context"
"os"
"testing"
"time"
"github.com/gruntwork-io/terratest/modules/k8s"
"github.com/gruntwork-io/terratest/modules/terraform"
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"github.com/controlplaneio/netassert/v2/internal/kubeops"
"github.com/controlplaneio/netassert/v2/internal/logger"
)
// EKSCluster implements GenericCluster for AWS EKS clusters provisioned
// through the shared terraform module.
type EKSCluster struct {
	terraformDir   string      // terraform configuration to apply
	region         string      // AWS region
	name           string      // cluster (and VPC) name
	version        string      // EKS version
	networkMode    NetworkMode // VPC CNI or Calico
	kubeConfig     string      // kubeconfig file name (relative to terraformDir)
	kubeConfigPath string      // full path to the kubeconfig file
	opts           *terraform.Options
}

// NewEKSCluster prepares (but does not apply) the terraform options for an
// EKS cluster named "netassert-<clusterNameSuffix>" in us-east-2.
//
// In Calico mode desired_size starts at 0 and node_group_name is "group":
// the cluster is created without worker nodes so installCalico can swap the
// CNI before scaling up to 3 nodes.
func NewEKSCluster(t *testing.T, terraformDir, clusterNameSuffix string, nm NetworkMode) *EKSCluster {
	name := "netassert-" + clusterNameSuffix
	c := &EKSCluster{
		terraformDir:   terraformDir,
		region:         "us-east-2",
		name:           name,
		version:        "1.34",
		networkMode:    nm,
		kubeConfig:     name + ".kubeconfig",
		kubeConfigPath: terraformDir + "/" + name + ".kubeconfig",
	}

	// VPC network policies are only turned on when running in VPC CNI mode
	vpcNetPol := nm == VPC
	tv := map[string]interface{}{
		"region":                      c.region,
		"cluster_version":             c.version,
		"cluster_name":                c.name,
		"kubeconfig_file":             c.kubeConfig,
		"desired_size":                3,
		"node_group_name":             "ng",
		"enable_vpc_network_policies": vpcNetPol,
	}
	if nm == Calico {
		tv["desired_size"] = 0
		tv["node_group_name"] = "group"
	}

	c.opts = terraform.WithDefaultRetryableErrors(t, &terraform.Options{
		TerraformDir: c.terraformDir,
		Vars:         tv,
	})
	return c
}
// Create applies the terraform configuration and, in Calico mode, replaces
// the AWS VPC CNI with Calico afterwards.
func (g *EKSCluster) Create(t *testing.T) {
	// terraform init
	terraform.InitAndPlan(t, g.opts)
	// terraform apply
	terraform.Apply(t, g.opts)

	if g.networkMode == Calico {
		g.installCalico(t)
	}
}
// installCalico replaces the AWS VPC CNI with Calico by following
// https://docs.tigera.io/calico/3.26/getting-started/kubernetes/managed-public-cloud/eks:
// delete the aws-node daemonset, apply the Calico manifest, then scale the
// node group from 0 to 3 with a second terraform apply so every node joins
// with Calico networking.
func (g *EKSCluster) installCalico(t *testing.T) {
	ctx := context.Background()
	lg := logger.NewHCLogger("INFO", "netassertv2-e2e-calico", os.Stdout)

	svc, err := kubeops.NewServiceFromKubeConfigFile(g.kubeConfigPath, lg)
	if err != nil {
		t.Fatalf("Failed to build kubernetes client: %s", err)
	}

	// kubectl delete daemonset -n kube-system aws-node
	// a NotFound error is fine: the daemonset may already be gone
	err = svc.Client.AppsV1().DaemonSets("kube-system").Delete(
		ctx, "aws-node", metav1.DeleteOptions{})
	if err != nil && !apierrors.IsNotFound(err) {
		// the original message dropped the underlying error; include it
		t.Fatalf("failed to delete daemonset aws-node in the kube-system namespace: %s", err)
	}
	svc.Log.Info("AWS-CNI", "msg", "Deleted daemonset aws-node in the kube-system namespace")

	// create a new Kubernetes client using the terratest package
	options := k8s.NewKubectlOptions("", g.kubeConfigPath, "")

	// we now apply the calico CNI manifest
	k8s.KubectlApply(t, options, g.terraformDir+"/../calico-3.26.4.yaml")

	// scale the worker node group up; new nodes come up with Calico CNI
	g.opts.Vars["desired_size"] = 3
	g.opts.Vars["node_group_name"] = "calico"
	newTFOptions := terraform.WithDefaultRetryableErrors(t, g.opts)
	if _, err := terraform.InitAndApplyE(t, newTFOptions); err != nil {
		t.Fatalf("failed to run terraform init and apply: %s", err)
	}

	svc.Log.Info("Sleeping 20 minutes so connectivity from the cluster to the Internet is restored")
	time.Sleep(20 * time.Minute)
}
// Destroy tears the cluster down with terraform; a nil opts means the
// constructor never ran, so there is nothing to destroy.
func (g *EKSCluster) Destroy(t *testing.T) {
	if g.opts != nil {
		terraform.Destroy(t, g.opts)
	}
}

// KubeConfigGet returns the full path of the kubeconfig written at
// provision time.
func (g *EKSCluster) KubeConfigGet() string {
	return g.kubeConfigPath
}

// SkipNetPolTests reports whether the deny-all NetworkPolicy phase of the
// suite should be skipped; it never is for EKS.
func (g *EKSCluster) SkipNetPolTests() bool {
	return false // used to be: return g.networkMode != Calico
}
================================================
FILE: e2e/helpers/gke.go
================================================
package helpers
import (
"testing"
"github.com/gruntwork-io/terratest/modules/terraform"
)
// GKECluster implements GenericCluster for GKE clusters managed via the
// terraform configurations under e2e/clusters/gke-*.
type GKECluster struct {
	terraformDir   string
	zone           string      // GCP zone (clusters are zonal)
	name           string
	version        string      // release channel name (e.g. REGULAR)
	networkMode    NetworkMode // VPC (legacy netpol) or DataPlaneV2
	kubeConfig     string      // kubeconfig file name
	kubeConfigPath string      // full path to the kubeconfig
	opts           *terraform.Options
}

// NewGKECluster prepares (but does not apply) terraform options for a GKE
// cluster named "netassert-<clusterNameSuffix>" in us-central1-b; Dataplane
// V2 is requested when nm == DataPlaneV2.
func NewGKECluster(t *testing.T, terraformDir, clusterNameSuffix string, nm NetworkMode) *GKECluster {
	name := "netassert-" + clusterNameSuffix
	c := &GKECluster{
		terraformDir:   terraformDir,
		zone:           "us-central1-b",
		name:           name,
		version:        "REGULAR",
		networkMode:    nm,
		kubeConfig:     name + ".kubeconfig",
		kubeConfigPath: terraformDir + "/" + name + ".kubeconfig",
	}
	c.opts = terraform.WithDefaultRetryableErrors(t, &terraform.Options{
		TerraformDir: c.terraformDir,
		Vars: map[string]interface{}{
			"zone":            c.zone,
			"cluster_name":    c.name,
			"cluster_version": c.version,
			"kubeconfig_file": c.kubeConfig,
			"use_dataplanev2": c.networkMode == DataPlaneV2,
		},
	})
	return c
}
// Create applies the terraform configuration (init + plan, then apply).
func (g *GKECluster) Create(t *testing.T) {
	// terraform init
	terraform.InitAndPlan(t, g.opts)
	// terraform apply
	terraform.Apply(t, g.opts)
}

// Destroy tears the cluster down; a nil opts means the constructor never
// ran, so there is nothing to destroy.
func (g *GKECluster) Destroy(t *testing.T) {
	if g.opts != nil {
		terraform.Destroy(t, g.opts)
	}
}

// KubeConfigGet returns the full path of the kubeconfig written at
// provision time.
func (g *GKECluster) KubeConfigGet() string {
	return g.kubeConfigPath
}

// SkipNetPolTests reports whether the deny-all NetworkPolicy phase of the
// suite should be skipped.
func (g *GKECluster) SkipNetPolTests() bool {
	return false // network policies are supported by all gke cluster configurations
}
================================================
FILE: e2e/helpers/kind.go
================================================
package helpers
import (
"os"
"testing"
"github.com/gruntwork-io/terratest/modules/k8s"
"sigs.k8s.io/kind/pkg/cluster"
"sigs.k8s.io/kind/pkg/cmd"
)
// KindCluster implements GenericCluster using kind (Kubernetes in Docker).
type KindCluster struct {
	name           string
	networkMode    NetworkMode
	configPath     string            // kind cluster config file
	kubeConfigPath string            // where Create writes the kubeconfig
	provider       *cluster.Provider // nil until Create succeeds
}

// NewKindCluster builds a KindCluster named "netassert-<clusterNameSuffix>"
// whose config and kubeconfig live under WorkspaceDir.
// The *testing.T parameter is unused here but kept for signature parity with
// the other cluster constructors.
func NewKindCluster(t *testing.T, WorkspaceDir string, clusterNameSuffix string, nm NetworkMode) *KindCluster {
	name := "netassert-" + clusterNameSuffix
	c := &KindCluster{
		name:           name,
		networkMode:    nm,
		configPath:     WorkspaceDir + "/kind-config.yaml",
		kubeConfigPath: WorkspaceDir + "/" + name + ".kubeconfig",
	}
	return c
}
// Create stands up the kind cluster described by k.configPath, writes its
// kubeconfig to k.kubeConfigPath, and applies the Calico manifest (the kind
// config ships with the default CNI disabled).
func (k *KindCluster) Create(t *testing.T) {
	if _, err := os.Stat(k.configPath); os.IsNotExist(err) {
		// message fixed: "does not exit" -> "does not exist"
		t.Fatalf("Error: config file %s does not exist", k.configPath)
	}

	provider := cluster.NewProvider(
		cluster.ProviderWithLogger(cmd.NewLogger()),
	)

	t.Logf("Creating cluster %s", k.name)
	err := provider.Create(
		k.name,
		cluster.CreateWithKubeconfigPath(k.kubeConfigPath),
		cluster.CreateWithConfigFile(k.configPath),
		cluster.CreateWithDisplayUsage(false),
		cluster.CreateWithDisplaySalutation(false),
	)
	if err != nil {
		t.Fatalf("Error while creating cluster: %v", err)
	}
	// remember the provider so Destroy can delete the cluster later
	k.provider = provider

	options := k8s.NewKubectlOptions("", k.kubeConfigPath, "")
	k8s.KubectlApply(t, options, "https://raw.githubusercontent.com/projectcalico/calico/v3.31.3/manifests/calico.yaml")
}
// Destroy deletes the kind cluster (only if Create succeeded) and removes
// the kubeconfig file in any case.
func (k *KindCluster) Destroy(t *testing.T) {
	if k.provider != nil {
		t.Logf("Deleting cluster %s", k.name)
		if err := k.provider.Delete(k.name, k.kubeConfigPath); err != nil {
			t.Errorf("Error while deleting cluster %s: %v", k.name, err)
		}
	}
	_ = os.Remove(k.kubeConfigPath)
}

// KubeConfigGet returns the path of the kubeconfig written by Create.
func (k *KindCluster) KubeConfigGet() string {
	return k.kubeConfigPath
}

// SkipNetPolTests reports whether the deny-all NetworkPolicy phase of the
// suite should be skipped; it never is for kind.
func (k *KindCluster) SkipNetPolTests() bool {
	return false
}
================================================
FILE: e2e/manifests/networkpolicies.yaml
================================================
# Egress NetworkPolicy for the web namespace: policyTypes lists Egress and
# the egress rule list is empty, so all outbound traffic from pods labelled
# app=nginx (the `web` statefulset) is denied.
kind: NetworkPolicy
apiVersion: networking.k8s.io/v1
metadata:
  namespace: web
  name: web
spec:
  podSelector:
    matchLabels:
      app: nginx
  policyTypes:
    - Egress
  egress: []
================================================
FILE: e2e/manifests/test-cases.yaml
================================================
---
- name: busybox-deploy-to-echoserver-deploy
type: k8s
protocol: tcp
targetPort: 8080
timeoutSeconds: 269
attempts: 3
exitCode: 0
src:
k8sResource:
kind: deployment
name: busybox
namespace: busybox
dst:
k8sResource:
kind: deployment
name: echoserver
namespace: echoserver
######
######
- name: busybox-deploy-to-echoserver-deploy-2
type: k8s
protocol: udp
targetPort: 53
timeoutSeconds: 269
attempts: 3
exitCode: 0
src:
k8sResource:
kind: deployment
name: busybox
namespace: busybox
dst:
k8sResource:
kind: deployment
name: echoserver
namespace: echoserver
########
#########
- name: fluentd-deamonset-to-echoserver-deploy
type: k8s
protocol: udp
targetPort: 53
timeoutSeconds: 269
attempts: 3
exitCode: 0
src:
k8sResource:
kind: daemonset
name: fluentd
namespace: fluentd
dst:
k8sResource:
kind: deployment
name: echoserver
namespace: echoserver
#######
######
- name: busybox-deploy-to-web-statefulset
type: k8s
protocol: tcp
targetPort: 80
timeoutSeconds: 269
attempts: 3
exitCode: 0
src:
k8sResource: # this is type endpoint
kind: deployment
name: busybox
namespace: busybox
dst:
k8sResource: ## this is type endpoint
kind: statefulset
name: web
namespace: web
#######
######
- name: web-statefulset-to-busybox-deploy
type: k8s
protocol: tcp
targetPort: 8080
timeoutSeconds: 269
attempts: 3
exitCode: 1
src:
k8sResource: ## this is type endpoint
kind: statefulset
name: web
namespace: web
dst:
k8sResource:
kind: deployment
name: echoserver
namespace: echoserver
#######
######
- name: fluentd-daemonset-to-web-statefulset
type: k8s
protocol: tcp
targetPort: 80
timeoutSeconds: 269
attempts: 3
exitCode: 0
src:
k8sResource: # this is type endpoint
kind: daemonset
name: fluentd
namespace: fluentd
dst:
k8sResource: ## this is type endpoint
kind: statefulset
name: web
namespace: web
###
####
- name: busybox-deploy-to-control-plane-dot-io
type: k8s
protocol: tcp
targetPort: 80
timeoutSeconds: 269
attempts: 10
exitCode: 0
src:
k8sResource: # type endpoint
kind: deployment
name: busybox
namespace: busybox
dst:
host: # type host or node or machine
name: control-plane.io
###
###
- name: test-from-pod1-to-pod2
type: k8s
protocol: tcp
targetPort: 80
timeoutSeconds: 269
attempts: 3
exitCode: 0
src:
k8sResource: ##
kind: pod
name: pod1
namespace: pod1
dst:
k8sResource:
kind: pod
name: pod2
namespace: pod2
###
###
- name: busybox-deploy-to-fake-host
type: k8s
protocol: tcp
targetPort: 333
timeoutSeconds: 269
attempts: 3
exitCode: 1
src:
k8sResource: # type endpoint
kind: deployment
name: busybox
namespace: busybox
dst:
host: # type host or node or machine
name: 0.0.0.0
================================================
FILE: e2e/manifests/workload.yaml
================================================
---
apiVersion: v1
kind: Namespace
metadata:
name: fluentd
---
apiVersion: apps/v1
kind: DaemonSet
metadata:
name: fluentd
namespace: fluentd
labels:
k8s-app: fluentd-logging
spec:
selector:
matchLabels:
name: fluentd-elasticsearch
template:
metadata:
labels:
name: fluentd-elasticsearch
spec:
tolerations:
# these tolerations are to have the daemonset runnable on control plane nodes
# remove them if your control plane nodes should not run pods
- key: node-role.kubernetes.io/control-plane
operator: Exists
effect: NoSchedule
- key: node-role.kubernetes.io/master
operator: Exists
effect: NoSchedule
containers:
- name: fluentd-elasticsearch
image: fluentd:v1.18-1
resources:
limits:
memory: 200Mi
requests:
cpu: 100m
memory: 200Mi
---
apiVersion: v1
kind: Namespace
metadata:
name: echoserver
---
apiVersion: v1
kind: Namespace
metadata:
name: busybox
---
# echoserver deployment: TCP test target on port 8080 for the e2e test cases.
apiVersion: apps/v1
kind: Deployment
metadata:
  name: echoserver
  namespace: echoserver
  labels:
    app: echoserver-deploy
spec:
  replicas: 1
  selector:
    matchLabels:
      app: echoserver
  template:
    metadata:
      labels:
        app: echoserver
    spec:
      containers:
        - name: echoserver
          # registry.k8s.io replaces the frozen k8s.gcr.io registry
          image: registry.k8s.io/e2e-test-images/echoserver:2.5
          imagePullPolicy: IfNotPresent
          ports:
            - containerPort: 8080
              name: web
          resources:
            requests:
              memory: 64Mi
              cpu: 300m
            limits:
              memory: 64Mi
              cpu: 400m
          securityContext:
            allowPrivilegeEscalation: false
            privileged: false
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: busybox
namespace: busybox
labels:
app: busybox
spec:
replicas: 1
selector:
matchLabels:
app: busybox
template:
metadata:
labels:
app: busybox
spec:
containers:
- name: busybox
image: busybox
command:
- sleep
- "360000"
imagePullPolicy: IfNotPresent
resources:
requests:
memory: 64Mi
cpu: 300m
limits:
memory: 64Mi
cpu: 400m
securityContext:
allowPrivilegeEscalation: false
privileged: false
---
apiVersion: v1
kind: Namespace
metadata:
name: pod1
---
apiVersion: v1
kind: Namespace
metadata:
name: pod2
---
apiVersion: v1
kind: Pod
metadata:
name: pod2
namespace: pod2
labels:
name: pod2
spec:
containers:
- name: webserver
image: nginx:latest
ports:
- containerPort: 80
---
apiVersion: v1
kind: Pod
metadata:
name: pod1
namespace: pod1
labels:
name: pod1
spec:
containers:
- name: busybox
image: busybox
command:
- sleep
- "360000"
imagePullPolicy: IfNotPresent
resources:
requests:
memory: 64Mi
cpu: 300m
limits:
memory: 64Mi
cpu: 400m
---
apiVersion: v1
kind: Namespace
metadata:
name: web
---
apiVersion: apps/v1
kind: StatefulSet
metadata:
name: web
namespace: web
spec:
serviceName: "nginx"
replicas: 2
selector:
matchLabels:
app: nginx
template:
metadata:
labels:
app: nginx
spec:
containers:
- name: nginx
image: nginx:latest
ports:
- containerPort: 80
name: web
================================================
FILE: fluxcd-demo/README.md
================================================
# 🚀 FluxCD Demo Guide
This guide walks you through setting up a **FluxCD** demo environment using **kind** (Kubernetes in Docker) and a **local Helm chart registry**.
You’ll see how Flux automates Helm releases and how to observe its reconciliation behavior in action while running tests with **NetAssert**.
---
## 🧰 Prerequisites
Before starting, make sure you have the following tools installed:
- [Docker](https://docs.docker.com/get-docker/)
- [kubectl](https://kubernetes.io/docs/tasks/tools/)
- [kind](https://kind.sigs.k8s.io/)
- [Helm](https://helm.sh/docs/intro/install/)
---
## 🏗️ Step 1: Set Up the Environment
### 1.1 Start a Local Docker Registry
FluxCD can work with OCI-based Helm registries. Start a local Docker registry to host your Helm charts:
```bash
docker run -d -p 5000:5000 --restart=always --name registry-5000 registry:2
```
This creates a local registry accessible at `localhost:5000`.
---
### 1.2 Create a Kind Cluster
Create a local Kubernetes cluster using your configuration file:
```bash
kind create cluster --config kind-cluster.yaml
```
Once complete, verify the cluster is ready:
```bash
kubectl cluster-info
kubectl get nodes
```
---
## ⚙️ Step 2: Install FluxCD
Refer to the official documentation for detailed installation instructions:
👉 [FluxCD Installation Guide](https://fluxcd.io/flux/installation/)
For this demo, you can use the following command:
```bash
kubectl apply -f https://github.com/fluxcd/flux2/releases/download/v2.7.2/install.yaml
```
Verify that FluxCD is running:
```bash
kubectl get pods -n flux-system
```
Expected output should include components like:
```
helm-controller
kustomize-controller
notification-controller
source-controller
```
All should reach the `Running` state.
---
## 📦 Step 3: Package and Push the Helm Chart
### 3.1 Update Chart Versions
Before packaging, update the NetAssert subchart to a version available in the packages section of this repo.
---
### 3.2 Package the Helm Chart
Run the following command to package your chart into a `.tgz` archive:
```bash
helm package ./helm -d .
```
This produces a packaged chart file, for example:
```
./fluxcd-demo-0.0.1-dev.tgz
```
---
### 3.3 Push the Chart to the Local Registry
Push the packaged Helm chart to your local OCI registry:
```bash
helm push ./fluxcd-demo-0.0.1-dev.tgz oci://localhost:5000/fluxcd/
```
---
### 3.4 Apply the FluxCD configs
Apply the fluxcd-helmconfig.yaml file so FluxCD can release the charts:
```bash
kubectl apply -f fluxcd-helmconfig.yaml
```
---
## 🔄 Step 4: Watch Flux Reconcile the Release with NetAssert Tests
Flux continuously monitors and applies Helm releases defined in your cluster.
To observe its behavior, list Helm releases managed by Flux:
```bash
kubectl get helmreleases
```
Flux will automatically pull your Helm chart from the registry and apply it.
---
### 🧩 What to Observe
- The **init container** in your k8s deployment object intentionally delays completion.
- The **Netassert** job will not be created until the deployment finishes.
- Once the deployment completes, NetAssert runs as a Job; when the Job finishes, it marks the release as successful or failed.
---
## 🔁 Step 5: Demonstrate an Upgrade
You can simulate a Helm chart upgrade to observe Flux’s automated update handling.
1. **Update chart version** — bump your chart version.
2. **Repackage** the chart:
```bash
helm package ./helm -d .
```
3. **Push** the new version to the registry:
```bash
helm push ./fluxcd-demo-0.0.2-dev.tgz oci://localhost:5000/fluxcd/
```
4. **Watch** Flux detect and reconcile the new version:
```bash
kubectl get helmreleases -w
```
You’ll see Flux automatically roll out the new chart and update your resources in place, and then run the NetAssert tests.
================================================
FILE: fluxcd-demo/fluxcd-helmconfig.yaml
================================================
apiVersion: source.toolkit.fluxcd.io/v1
kind: HelmRepository
metadata:
name: demo-repo
namespace: default
spec:
type: "oci"
insecure: true
interval: 10s
url: oci://host.docker.internal:5000/fluxcd
---
apiVersion: helm.toolkit.fluxcd.io/v2
kind: HelmRelease
metadata:
name: demo-release
namespace: default
spec:
interval: 10s
timeout: 5m
chart:
spec:
chart: fluxcd-demo
version: '0.0.x-dev'
sourceRef:
kind: HelmRepository
name: demo-repo
interval: 1m
releaseName: myhelmrelease
# valuesFrom:
# - kind: ConfigMap
# name: tests
# valuesKey: test-cases.yaml
# targetPath: testFile
================================================
FILE: fluxcd-demo/helm/Chart.yaml
================================================
apiVersion: v1
description: fluxcd-demo
name: fluxcd-demo
version: 0.0.1-dev
appVersion: 0.0.1-dev
dependencies:
  - name: netassert
    repository: oci://ghcr.io/controlplaneio/charts
    # NOTE(review): version is left empty here — per the demo README (step
    # 3.1) set it to a published netassert chart version before running
    # `helm package`; an empty value presumably fails dependency resolution
    version:
================================================
FILE: fluxcd-demo/helm/templates/_helpers.tpl
================================================
{{/* vim: set filetype=mustache: */}}
{{/*
Expand the name of the chart.
*/}}
{{- define "fluxcd-demo.name" -}}
{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}}
{{- end -}}
{{/*
Create a default fully qualified app name.
We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
If release name contains chart name it will be used as a full name.
*/}}
{{- define "fluxcd-demo.fullname" -}}
{{- if .Values.fullnameOverride -}}
{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}}
{{- else -}}
{{- $name := default .Chart.Name .Values.nameOverride -}}
{{- if contains $name .Release.Name -}}
{{- .Release.Name | trunc 63 | trimSuffix "-" -}}
{{- else -}}
{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}}
{{- end -}}
{{- end -}}
{{- end -}}
{{/*
Create chart name and version as used by the chart label.
*/}}
{{- define "fluxcd-demo.chart" -}}
{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}}
{{- end -}}
================================================
FILE: fluxcd-demo/helm/templates/deployment.yaml
================================================
---
apiVersion: v1
kind: Namespace
metadata:
name: echoserver
---
apiVersion: v1
kind: Namespace
metadata:
name: busybox
---
# echoserver deployment for the FluxCD demo; the init container delays pod
# readiness so the NetAssert post-deploy Job observably waits for it.
apiVersion: apps/v1
kind: Deployment
metadata:
  name: {{ template "fluxcd-demo.fullname" . }}-echoserver
  namespace: echoserver
  labels:
    app: echoserver-deploy
spec:
  replicas: 1
  selector:
    matchLabels:
      app: echoserver
  template:
    metadata:
      labels:
        app: echoserver
    spec:
      initContainers:
        - name: "sleepy"
          image: busybox:1.36
          command: ["sh", "-c", "echo 'Sleeping...'; sleep 20"]
      containers:
        - name: echoserver
          # registry.k8s.io replaces the frozen k8s.gcr.io registry
          image: registry.k8s.io/e2e-test-images/echoserver:2.5
          imagePullPolicy: IfNotPresent
          ports:
            - containerPort: 8080
              name: web
          resources:
            requests:
              memory: 64Mi
              cpu: 300m
            limits:
              memory: 64Mi
              cpu: 400m
          securityContext:
            allowPrivilegeEscalation: false
            privileged: false
---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: {{ template "fluxcd-demo.fullname" . }}-busybox
  namespace: busybox
  labels:
    app: busybox
spec:
  replicas: 1
  selector:
    matchLabels:
      app: busybox
  template:
    metadata:
      labels:
        app: busybox
    spec:
      containers:
        - name: busybox
          # Pin the tag (was the floating "latest" default) to match the
          # busybox:1.36 initContainer used elsewhere in this chart, so the
          # test environment cannot change underneath us.
          image: busybox:1.36
          # Keep the pod alive as a long-lived source for connectivity tests.
          command:
            - sleep
            - "360000"
          imagePullPolicy: IfNotPresent
          resources:
            requests:
              memory: 64Mi
              cpu: 300m
            limits:
              memory: 64Mi
              cpu: 400m
          securityContext:
            allowPrivilegeEscalation: false
            privileged: false
...
================================================
FILE: fluxcd-demo/helm/templates/pod1-pod2.yaml
================================================
---
apiVersion: v1
kind: Namespace
metadata:
name: pod1
---
apiVersion: v1
kind: Namespace
metadata:
name: pod2
---
apiVersion: v1
kind: Pod
metadata:
  name: {{ template "fluxcd-demo.fullname" . }}-pod2
  namespace: pod2
spec:
  containers:
    - name: webserver
      # Pinned (was nginx:latest) to the same nginx version the chart's
      # StatefulSet uses, keeping test runs reproducible.
      image: nginx:1.28
      ports:
        - containerPort: 80
---
apiVersion: v1
kind: Pod
metadata:
  name: {{ template "fluxcd-demo.fullname" . }}-pod1
  namespace: pod1
spec:
  containers:
    - name: busybox
      # Pinned (was the floating "latest" default) to match the
      # busybox:1.36 image used elsewhere in this chart.
      image: busybox:1.36
      # Keep the pod alive as a long-lived source for connectivity tests.
      command:
        - sleep
        - "360000"
      imagePullPolicy: IfNotPresent
      resources:
        requests:
          memory: 64Mi
          cpu: 300m
        limits:
          memory: 64Mi
          cpu: 400m
================================================
FILE: fluxcd-demo/helm/templates/post-deploy-tests.yaml
================================================
apiVersion: v1
# Keys reordered to the conventional apiVersion/kind/metadata/data layout
# used by every other manifest in this chart (the original placed "data"
# before kind/metadata — semantically identical, since YAML mappings are
# unordered, but harder to read). The embedded test.yaml is the netassert
# test-case list consumed by the post-deploy test job.
kind: ConfigMap
metadata:
  name: "{{ .Release.Name }}-netassert"
data:
  test.yaml: |
    ---
    - name: busybox-deploy-to-echoserver-deploy
      type: k8s
      protocol: tcp
      targetPort: 8080
      timeoutSeconds: 67
      attempts: 3
      exitCode: 0
      src:
        k8sResource:
          kind: deployment
          name: {{ template "fluxcd-demo.fullname" . }}-busybox
          namespace: busybox
      dst:
        k8sResource:
          kind: deployment
          name: {{ template "fluxcd-demo.fullname" . }}-echoserver
          namespace: echoserver
    ######
    ######
    - name: busybox-deploy-to-echoserver-deploy-2
      type: k8s
      protocol: udp
      targetPort: 53
      timeoutSeconds: 67
      attempts: 1
      exitCode: 0
      src:
        k8sResource:
          kind: deployment
          name: {{ template "fluxcd-demo.fullname" . }}-busybox
          namespace: busybox
      dst:
        k8sResource:
          kind: deployment
          name: {{ template "fluxcd-demo.fullname" . }}-echoserver
          namespace: echoserver
    ########
    #########
    #######
    ######
    - name: busybox-deploy-to-web-statefulset
      type: k8s
      protocol: tcp
      targetPort: 80
      timeoutSeconds: 67
      attempts: 3
      exitCode: 0
      src:
        k8sResource: # this is type endpoint
          kind: deployment
          name: {{ template "fluxcd-demo.fullname" . }}-busybox
          namespace: busybox
      dst:
        k8sResource: ## this is type endpoint
          kind: statefulset
          name: {{ template "fluxcd-demo.fullname" . }}-web
          namespace: web
    ###
    ####
    - name: busybox-deploy-to-control-plane-dot-io
      type: k8s
      protocol: tcp
      targetPort: 80
      timeoutSeconds: 67
      attempts: 3
      exitCode: 0
      src:
        k8sResource: # type endpoint
          kind: deployment
          name: {{ template "fluxcd-demo.fullname" . }}-busybox
          namespace: busybox
      dst:
        host: # type host or node or machine
          name: control-plane.io
    ###
    ###
    - name: test-from-pod1-to-pod2
      type: k8s
      protocol: tcp
      targetPort: 80
      timeoutSeconds: 67
      attempts: 3
      exitCode: 0
      src:
        k8sResource: ##
          kind: pod
          name: {{ template "fluxcd-demo.fullname" . }}-pod1
          namespace: pod1
      dst:
        k8sResource:
          kind: pod
          name: {{ template "fluxcd-demo.fullname" . }}-pod2
          namespace: pod2
    ###
    ###
    - name: busybox-deploy-to-fake-host
      type: k8s
      protocol: tcp
      targetPort: 333
      timeoutSeconds: 67
      attempts: 3
      exitCode: 1
      src:
        k8sResource: # type endpoint
          kind: deployment
          name: {{ template "fluxcd-demo.fullname" . }}-busybox
          namespace: busybox
      dst:
        host: # type host or node or machine
          name: 0.0.0.0
================================================
FILE: fluxcd-demo/helm/templates/statefulset.yaml
================================================
---
apiVersion: v1
kind: Namespace
metadata:
  name: web
---
apiVersion: apps/v1
kind: StatefulSet
metadata:
  name: {{ template "fluxcd-demo.fullname" . }}-web
  namespace: web
spec:
  # NOTE(review): serviceName references a headless Service "nginx" that is
  # not defined in this file — confirm it exists elsewhere in the chart,
  # otherwise the StatefulSet pods get no stable per-pod DNS records.
  serviceName: "nginx"
  replicas: 2
  selector:
    matchLabels:
      app: nginx
  template:
    metadata:
      labels:
        app: nginx
    spec:
      containers:
        - name: nginx
          image: nginx:1.28
          ports:
            - containerPort: 80
              name: web
...
================================================
FILE: fluxcd-demo/helm/values.yaml
================================================
================================================
FILE: fluxcd-demo/kind-cluster.yaml
================================================
kind: Cluster
apiVersion: kind.x-k8s.io/v1alpha4
# Route containerd image pulls for host.docker.internal:5050 through a local
# registry mirror. insecure_skip_verify is acceptable only because this is a
# plain-HTTP registry on the developer's own machine — do not reuse this
# patch against any remote registry.
containerdConfigPatches:
  - |-
    [plugins."io.containerd.grpc.v1.cri".registry.mirrors."host.docker.internal:5050"]
      endpoint = ["http://host.docker.internal:5050"]
    [plugins."io.containerd.grpc.v1.cri".registry.configs."host.docker.internal:5050"]
      insecure_skip_verify = true
================================================
FILE: go.mod
================================================
module github.com/controlplaneio/netassert/v2
go 1.25.4
require (
github.com/google/uuid v1.6.0
github.com/gruntwork-io/terratest v0.55.0
github.com/hashicorp/go-hclog v1.6.3
github.com/spf13/cobra v1.10.2
github.com/stretchr/testify v1.11.1
go.uber.org/automaxprocs v1.6.0
go.uber.org/mock v0.4.0
gopkg.in/yaml.v2 v2.4.0
gopkg.in/yaml.v3 v3.0.1
k8s.io/api v0.35.0
k8s.io/apimachinery v0.35.0
k8s.io/client-go v0.35.0
k8s.io/utils v0.0.0-20260108192941-914a6e750570
sigs.k8s.io/kind v0.31.0
)
require (
al.essio.dev/pkg/shellescape v1.5.1 // indirect
filippo.io/edwards25519 v1.1.0 // indirect
github.com/BurntSushi/toml v1.5.0 // indirect
github.com/agext/levenshtein v1.2.3 // indirect
github.com/apparentlymart/go-textseg/v15 v15.0.0 // indirect
github.com/aws/aws-sdk-go-v2 v1.41.1 // indirect
github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.7.4 // indirect
github.com/aws/aws-sdk-go-v2/config v1.32.7 // indirect
github.com/aws/aws-sdk-go-v2/credentials v1.19.7 // indirect
github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.18.17 // indirect
github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.20.19 // indirect
github.com/aws/aws-sdk-go-v2/internal/configsources v1.4.17 // indirect
github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.7.17 // indirect
github.com/aws/aws-sdk-go-v2/internal/ini v1.8.4 // indirect
github.com/aws/aws-sdk-go-v2/internal/v4a v1.4.17 // indirect
github.com/aws/aws-sdk-go-v2/service/acm v1.37.19 // indirect
github.com/aws/aws-sdk-go-v2/service/autoscaling v1.62.5 // indirect
github.com/aws/aws-sdk-go-v2/service/cloudwatchlogs v1.63.1 // indirect
github.com/aws/aws-sdk-go-v2/service/dynamodb v1.53.6 // indirect
github.com/aws/aws-sdk-go-v2/service/ec2 v1.279.2 // indirect
github.com/aws/aws-sdk-go-v2/service/ecr v1.55.1 // indirect
github.com/aws/aws-sdk-go-v2/service/ecs v1.71.0 // indirect
github.com/aws/aws-sdk-go-v2/service/iam v1.53.2 // indirect
github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.13.4 // indirect
github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.9.8 // indirect
github.com/aws/aws-sdk-go-v2/service/internal/endpoint-discovery v1.11.17 // indirect
github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.13.17 // indirect
github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.19.17 // indirect
github.com/aws/aws-sdk-go-v2/service/kms v1.49.5 // indirect
github.com/aws/aws-sdk-go-v2/service/lambda v1.87.1 // indirect
github.com/aws/aws-sdk-go-v2/service/rds v1.114.0 // indirect
github.com/aws/aws-sdk-go-v2/service/route53 v1.62.1 // indirect
github.com/aws/aws-sdk-go-v2/service/s3 v1.95.1 // indirect
github.com/aws/aws-sdk-go-v2/service/secretsmanager v1.41.1 // indirect
github.com/aws/aws-sdk-go-v2/service/signin v1.0.5 // indirect
github.com/aws/aws-sdk-go-v2/service/sns v1.39.11 // indirect
github.com/aws/aws-sdk-go-v2/service/sqs v1.42.21 // indirect
github.com/aws/aws-sdk-go-v2/service/ssm v1.67.8 // indirect
github.com/aws/aws-sdk-go-v2/service/sso v1.30.9 // indirect
github.com/aws/aws-sdk-go-v2/service/ssooidc v1.35.13 // indirect
github.com/aws/aws-sdk-go-v2/service/sts v1.41.6 // indirect
github.com/aws/smithy-go v1.24.0 // indirect
github.com/bgentry/go-netrc v0.0.0-20140422174119-9fd32a8b3d3d // indirect
github.com/boombuler/barcode v1.1.0 // indirect
github.com/cpuguy83/go-md2man/v2 v2.0.7 // indirect
github.com/davecgh/go-spew v1.1.1 // indirect
github.com/emicklei/go-restful/v3 v3.13.0 // indirect
github.com/evanphx/json-patch/v5 v5.6.0 // indirect
github.com/fatih/color v1.18.0 // indirect
github.com/fxamacker/cbor/v2 v2.9.0 // indirect
github.com/go-errors/errors v1.5.1 // indirect
github.com/go-logr/logr v1.4.3 // indirect
github.com/go-openapi/jsonpointer v0.22.4 // indirect
github.com/go-openapi/jsonreference v0.21.4 // indirect
github.com/go-openapi/swag v0.25.4 // indirect
github.com/go-openapi/swag/cmdutils v0.25.4 // indirect
github.com/go-openapi/swag/conv v0.25.4 // indirect
github.com/go-openapi/swag/fileutils v0.25.4 // indirect
github.com/go-openapi/swag/jsonname v0.25.4 // indirect
github.com/go-openapi/swag/jsonutils v0.25.4 // indirect
github.com/go-openapi/swag/loading v0.25.4 // indirect
github.com/go-openapi/swag/mangling v0.25.4 // indirect
github.com/go-openapi/swag/netutils v0.25.4 // indirect
github.com/go-openapi/swag/stringutils v0.25.4 // indirect
github.com/go-openapi/swag/typeutils v0.25.4 // indirect
github.com/go-openapi/swag/yamlutils v0.25.4 // indirect
github.com/go-sql-driver/mysql v1.9.3 // indirect
github.com/google/gnostic-models v0.7.1 // indirect
github.com/gorilla/websocket v1.5.4-0.20250319132907-e064f32e3674 // indirect
github.com/gruntwork-io/go-commons v0.17.2 // indirect
github.com/hashicorp/errwrap v1.1.0 // indirect
github.com/hashicorp/go-cleanhttp v0.5.2 // indirect
github.com/hashicorp/go-getter/v2 v2.2.3 // indirect
github.com/hashicorp/go-multierror v1.1.1 // indirect
github.com/hashicorp/go-safetemp v1.0.0 // indirect
github.com/hashicorp/go-version v1.8.0 // indirect
github.com/hashicorp/hcl/v2 v2.24.0 // indirect
github.com/hashicorp/terraform-json v0.27.2 // indirect
github.com/inconshreveable/mousetrap v1.1.0 // indirect
github.com/jackc/pgpassfile v1.0.0 // indirect
github.com/jackc/pgservicefile v0.0.0-20240606120523-5a60cdf6a761 // indirect
github.com/jackc/pgx/v5 v5.8.0 // indirect
github.com/jackc/puddle/v2 v2.2.2 // indirect
github.com/jinzhu/copier v0.4.0 // indirect
github.com/json-iterator/go v1.1.12 // indirect
github.com/klauspost/compress v1.18.3 // indirect
github.com/mattn/go-colorable v0.1.14 // indirect
github.com/mattn/go-isatty v0.0.20 // indirect
github.com/mattn/go-zglob v0.0.6 // indirect
github.com/mitchellh/go-homedir v1.1.0 // indirect
github.com/mitchellh/go-testing-interface v1.14.1 // indirect
github.com/mitchellh/go-wordwrap v1.0.1 // indirect
github.com/moby/spdystream v0.5.0 // indirect
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
github.com/modern-go/reflect2 v1.0.3-0.20250322232337-35a7c28c31ee // indirect
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect
github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f // indirect
github.com/pelletier/go-toml v1.9.5 // indirect
github.com/pkg/errors v0.9.1 // indirect
github.com/pmezard/go-difflib v1.0.0 // indirect
github.com/pquerna/otp v1.5.0 // indirect
github.com/russross/blackfriday/v2 v2.1.0 // indirect
github.com/spf13/pflag v1.0.10 // indirect
github.com/tmccombs/hcl2json v0.6.8 // indirect
github.com/ulikunitz/xz v0.5.15 // indirect
github.com/urfave/cli/v2 v2.27.7 // indirect
github.com/x448/float16 v0.8.4 // indirect
github.com/xrash/smetrics v0.0.0-20250705151800-55b8f293f342 // indirect
github.com/zclconf/go-cty v1.17.0 // indirect
go.yaml.in/yaml/v2 v2.4.3 // indirect
go.yaml.in/yaml/v3 v3.0.4 // indirect
golang.org/x/crypto v0.47.0 // indirect
golang.org/x/mod v0.32.0 // indirect
golang.org/x/net v0.49.0 // indirect
golang.org/x/oauth2 v0.34.0 // indirect
golang.org/x/sync v0.19.0 // indirect
golang.org/x/sys v0.40.0 // indirect
golang.org/x/term v0.39.0 // indirect
golang.org/x/text v0.33.0 // indirect
golang.org/x/time v0.14.0 // indirect
golang.org/x/tools v0.41.0 // indirect
google.golang.org/protobuf v1.36.11 // indirect
gopkg.in/evanphx/json-patch.v4 v4.13.0 // indirect
gopkg.in/inf.v0 v0.9.1 // indirect
k8s.io/klog/v2 v2.130.1 // indirect
k8s.io/kube-openapi v0.0.0-20251125145642-4e65d59e963e // indirect
sigs.k8s.io/json v0.0.0-20250730193827-2d320260d730 // indirect
sigs.k8s.io/randfill v1.0.0 // indirect
sigs.k8s.io/structured-merge-diff/v6 v6.3.1 // indirect
sigs.k8s.io/yaml v1.6.0 // indirect
)
// go: github.com/imdario/mergo@v1.0.0: parsing go.mod:
// module declares its path as: dario.cat/mergo
// but was required as: github.com/imdario/mergo
replace github.com/imdario/mergo => github.com/imdario/mergo v0.3.16
================================================
FILE: go.sum
================================================
al.essio.dev/pkg/shellescape v1.5.1 h1:86HrALUujYS/h+GtqoB26SBEdkWfmMI6FubjXlsXyho=
al.essio.dev/pkg/shellescape v1.5.1/go.mod h1:6sIqp7X2P6mThCQ7twERpZTuigpr6KbZWtls1U8I890=
filippo.io/edwards25519 v1.1.0 h1:FNf4tywRC1HmFuKW5xopWpigGjJKiJSV0Cqo0cJWDaA=
filippo.io/edwards25519 v1.1.0/go.mod h1:BxyFTGdWcka3PhytdK4V28tE5sGfRvvvRV7EaN4VDT4=
github.com/BurntSushi/toml v1.5.0 h1:W5quZX/G/csjUnuI8SUYlsHs9M38FC7znL0lIO+DvMg=
github.com/BurntSushi/toml v1.5.0/go.mod h1:ukJfTF/6rtPPRCnwkur4qwRxa8vTRFBF0uk2lLoLwho=
github.com/Masterminds/semver/v3 v3.4.0 h1:Zog+i5UMtVoCU8oKka5P7i9q9HgrJeGzI9SA1Xbatp0=
github.com/Masterminds/semver/v3 v3.4.0/go.mod h1:4V+yj/TJE1HU9XfppCwVMZq3I84lprf4nC11bSS5beM=
github.com/agext/levenshtein v1.2.3 h1:YB2fHEn0UJagG8T1rrWknE3ZQzWM06O8AMAatNn7lmo=
github.com/agext/levenshtein v1.2.3/go.mod h1:JEDfjyjHDjOF/1e4FlBE/PkbqA9OfWu2ki2W0IB5558=
github.com/apparentlymart/go-textseg/v15 v15.0.0 h1:uYvfpb3DyLSCGWnctWKGj857c6ew1u1fNQOlOtuGxQY=
github.com/apparentlymart/go-textseg/v15 v15.0.0/go.mod h1:K8XmNZdhEBkdlyDdvbmmsvpAG721bKi0joRfFdHIWJ4=
github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5 h1:0CwZNZbxp69SHPdPJAN/hZIm0C4OItdklCFmMRWYpio=
github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5/go.mod h1:wHh0iHkYZB8zMSxRWpUBQtwG5a7fFgvEO+odwuTv2gs=
github.com/aws/aws-sdk-go-v2 v1.41.1 h1:ABlyEARCDLN034NhxlRUSZr4l71mh+T5KAeGh6cerhU=
github.com/aws/aws-sdk-go-v2 v1.41.1/go.mod h1:MayyLB8y+buD9hZqkCW3kX1AKq07Y5pXxtgB+rRFhz0=
github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.7.4 h1:489krEF9xIGkOaaX3CE/Be2uWjiXrkCH6gUX+bZA/BU=
github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.7.4/go.mod h1:IOAPF6oT9KCsceNTvvYMNHy0+kMF8akOjeDvPENWxp4=
github.com/aws/aws-sdk-go-v2/config v1.32.7 h1:vxUyWGUwmkQ2g19n7JY/9YL8MfAIl7bTesIUykECXmY=
github.com/aws/aws-sdk-go-v2/config v1.32.7/go.mod h1:2/Qm5vKUU/r7Y+zUk/Ptt2MDAEKAfUtKc1+3U1Mo3oY=
github.com/aws/aws-sdk-go-v2/credentials v1.19.7 h1:tHK47VqqtJxOymRrNtUXN5SP/zUTvZKeLx4tH6PGQc8=
github.com/aws/aws-sdk-go-v2/credentials v1.19.7/go.mod h1:qOZk8sPDrxhf+4Wf4oT2urYJrYt3RejHSzgAquYeppw=
github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.18.17 h1:I0GyV8wiYrP8XpA70g1HBcQO1JlQxCMTW9npl5UbDHY=
github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.18.17/go.mod h1:tyw7BOl5bBe/oqvoIeECFJjMdzXoa/dfVz3QQ5lgHGA=
github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.20.19 h1:Gxj3kAlmM+a/VVO4YNsmgHGVUZhSxs0tuVwLIxZBCtM=
github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.20.19/go.mod h1:XGq5kImVqQT4HUNbbG+0Y8O74URsPNH7CGPg1s1HW5E=
github.com/aws/aws-sdk-go-v2/internal/configsources v1.4.17 h1:xOLELNKGp2vsiteLsvLPwxC+mYmO6OZ8PYgiuPJzF8U=
github.com/aws/aws-sdk-go-v2/internal/configsources v1.4.17/go.mod h1:5M5CI3D12dNOtH3/mk6minaRwI2/37ifCURZISxA/IQ=
github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.7.17 h1:WWLqlh79iO48yLkj1v3ISRNiv+3KdQoZ6JWyfcsyQik=
github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.7.17/go.mod h1:EhG22vHRrvF8oXSTYStZhJc1aUgKtnJe+aOiFEV90cM=
github.com/aws/aws-sdk-go-v2/internal/ini v1.8.4 h1:WKuaxf++XKWlHWu9ECbMlha8WOEGm0OUEZqm4K/Gcfk=
github.com/aws/aws-sdk-go-v2/internal/ini v1.8.4/go.mod h1:ZWy7j6v1vWGmPReu0iSGvRiise4YI5SkR3OHKTZ6Wuc=
github.com/aws/aws-sdk-go-v2/internal/v4a v1.4.17 h1:JqcdRG//czea7Ppjb+g/n4o8i/R50aTBHkA7vu0lK+k=
github.com/aws/aws-sdk-go-v2/internal/v4a v1.4.17/go.mod h1:CO+WeGmIdj/MlPel2KwID9Gt7CNq4M65HUfBW97liM0=
github.com/aws/aws-sdk-go-v2/service/acm v1.37.19 h1:6BPfgg/Y4Pmrdr8KDwHx2CYkw8qPEaGQ+aixjuAY/0U=
github.com/aws/aws-sdk-go-v2/service/acm v1.37.19/go.mod h1:mhOStWeEa1xP99WNNPstX75qgqWgJycL5H7UwZQbqbo=
github.com/aws/aws-sdk-go-v2/service/autoscaling v1.62.5 h1:3maqUQlVW7C6zAdSknv6V/LInH/RJaDW0kTFcy7dkOw=
github.com/aws/aws-sdk-go-v2/service/autoscaling v1.62.5/go.mod h1:8O5Pj92iNpfw/Fa7WdHbn6YiEjDoVdutz+9PGRNoP3Y=
github.com/aws/aws-sdk-go-v2/service/cloudwatchlogs v1.63.1 h1:l65dmgr7tO26EcHe6WMdseRnFLoJ2nqdkPz1nJdXfaw=
github.com/aws/aws-sdk-go-v2/service/cloudwatchlogs v1.63.1/go.mod h1:wvnXh1w1pGS2UpEvPTKSjXYuxiXhuvob/IMaK2AWvek=
github.com/aws/aws-sdk-go-v2/service/dynamodb v1.53.6 h1:LNmvkGzDO5PYXDW6m7igx+s2jKaPchpfbS0uDICywFc=
github.com/aws/aws-sdk-go-v2/service/dynamodb v1.53.6/go.mod h1:ctEsEHY2vFQc6i4KU07q4n68v7BAmTbujv2Y+z8+hQY=
github.com/aws/aws-sdk-go-v2/service/ec2 v1.279.2 h1:MG12Z/W1zzJLkw2gCU2gKZ872rqLM0pi9LdkZ/z3FHc=
github.com/aws/aws-sdk-go-v2/service/ec2 v1.279.2/go.mod h1:Uy+C+Sc58jozdoL1McQr8bDsEvNFx+/nBY+vpO1HVUY=
github.com/aws/aws-sdk-go-v2/service/ecr v1.55.1 h1:B7f9R99lCF83XlolTg6d6Lvghyto+/VU83ZrneAVfK8=
github.com/aws/aws-sdk-go-v2/service/ecr v1.55.1/go.mod h1:cpYRXx5BkmS3mwWRKPbWSPKmyAUNL7aLWAPiiinwk/U=
github.com/aws/aws-sdk-go-v2/service/ecs v1.71.0 h1:MzP/ElwTpINq+hS80ZQz4epKVnUTlz8Sz+P/AFORCKM=
github.com/aws/aws-sdk-go-v2/service/ecs v1.71.0/go.mod h1:pMlGFDpHoLTJOIZHGdJOAWmi+xeIlQXuFTuQxs1epYE=
github.com/aws/aws-sdk-go-v2/service/iam v1.53.2 h1:62G6btFUwAa5uR5iPlnlNVAM0zJSLbWgDfKOfUC7oW4=
github.com/aws/aws-sdk-go-v2/service/iam v1.53.2/go.mod h1:av9clChrbZbJ5E21msSsiT2oghl2BJHfQGhCkXmhyu8=
github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.13.4 h1:0ryTNEdJbzUCEWkVXEXoqlXV72J5keC1GvILMOuD00E=
github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.13.4/go.mod h1:HQ4qwNZh32C3CBeO6iJLQlgtMzqeG17ziAA/3KDJFow=
github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.9.8 h1:Z5EiPIzXKewUQK0QTMkutjiaPVeVYXX7KIqhXu/0fXs=
github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.9.8/go.mod h1:FsTpJtvC4U1fyDXk7c71XoDv3HlRm8V3NiYLeYLh5YE=
github.com/aws/aws-sdk-go-v2/service/internal/endpoint-discovery v1.11.17 h1:Nhx/OYX+ukejm9t/MkWI8sucnsiroNYNGb5ddI9ungQ=
github.com/aws/aws-sdk-go-v2/service/internal/endpoint-discovery v1.11.17/go.mod h1:AjmK8JWnlAevq1b1NBtv5oQVG4iqnYXUufdgol+q9wg=
github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.13.17 h1:RuNSMoozM8oXlgLG/n6WLaFGoea7/CddrCfIiSA+xdY=
github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.13.17/go.mod h1:F2xxQ9TZz5gDWsclCtPQscGpP0VUOc8RqgFM3vDENmU=
github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.19.17 h1:bGeHBsGZx0Dvu/eJC0Lh9adJa3M1xREcndxLNZlve2U=
github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.19.17/go.mod h1:dcW24lbU0CzHusTE8LLHhRLI42ejmINN8Lcr22bwh/g=
github.com/aws/aws-sdk-go-v2/service/kms v1.49.5 h1:DKibav4XF66XSeaXcrn9GlWGHos6D/vJ4r7jsK7z5CE=
github.com/aws/aws-sdk-go-v2/service/kms v1.49.5/go.mod h1:1SdcmEGUEQE1mrU2sIgeHtcMSxHuybhPvuEPANzIDfI=
github.com/aws/aws-sdk-go-v2/service/lambda v1.87.1 h1:QBdmTXWwqVgx0PueT/Xgp2+al5HR0gAV743pTzYeBRw=
github.com/aws/aws-sdk-go-v2/service/lambda v1.87.1/go.mod h1:ogjbkxFgFOjG3dYFQ8irC92gQfpfMDcy1RDKNSZWXNU=
github.com/aws/aws-sdk-go-v2/service/rds v1.114.0 h1:p9c6HDzx6sTf7uyc9xsQd693uzArsPrsVr9n0oRk7DU=
github.com/aws/aws-sdk-go-v2/service/rds v1.114.0/go.mod h1:JBRYWpz5oXQtHgQC+X8LX9lh0FBCwRHJlWEIT+TTLaE=
github.com/aws/aws-sdk-go-v2/service/route53 v1.62.1 h1:1jIdwWOulae7bBLIgB36OZ0DINACb1wxM6wdGlx4eHE=
github.com/aws/aws-sdk-go-v2/service/route53 v1.62.1/go.mod h1:tE2zGlMIlxWv+7Otap7ctRp3qeKqtnja7DZguj3Vu/Y=
github.com/aws/aws-sdk-go-v2/service/s3 v1.95.1 h1:C2dUPSnEpy4voWFIq3JNd8gN0Y5vYGDo44eUE58a/p8=
github.com/aws/aws-sdk-go-v2/service/s3 v1.95.1/go.mod h1:5jggDlZ2CLQhwJBiZJb4vfk4f0GxWdEDruWKEJ1xOdo=
github.com/aws/aws-sdk-go-v2/service/secretsmanager v1.41.1 h1:72DBkm/CCuWx2LMHAXvLDkZfzopT3psfAeyZDIt1/yE=
github.com/aws/aws-sdk-go-v2/service/secretsmanager v1.41.1/go.mod h1:A+oSJxFvzgjZWkpM0mXs3RxB5O1SD6473w3qafOC9eU=
github.com/aws/aws-sdk-go-v2/service/signin v1.0.5 h1:VrhDvQib/i0lxvr3zqlUwLwJP4fpmpyD9wYG1vfSu+Y=
github.com/aws/aws-sdk-go-v2/service/signin v1.0.5/go.mod h1:k029+U8SY30/3/ras4G/Fnv/b88N4mAfliNn08Dem4M=
github.com/aws/aws-sdk-go-v2/service/sns v1.39.11 h1:Ke7RS0NuP9Xwk31prXYcFGA1Qfn8QmNWcxyjKPcXZdc=
github.com/aws/aws-sdk-go-v2/service/sns v1.39.11/go.mod h1:hdZDKzao0PBfJJygT7T92x2uVcWc/htqlhrjFIjnHDM=
github.com/aws/aws-sdk-go-v2/service/sqs v1.42.21 h1:Oa0IhwDLVrcBHDlNo1aosG4CxO4HyvzDV5xUWqWcBc0=
github.com/aws/aws-sdk-go-v2/service/sqs v1.42.21/go.mod h1:t98Ssq+qtXKXl2SFtaSkuT6X42FSM//fnO6sfq5RqGM=
github.com/aws/aws-sdk-go-v2/service/ssm v1.67.8 h1:31Llf5VfrZ78YvYs7sWcS7L2m3waikzRc6q1nYenVS4=
github.com/aws/aws-sdk-go-v2/service/ssm v1.67.8/go.mod h1:/jgaDlU1UImoxTxhRNxXHvBAPqPZQ8oCjcPbbkR6kac=
github.com/aws/aws-sdk-go-v2/service/sso v1.30.9 h1:v6EiMvhEYBoHABfbGB4alOYmCIrcgyPPiBE1wZAEbqk=
github.com/aws/aws-sdk-go-v2/service/sso v1.30.9/go.mod h1:yifAsgBxgJWn3ggx70A3urX2AN49Y5sJTD1UQFlfqBw=
github.com/aws/aws-sdk-go-v2/service/ssooidc v1.35.13 h1:gd84Omyu9JLriJVCbGApcLzVR3XtmC4ZDPcAI6Ftvds=
github.com/aws/aws-sdk-go-v2/service/ssooidc v1.35.13/go.mod h1:sTGThjphYE4Ohw8vJiRStAcu3rbjtXRsdNB0TvZ5wwo=
github.com/aws/aws-sdk-go-v2/service/sts v1.41.6 h1:5fFjR/ToSOzB2OQ/XqWpZBmNvmP/pJ1jOWYlFDJTjRQ=
github.com/aws/aws-sdk-go-v2/service/sts v1.41.6/go.mod h1:qgFDZQSD/Kys7nJnVqYlWKnh0SSdMjAi0uSwON4wgYQ=
github.com/aws/smithy-go v1.24.0 h1:LpilSUItNPFr1eY85RYgTIg5eIEPtvFbskaFcmmIUnk=
github.com/aws/smithy-go v1.24.0/go.mod h1:LEj2LM3rBRQJxPZTB4KuzZkaZYnZPnvgIhb4pu07mx0=
github.com/bgentry/go-netrc v0.0.0-20140422174119-9fd32a8b3d3d h1:xDfNPAt8lFiC1UJrqV3uuy861HCTo708pDMbjHHdCas=
github.com/bgentry/go-netrc v0.0.0-20140422174119-9fd32a8b3d3d/go.mod h1:6QX/PXZ00z/TKoufEY6K/a0k6AhaJrQKdFe6OfVXsa4=
github.com/boombuler/barcode v1.0.1-0.20190219062509-6c824513bacc/go.mod h1:paBWMcWSl3LHKBqUq+rly7CNSldXjb2rDl3JlRe0mD8=
github.com/boombuler/barcode v1.1.0 h1:ChaYjBR63fr4LFyGn8E8nt7dBSt3MiU3zMOZqFvVkHo=
github.com/boombuler/barcode v1.1.0/go.mod h1:paBWMcWSl3LHKBqUq+rly7CNSldXjb2rDl3JlRe0mD8=
github.com/cpuguy83/go-md2man/v2 v2.0.6/go.mod h1:oOW0eioCTA6cOiMLiUPZOpcVxMig6NIQQ7OS05n1F4g=
github.com/cpuguy83/go-md2man/v2 v2.0.7 h1:zbFlGlXEAKlwXpmvle3d8Oe3YnkKIK4xSRTd3sHPnBo=
github.com/cpuguy83/go-md2man/v2 v2.0.7/go.mod h1:oOW0eioCTA6cOiMLiUPZOpcVxMig6NIQQ7OS05n1F4g=
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/emicklei/go-restful/v3 v3.13.0 h1:C4Bl2xDndpU6nJ4bc1jXd+uTmYPVUwkD6bFY/oTyCes=
github.com/emicklei/go-restful/v3 v3.13.0/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc=
github.com/evanphx/json-patch/v5 v5.6.0 h1:b91NhWfaz02IuVxO9faSllyAtNXHMPkC5J8sJCLunww=
github.com/evanphx/json-patch/v5 v5.6.0/go.mod h1:G79N1coSVB93tBe7j6PhzjmR3/2VvlbKOFpnXhI9Bw4=
github.com/fatih/color v1.13.0/go.mod h1:kLAiJbzzSOZDVNGyDpeOxJ47H46qBXwg5ILebYFFOfk=
github.com/fatih/color v1.18.0 h1:S8gINlzdQ840/4pfAwic/ZE0djQEH3wM94VfqLTZcOM=
github.com/fatih/color v1.18.0/go.mod h1:4FelSpRwEGDpQ12mAdzqdOukCy4u8WUtOY6lkT/6HfU=
github.com/fxamacker/cbor/v2 v2.9.0 h1:NpKPmjDBgUfBms6tr6JZkTHtfFGcMKsw3eGcmD/sapM=
github.com/fxamacker/cbor/v2 v2.9.0/go.mod h1:vM4b+DJCtHn+zz7h3FFp/hDAI9WNWCsZj23V5ytsSxQ=
github.com/go-errors/errors v1.5.1 h1:ZwEMSLRCapFLflTpT7NKaAc7ukJ8ZPEjzlxt8rPN8bk=
github.com/go-errors/errors v1.5.1/go.mod h1:sIVyrIiJhuEF+Pj9Ebtd6P/rEYROXFi3BopGUQ5a5Og=
github.com/go-logr/logr v1.4.3 h1:CjnDlHq8ikf6E492q6eKboGOC0T8CDaOvkHCIg8idEI=
github.com/go-logr/logr v1.4.3/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY=
github.com/go-openapi/jsonpointer v0.22.4 h1:dZtK82WlNpVLDW2jlA1YCiVJFVqkED1MegOUy9kR5T4=
github.com/go-openapi/jsonpointer v0.22.4/go.mod h1:elX9+UgznpFhgBuaMQ7iu4lvvX1nvNsesQ3oxmYTw80=
github.com/go-openapi/jsonreference v0.21.4 h1:24qaE2y9bx/q3uRK/qN+TDwbok1NhbSmGjjySRCHtC8=
github.com/go-openapi/jsonreference v0.21.4/go.mod h1:rIENPTjDbLpzQmQWCj5kKj3ZlmEh+EFVbz3RTUh30/4=
github.com/go-openapi/swag v0.25.4 h1:OyUPUFYDPDBMkqyxOTkqDYFnrhuhi9NR6QVUvIochMU=
github.com/go-openapi/swag v0.25.4/go.mod h1:zNfJ9WZABGHCFg2RnY0S4IOkAcVTzJ6z2Bi+Q4i6qFQ=
github.com/go-openapi/swag/cmdutils v0.25.4 h1:8rYhB5n6WawR192/BfUu2iVlxqVR9aRgGJP6WaBoW+4=
github.com/go-openapi/swag/cmdutils v0.25.4/go.mod h1:pdae/AFo6WxLl5L0rq87eRzVPm/XRHM3MoYgRMvG4A0=
github.com/go-openapi/swag/conv v0.25.4 h1:/Dd7p0LZXczgUcC/Ikm1+YqVzkEeCc9LnOWjfkpkfe4=
github.com/go-openapi/swag/conv v0.25.4/go.mod h1:3LXfie/lwoAv0NHoEuY1hjoFAYkvlqI/Bn5EQDD3PPU=
github.com/go-openapi/swag/fileutils v0.25.4 h1:2oI0XNW5y6UWZTC7vAxC8hmsK/tOkWXHJQH4lKjqw+Y=
github.com/go-openapi/swag/fileutils v0.25.4/go.mod h1:cdOT/PKbwcysVQ9Tpr0q20lQKH7MGhOEb6EwmHOirUk=
github.com/go-openapi/swag/jsonname v0.25.4 h1:bZH0+MsS03MbnwBXYhuTttMOqk+5KcQ9869Vye1bNHI=
github.com/go-openapi/swag/jsonname v0.25.4/go.mod h1:GPVEk9CWVhNvWhZgrnvRA6utbAltopbKwDu8mXNUMag=
github.com/go-openapi/swag/jsonutils v0.25.4 h1:VSchfbGhD4UTf4vCdR2F4TLBdLwHyUDTd1/q4i+jGZA=
github.com/go-openapi/swag/jsonutils v0.25.4/go.mod h1:7OYGXpvVFPn4PpaSdPHJBtF0iGnbEaTk8AvBkoWnaAY=
github.com/go-openapi/swag/jsonutils/fixtures_test v0.25.4 h1:IACsSvBhiNJwlDix7wq39SS2Fh7lUOCJRmx/4SN4sVo=
github.com/go-openapi/swag/jsonutils/fixtures_test v0.25.4/go.mod h1:Mt0Ost9l3cUzVv4OEZG+WSeoHwjWLnarzMePNDAOBiM=
github.com/go-openapi/swag/loading v0.25.4 h1:jN4MvLj0X6yhCDduRsxDDw1aHe+ZWoLjW+9ZQWIKn2s=
github.com/go-openapi/swag/loading v0.25.4/go.mod h1:rpUM1ZiyEP9+mNLIQUdMiD7dCETXvkkC30z53i+ftTE=
github.com/go-openapi/swag/mangling v0.25.4 h1:2b9kBJk9JvPgxr36V23FxJLdwBrpijI26Bx5JH4Hp48=
github.com/go-openapi/swag/mangling v0.25.4/go.mod h1:6dxwu6QyORHpIIApsdZgb6wBk/DPU15MdyYj/ikn0Hg=
github.com/go-openapi/swag/netutils v0.25.4 h1:Gqe6K71bGRb3ZQLusdI8p/y1KLgV4M/k+/HzVSqT8H0=
github.com/go-openapi/swag/netutils v0.25.4/go.mod h1:m2W8dtdaoX7oj9rEttLyTeEFFEBvnAx9qHd5nJEBzYg=
github.com/go-openapi/swag/stringutils v0.25.4 h1:O6dU1Rd8bej4HPA3/CLPciNBBDwZj9HiEpdVsb8B5A8=
github.com/go-openapi/swag/stringutils v0.25.4/go.mod h1:GTsRvhJW5xM5gkgiFe0fV3PUlFm0dr8vki6/VSRaZK0=
github.com/go-openapi/swag/typeutils v0.25.4 h1:1/fbZOUN472NTc39zpa+YGHn3jzHWhv42wAJSN91wRw=
github.com/go-openapi/swag/typeutils v0.25.4/go.mod h1:Ou7g//Wx8tTLS9vG0UmzfCsjZjKhpjxayRKTHXf2pTE=
github.com/go-openapi/swag/yamlutils v0.25.4 h1:6jdaeSItEUb7ioS9lFoCZ65Cne1/RZtPBZ9A56h92Sw=
github.com/go-openapi/swag/yamlutils v0.25.4/go.mod h1:MNzq1ulQu+yd8Kl7wPOut/YHAAU/H6hL91fF+E2RFwc=
github.com/go-openapi/testify/enable/yaml/v2 v2.0.2 h1:0+Y41Pz1NkbTHz8NngxTuAXxEodtNSI1WG1c/m5Akw4=
github.com/go-openapi/testify/enable/yaml/v2 v2.0.2/go.mod h1:kme83333GCtJQHXQ8UKX3IBZu6z8T5Dvy5+CW3NLUUg=
github.com/go-openapi/testify/v2 v2.0.2 h1:X999g3jeLcoY8qctY/c/Z8iBHTbwLz7R2WXd6Ub6wls=
github.com/go-openapi/testify/v2 v2.0.2/go.mod h1:HCPmvFFnheKK2BuwSA0TbbdxJ3I16pjwMkYkP4Ywn54=
github.com/go-sql-driver/mysql v1.9.3 h1:U/N249h2WzJ3Ukj8SowVFjdtZKfu9vlLZxjPXV1aweo=
github.com/go-sql-driver/mysql v1.9.3/go.mod h1:qn46aNg1333BRMNU69Lq93t8du/dwxI64Gl8i5p1WMU=
github.com/go-task/slim-sprig/v3 v3.0.0 h1:sUs3vkvUymDpBKi3qH1YSqBQk9+9D/8M2mN1vB6EwHI=
github.com/go-task/slim-sprig/v3 v3.0.0/go.mod h1:W848ghGpv3Qj3dhTPRyJypKRiqCdHZiAzKg9hl15HA8=
github.com/go-test/deep v1.0.7 h1:/VSMRlnY/JSyqxQUzQLKVMAskpY/NZKFA5j2P+0pP2M=
github.com/go-test/deep v1.0.7/go.mod h1:QV8Hv/iy04NyLBxAdO9njL0iVPN1S4d/A3NVv1V36o8=
github.com/google/gnostic-models v0.7.1 h1:SisTfuFKJSKM5CPZkffwi6coztzzeYUhc3v4yxLWH8c=
github.com/google/gnostic-models v0.7.1/go.mod h1:whL5G0m6dmc5cPxKc5bdKdEN3UjI7OUGxBlw57miDrQ=
github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8=
github.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU=
github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
github.com/google/pprof v0.0.0-20250403155104-27863c87afa6 h1:BHT72Gu3keYf3ZEu2J0b1vyeLSOYI8bm5wbJM/8yDe8=
github.com/google/pprof v0.0.0-20250403155104-27863c87afa6/go.mod h1:boTsfXsheKC2y+lKOCMpSfarhxDeIzfZG1jqGcPl3cA=
github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 h1:El6M4kTTCOh6aBiKaUGG7oYTSPP8MxqL4YI3kZKwcP4=
github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510/go.mod h1:pupxD2MaaD3pAXIBCelhxNneeOaAeabZDe5s4K6zSpQ=
github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0=
github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/gorilla/websocket v1.5.4-0.20250319132907-e064f32e3674 h1:JeSE6pjso5THxAzdVpqr6/geYxZytqFMBCOtn/ujyeo=
github.com/gorilla/websocket v1.5.4-0.20250319132907-e064f32e3674/go.mod h1:r4w70xmWCQKmi1ONH4KIaBptdivuRPyosB9RmPlGEwA=
github.com/gruntwork-io/go-commons v0.17.2 h1:14dsCJ7M5Vv2X3BIPKeG9Kdy6vTMGhM8L4WZazxfTuY=
github.com/gruntwork-io/go-commons v0.17.2/go.mod h1:zs7Q2AbUKuTarBPy19CIxJVUX/rBamfW8IwuWKniWkE=
github.com/gruntwork-io/terratest v0.55.0 h1:NgG6lm2dArdQ3KcOofw6PTfVRK1Flt7L3NNhFSBo72A=
github.com/gruntwork-io/terratest v0.55.0/go.mod h1:OE0Jsc8Wn5kw/QySLbBd53g9Gt+xfDyDKChwRHwkKvI=
github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=
github.com/hashicorp/errwrap v1.1.0 h1:OxrOeh75EUXMY8TBjag2fzXGZ40LB6IKw45YeGUDY2I=
github.com/hashicorp/errwrap v1.1.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=
github.com/hashicorp/go-cleanhttp v0.5.2 h1:035FKYIWjmULyFRBKPs8TBQoi0x6d9G4xc9neXJWAZQ=
github.com/hashicorp/go-cleanhttp v0.5.2/go.mod h1:kO/YDlP8L1346E6Sodw+PrpBSV4/SoxCXGY6BqNFT48=
github.com/hashicorp/go-getter/v2 v2.2.3 h1:6CVzhT0KJQHqd9b0pK3xSP0CM/Cv+bVhk+jcaRJ2pGk=
github.com/hashicorp/go-getter/v2 v2.2.3/go.mod h1:hp5Yy0GMQvwWVUmwLs3ygivz1JSLI323hdIE9J9m7TY=
github.com/hashicorp/go-hclog v1.6.3 h1:Qr2kF+eVWjTiYmU7Y31tYlP1h0q/X3Nl3tPGdaB11/k=
github.com/hashicorp/go-hclog v1.6.3/go.mod h1:W4Qnvbt70Wk/zYJryRzDRU/4r0kIg0PVHBcfoyhpF5M=
github.com/hashicorp/go-multierror v1.1.1 h1:H5DkEtf6CXdFp0N0Em5UCwQpXMWke8IA0+lD48awMYo=
github.com/hashicorp/go-multierror v1.1.1/go.mod h1:iw975J/qwKPdAO1clOe2L8331t/9/fmwbPZ6JB6eMoM=
github.com/hashicorp/go-safetemp v1.0.0 h1:2HR189eFNrjHQyENnQMMpCiBAsRxzbTMIgBhEyExpmo=
github.com/hashicorp/go-safetemp v1.0.0/go.mod h1:oaerMy3BhqiTbVye6QuFhFtIceqFoDHxNAB65b+Rj1I=
github.com/hashicorp/go-version v1.8.0 h1:KAkNb1HAiZd1ukkxDFGmokVZe1Xy9HG6NUp+bPle2i4=
github.com/hashicorp/go-version v1.8.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA=
github.com/hashicorp/hcl/v2 v2.24.0 h1:2QJdZ454DSsYGoaE6QheQZjtKZSUs9Nh2izTWiwQxvE=
github.com/hashicorp/hcl/v2 v2.24.0/go.mod h1:oGoO1FIQYfn/AgyOhlg9qLC6/nOJPX3qGbkZpYAcqfM=
github.com/hashicorp/terraform-json v0.27.2 h1:BwGuzM6iUPqf9JYM/Z4AF1OJ5VVJEEzoKST/tRDBJKU=
github.com/hashicorp/terraform-json v0.27.2/go.mod h1:GzPLJ1PLdUG5xL6xn1OXWIjteQRT2CNT9o/6A9mi9hE=
github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8=
github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw=
github.com/jackc/pgpassfile v1.0.0 h1:/6Hmqy13Ss2zCq62VdNG8tM1wchn8zjSGOBJ6icpsIM=
github.com/jackc/pgpassfile v1.0.0/go.mod h1:CEx0iS5ambNFdcRtxPj5JhEz+xB6uRky5eyVu/W2HEg=
github.com/jackc/pgservicefile v0.0.0-20240606120523-5a60cdf6a761 h1:iCEnooe7UlwOQYpKFhBabPMi4aNAfoODPEFNiAnClxo=
github.com/jackc/pgservicefile v0.0.0-20240606120523-5a60cdf6a761/go.mod h1:5TJZWKEWniPve33vlWYSoGYefn3gLQRzjfDlhSJ9ZKM=
github.com/jackc/pgx/v5 v5.8.0 h1:TYPDoleBBme0xGSAX3/+NujXXtpZn9HBONkQC7IEZSo=
github.com/jackc/pgx/v5 v5.8.0/go.mod h1:QVeDInX2m9VyzvNeiCJVjCkNFqzsNb43204HshNSZKw=
github.com/jackc/puddle/v2 v2.2.2 h1:PR8nw+E/1w0GLuRFSmiioY6UooMp6KJv0/61nB7icHo=
github.com/jackc/puddle/v2 v2.2.2/go.mod h1:vriiEXHvEE654aYKXXjOvZM39qJ0q+azkZFrfEOc3H4=
github.com/jessevdk/go-flags v1.4.0/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI=
github.com/jinzhu/copier v0.4.0 h1:w3ciUoD19shMCRargcpm0cm91ytaBhDvuRpz1ODO/U8=
github.com/jinzhu/copier v0.4.0/go.mod h1:DfbEm0FYsaqBcKcFuvmOZb218JkPGtvSHsKg8S8hyyg=
github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM=
github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo=
github.com/klauspost/compress v1.18.3 h1:9PJRvfbmTabkOX8moIpXPbMMbYN60bWImDDU7L+/6zw=
github.com/klauspost/compress v1.18.3/go.mod h1:R0h/fSBs8DE4ENlcrlib3PsXS61voFxhIs2DeRhCvJ4=
github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE=
github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk=
github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
github.com/mattn/go-colorable v0.1.9/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc=
github.com/mattn/go-colorable v0.1.12/go.mod h1:u5H1YNBxpqRaxsYJYSkiCWKzEfiAb1Gb520KVy5xxl4=
github.com/mattn/go-colorable v0.1.14 h1:9A9LHSqF/7dyVVX6g0U9cwm9pG3kP9gSzcuIPHPsaIE=
github.com/mattn/go-colorable v0.1.14/go.mod h1:6LmQG8QLFO4G5z1gPvYEzlUgJ2wF+stgPZH1UqBm1s8=
github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU=
github.com/mattn/go-isatty v0.0.14/go.mod h1:7GGIvUiUoEMVVmxf/4nioHXj79iQHKdU27kJ6hsGG94=
github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY=
github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y=
github.com/mattn/go-zglob v0.0.6 h1:mP8RnmCgho4oaUYDIDn6GNxYk+qJGUs8fJLn+twYj2A=
github.com/mattn/go-zglob v0.0.6/go.mod h1:MxxjyoXXnMxfIpxTK2GAkw1w8glPsQILx3N5wrKakiY=
github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y=
github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0=
github.com/mitchellh/go-testing-interface v1.14.1 h1:jrgshOhYAUVNMAJiKbEu7EqAwgJJ2JqpQmpLJOu07cU=
github.com/mitchellh/go-testing-interface v1.14.1/go.mod h1:gfgS7OtZj6MA4U1UrDRp04twqAjfvlZyCfX3sDjEym8=
github.com/mitchellh/go-wordwrap v1.0.1 h1:TLuKupo69TCn6TQSyGxwI1EblZZEsQ0vMlAFQflz0v0=
github.com/mitchellh/go-wordwrap v1.0.1/go.mod h1:R62XHJLzvMFRBbcrT7m7WgmE1eOyTSsCt+hzestvNj0=
github.com/moby/spdystream v0.5.0 h1:7r0J1Si3QO/kjRitvSLVVFUjxMEb/YLj6S9FF62JBCU=
github.com/moby/spdystream v0.5.0/go.mod h1:xBAYlnt/ay+11ShkdFKNAG7LsyK/tmNBVvVOwrfMgdI=
github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg=
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk=
github.com/modern-go/reflect2 v1.0.3-0.20250322232337-35a7c28c31ee h1:W5t00kpgFdJifH4BDsTlE89Zl93FEloxaWZfGcifgq8=
github.com/modern-go/reflect2 v1.0.3-0.20250322232337-35a7c28c31ee/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk=
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA=
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ=
github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f h1:y5//uYreIhSUg3J1GEMiLbxo1LJaP8RfCpH6pymGZus=
github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw=
github.com/onsi/ginkgo/v2 v2.27.2 h1:LzwLj0b89qtIy6SSASkzlNvX6WktqurSHwkk2ipF/Ns=
github.com/onsi/ginkgo/v2 v2.27.2/go.mod h1:ArE1D/XhNXBXCBkKOLkbsb2c81dQHCRcF5zwn/ykDRo=
github.com/onsi/gomega v1.38.2 h1:eZCjf2xjZAqe+LeWvKb5weQ+NcPwX84kqJ0cZNxok2A=
github.com/onsi/gomega v1.38.2/go.mod h1:W2MJcYxRGV63b418Ai34Ud0hEdTVXq9NW9+Sx6uXf3k=
github.com/pelletier/go-toml v1.9.5 h1:4yBQzkHv+7BHq2PQUZF3Mx0IYxG7LsP222s7Agd3ve8=
github.com/pelletier/go-toml v1.9.5/go.mod h1:u1nR/EPcESfeI/szUZKdtJ0xRNbUoANCkoOuaOx1Y+c=
github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/pquerna/otp v1.5.0 h1:NMMR+WrmaqXU4EzdGJEE1aUUI0AMRzsp96fFFWNPwxs=
github.com/pquerna/otp v1.5.0/go.mod h1:dkJfzwRKNiegxyNb54X/3fLwhCynbMspSyWKnvi1AEg=
github.com/prashantv/gostub v1.1.0 h1:BTyx3RfQjRHnUWaGF9oQos79AlQ5k8WNktv7VGvVH4g=
github.com/prashantv/gostub v1.1.0/go.mod h1:A5zLQHz7ieHGG7is6LLXLz7I8+3LZzsrV0P1IAHhP5U=
github.com/rogpeppe/go-internal v1.14.1 h1:UQB4HGPB6osV0SQTLymcB4TgvyWu6ZyliaW0tI/otEQ=
github.com/rogpeppe/go-internal v1.14.1/go.mod h1:MaRKkUm5W0goXpeCfT7UZI6fk/L7L7so1lCWt35ZSgc=
github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk=
github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
github.com/spf13/cobra v1.10.2 h1:DMTTonx5m65Ic0GOoRY2c16WCbHxOOw6xxezuLaBpcU=
github.com/spf13/cobra v1.10.2/go.mod h1:7C1pvHqHw5A4vrJfjNwvOdzYu0Gml16OCs2GRiTUUS4=
github.com/spf13/pflag v1.0.9/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
github.com/spf13/pflag v1.0.10 h1:4EBh2KAYBwaONj6b2Ye1GiHfwjqyROoF4RwYO+vPwFk=
github.com/spf13/pflag v1.0.10/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/objx v0.5.2 h1:xuMeJ0Sdp5ZMRXx/aWO6RZxdr3beISkG5/G/aIRr3pY=
github.com/stretchr/objx v0.5.2/go.mod h1:FRsXN1f5AsAjCGJKqEizvkpNtU+EGNCLh3NxZ/8L+MA=
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/stretchr/testify v1.7.2/go.mod h1:R6va5+xMeoiuVRoj+gSkQ7d3FALtqAAGI1FQKckRals=
github.com/stretchr/testify v1.11.1 h1:7s2iGBzp5EwR7/aIZr8ao5+dra3wiQyKjjFuvgVKu7U=
github.com/stretchr/testify v1.11.1/go.mod h1:wZwfW3scLgRK+23gO65QZefKpKQRnfz6sD981Nm4B6U=
github.com/tmccombs/hcl2json v0.6.8 h1:9bd7c3jZTj9FsN+lDIzrvLmXqxvCgydb84Uc4DBxOHA=
github.com/tmccombs/hcl2json v0.6.8/go.mod h1:qjEaQ4hBNPeDWOENB9yg6+BzqvtMA1MMN1+goFFh8Vc=
github.com/ulikunitz/xz v0.5.15 h1:9DNdB5s+SgV3bQ2ApL10xRc35ck0DuIX/isZvIk+ubY=
github.com/ulikunitz/xz v0.5.15/go.mod h1:nbz6k7qbPmH4IRqmfOplQw/tblSgqTqBwxkY0oWt/14=
github.com/urfave/cli/v2 v2.27.7 h1:bH59vdhbjLv3LAvIu6gd0usJHgoTTPhCFib8qqOwXYU=
github.com/urfave/cli/v2 v2.27.7/go.mod h1:CyNAG/xg+iAOg0N4MPGZqVmv2rCoP267496AOXUZjA4=
github.com/x448/float16 v0.8.4 h1:qLwI1I70+NjRFUR3zs1JPUCgaCXSh3SW62uAKT1mSBM=
github.com/x448/float16 v0.8.4/go.mod h1:14CWIYCyZA/cWjXOioeEpHeN/83MdbZDRQHoFcYsOfg=
github.com/xrash/smetrics v0.0.0-20250705151800-55b8f293f342 h1:FnBeRrxr7OU4VvAzt5X7s6266i6cSVkkFPS0TuXWbIg=
github.com/xrash/smetrics v0.0.0-20250705151800-55b8f293f342/go.mod h1:Ohn+xnUBiLI6FVj/9LpzZWtj1/D6lUovWYBkxHVV3aM=
github.com/zclconf/go-cty v1.17.0 h1:seZvECve6XX4tmnvRzWtJNHdscMtYEx5R7bnnVyd/d0=
github.com/zclconf/go-cty v1.17.0/go.mod h1:wqFzcImaLTI6A5HfsRwB0nj5n0MRZFwmey8YoFPPs3U=
github.com/zclconf/go-cty-debug v0.0.0-20240509010212-0d6042c53940 h1:4r45xpDWB6ZMSMNJFMOjqrGHynW3DIBuR2H9j0ug+Mo=
github.com/zclconf/go-cty-debug v0.0.0-20240509010212-0d6042c53940/go.mod h1:CmBdvvj3nqzfzJ6nTCIwDTPZ56aVGvDrmztiO5g3qrM=
go.uber.org/automaxprocs v1.6.0 h1:O3y2/QNTOdbF+e/dpXNNW7Rx2hZ4sTIPyybbxyNqTUs=
go.uber.org/automaxprocs v1.6.0/go.mod h1:ifeIMSnPZuznNm6jmdzmU3/bfk01Fe2fotchwEFJ8r8=
go.uber.org/mock v0.4.0 h1:VcM4ZOtdbR4f6VXfiOpwpVJDL6lCReaZ6mw31wqh7KU=
go.uber.org/mock v0.4.0/go.mod h1:a6FSlNadKUHUa9IP5Vyt1zh4fC7uAwxMutEAscFbkZc=
go.yaml.in/yaml/v2 v2.4.3 h1:6gvOSjQoTB3vt1l+CU+tSyi/HOjfOjRLJ4YwYZGwRO0=
go.yaml.in/yaml/v2 v2.4.3/go.mod h1:zSxWcmIDjOzPXpjlTTbAsKokqkDNAVtZO0WOMiT90s8=
go.yaml.in/yaml/v3 v3.0.4 h1:tfq32ie2Jv2UxXFdLJdh3jXuOzWiL1fo0bu/FbuKpbc=
go.yaml.in/yaml/v3 v3.0.4/go.mod h1:DhzuOOF2ATzADvBadXxruRBLzYTpT36CKvDb3+aBEFg=
golang.org/x/crypto v0.47.0 h1:V6e3FRj+n4dbpw86FJ8Fv7XVOql7TEwpHapKoMJ/GO8=
golang.org/x/crypto v0.47.0/go.mod h1:ff3Y9VzzKbwSSEzWqJsJVBnWmRwRSHt/6Op5n9bQc4A=
golang.org/x/mod v0.32.0 h1:9F4d3PHLljb6x//jOyokMv3eX+YDeepZSEo3mFJy93c=
golang.org/x/mod v0.32.0/go.mod h1:SgipZ/3h2Ci89DlEtEXWUk/HteuRin+HHhN+WbNhguU=
golang.org/x/net v0.49.0 h1:eeHFmOGUTtaaPSGNmjBKpbng9MulQsJURQUAfUwY++o=
golang.org/x/net v0.49.0/go.mod h1:/ysNB2EvaqvesRkuLAyjI1ycPZlQHM3q01F02UY/MV8=
golang.org/x/oauth2 v0.34.0 h1:hqK/t4AKgbqWkdkcAeI8XLmbK+4m4G5YeQRrmiotGlw=
golang.org/x/oauth2 v0.34.0/go.mod h1:lzm5WQJQwKZ3nwavOZ3IS5Aulzxi68dUSgRHujetwEA=
golang.org/x/sync v0.19.0 h1:vV+1eWNmZ5geRlYjzm2adRgW2/mcpevXNg50YZtPCE4=
golang.org/x/sync v0.19.0/go.mod h1:9KTHXmSnoGruLpwFjVSX0lNNA75CykiMECbovNTZqGI=
golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20210927094055-39ccf1dd6fa6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220503163025-988cb79eb6c6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.40.0 h1:DBZZqJ2Rkml6QMQsZywtnjnnGvHza6BTfYFWY9kjEWQ=
golang.org/x/sys v0.40.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks=
golang.org/x/term v0.39.0 h1:RclSuaJf32jOqZz74CkPA9qFuVTX7vhLlpfj/IGWlqY=
golang.org/x/term v0.39.0/go.mod h1:yxzUCTP/U+FzoxfdKmLaA0RV1WgE0VY7hXBwKtY/4ww=
golang.org/x/text v0.33.0 h1:B3njUFyqtHDUI5jMn1YIr5B0IE2U0qck04r6d4KPAxE=
golang.org/x/text v0.33.0/go.mod h1:LuMebE6+rBincTi9+xWTY8TztLzKHc/9C1uBCG27+q8=
golang.org/x/time v0.14.0 h1:MRx4UaLrDotUKUdCIqzPC48t1Y9hANFKIRpNx+Te8PI=
golang.org/x/time v0.14.0/go.mod h1:eL/Oa2bBBK0TkX57Fyni+NgnyQQN4LitPmob2Hjnqw4=
golang.org/x/tools v0.41.0 h1:a9b8iMweWG+S0OBnlU36rzLp20z1Rp10w+IY2czHTQc=
golang.org/x/tools v0.41.0/go.mod h1:XSY6eDqxVNiYgezAVqqCeihT4j1U2CCsqvH3WhQpnlg=
google.golang.org/protobuf v1.36.11 h1:fV6ZwhNocDyBLK0dj+fg8ektcVegBBuEolpbTQyBNVE=
google.golang.org/protobuf v1.36.11/go.mod h1:HTf+CrKn2C3g5S8VImy6tdcUvCska2kB7j23XfzDpco=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q=
gopkg.in/evanphx/json-patch.v4 v4.13.0 h1:czT3CmqEaQ1aanPc5SdlgQrrEIb8w/wwCvWWnfEbYzo=
gopkg.in/evanphx/json-patch.v4 v4.13.0/go.mod h1:p8EYWUEYMpynmqDbY58zCKCFZw8pRWMG4EsWvDvM72M=
gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc=
gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw=
gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY=
gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ=
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
k8s.io/api v0.35.0 h1:iBAU5LTyBI9vw3L5glmat1njFK34srdLmktWwLTprlY=
k8s.io/api v0.35.0/go.mod h1:AQ0SNTzm4ZAczM03QH42c7l3bih1TbAXYo0DkF8ktnA=
k8s.io/apimachinery v0.35.0 h1:Z2L3IHvPVv/MJ7xRxHEtk6GoJElaAqDCCU0S6ncYok8=
k8s.io/apimachinery v0.35.0/go.mod h1:jQCgFZFR1F4Ik7hvr2g84RTJSZegBc8yHgFWKn//hns=
k8s.io/client-go v0.35.0 h1:IAW0ifFbfQQwQmga0UdoH0yvdqrbwMdq9vIFEhRpxBE=
k8s.io/client-go v0.35.0/go.mod h1:q2E5AAyqcbeLGPdoRB+Nxe3KYTfPce1Dnu1myQdqz9o=
k8s.io/klog/v2 v2.130.1 h1:n9Xl7H1Xvksem4KFG4PYbdQCQxqc/tTUyrgXaOhHSzk=
k8s.io/klog/v2 v2.130.1/go.mod h1:3Jpz1GvMt720eyJH1ckRHK1EDfpxISzJ7I9OYgaDtPE=
k8s.io/kube-openapi v0.0.0-20251125145642-4e65d59e963e h1:iW9ChlU0cU16w8MpVYjXk12dqQ4BPFBEgif+ap7/hqQ=
k8s.io/kube-openapi v0.0.0-20251125145642-4e65d59e963e/go.mod h1:kdmbQkyfwUagLfXIad1y2TdrjPFWp2Q89B3qkRwf/pQ=
k8s.io/utils v0.0.0-20260108192941-914a6e750570 h1:JT4W8lsdrGENg9W+YwwdLJxklIuKWdRm+BC+xt33FOY=
k8s.io/utils v0.0.0-20260108192941-914a6e750570/go.mod h1:xDxuJ0whA3d0I4mf/C4ppKHxXynQ+fxnkmQH0vTHnuk=
sigs.k8s.io/json v0.0.0-20250730193827-2d320260d730 h1:IpInykpT6ceI+QxKBbEflcR5EXP7sU1kvOlxwZh5txg=
sigs.k8s.io/json v0.0.0-20250730193827-2d320260d730/go.mod h1:mdzfpAEoE6DHQEN0uh9ZbOCuHbLK5wOm7dK4ctXE9Tg=
sigs.k8s.io/kind v0.31.0 h1:UcT4nzm+YM7YEbqiAKECk+b6dsvc/HRZZu9U0FolL1g=
sigs.k8s.io/kind v0.31.0/go.mod h1:FSqriGaoTPruiXWfRnUXNykF8r2t+fHtK0P0m1AbGF8=
sigs.k8s.io/randfill v1.0.0 h1:JfjMILfT8A6RbawdsK2JXGBR5AQVfd+9TbzrlneTyrU=
sigs.k8s.io/randfill v1.0.0/go.mod h1:XeLlZ/jmk4i1HRopwe7/aU3H5n1zNUcX6TM94b3QxOY=
sigs.k8s.io/structured-merge-diff/v6 v6.3.1 h1:JrhdFMqOd/+3ByqlP2I45kTOZmTRLBUm5pvRjeheg7E=
sigs.k8s.io/structured-merge-diff/v6 v6.3.1/go.mod h1:M3W8sfWvn2HhQDIbGWj3S099YozAsymCo/wrT5ohRUE=
sigs.k8s.io/yaml v1.6.0 h1:G8fkbMSAFqgEFgh4b1wmtzDnioxFCUgTZhlbj5P9QYs=
sigs.k8s.io/yaml v1.6.0/go.mod h1:796bPqUfzR/0jLAl6XjHl3Ck7MiyVv8dbTdyT3/pMf4=
================================================
FILE: helm/Chart.yaml
================================================
# Helm chart metadata for NetAssert, a network-policy testing tool.
# NOTE(review): apiVersion v1 is the Helm 2-era chart format; Helm 3 also
# accepts it — confirm whether an upgrade to apiVersion v2 is desired.
apiVersion: v1
description: NetAssert
name: netassert
# Chart and application versions track the in-development release; the image
# tag in values.yaml mirrors appVersion.
version: 1.0.0-dev
appVersion: 1.0.0-dev
home: https://github.com/controlplaneio/netassert
sources:
  - https://github.com/controlplaneio/netassert
================================================
FILE: helm/README.md
================================================
================================================
FILE: helm/templates/NOTES.txt
================================================
================================================
FILE: helm/templates/_helpers.tpl
================================================
{{/* vim: set filetype=mustache: */}}

{{/*
Expand the name of the chart.
*/}}
{{- define "netassert.name" -}}
{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}}
{{- end -}}

{{/*
Create a default fully qualified app name.
We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
If release name contains chart name it will be used as a full name.
*/}}
{{- define "netassert.fullname" -}}
{{- if .Values.fullnameOverride -}}
{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}}
{{- else -}}
{{- $name := default .Chart.Name .Values.nameOverride -}}
{{- if contains $name .Release.Name -}}
{{- .Release.Name | trunc 63 | trimSuffix "-" -}}
{{- else -}}
{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}}
{{- end -}}
{{- end -}}
{{- end -}}

{{/*
Create chart name and version as used by the chart label.
*/}}
{{- define "netassert.chart" -}}
{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}}
{{- end -}}

{{/*
Add Helm hook annotations when the chart is used as a post-deployment task.
Only emitted when .Values.mode is "post-deploy", turning the consuming
resources into post-install/post-upgrade hooks.
*/}}
{{- define "netassert.hookAnnotations" -}}
{{- if eq .Values.mode "post-deploy" }}
helm.sh/hook: post-install,post-upgrade
helm.sh/hook-weight: "0"
{{- end }}
{{- end }}
================================================
FILE: helm/templates/clusterrole.yaml
================================================
{{/*
ClusterRole for the netassert Job: read access to pods and workload objects,
plus patch on pods/ephemeralcontainers.
*/}}
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  name: {{ template "netassert.fullname" . }}
  annotations:
    {{- include "netassert.hookAnnotations" . | nindent 4 }}
  labels:
    app: {{ template "netassert.name" . }}
    chart: {{ template "netassert.chart" . }}
    release: {{ .Release.Name }}
    heritage: {{ .Release.Service }}
rules:
  # Read pods across the cluster.
  - apiGroups: [""]
    resources: ["pods"]
    verbs: ["get", "list", "watch"]
  # Patch ephemeral containers into pods — presumably to run network probes;
  # NOTE(review): confirm against cmd/netassert usage.
  - apiGroups: [""]
    resources: ["pods/ephemeralcontainers"]
    verbs: ["patch"]
  # Read replicasets (e.g. to resolve a deployment to its pods).
  - apiGroups: ["apps"]
    resources: ["replicasets"]
    verbs: ["get", "list"]
  # Read the workload kinds that test cases may reference.
  - apiGroups: ["apps"]
    resources: ["deployments", "statefulsets", "daemonsets"]
    verbs: ["get"]
================================================
FILE: helm/templates/clusterrolebinding.yaml
================================================
{{/*
Binds the netassert ClusterRole to the chart's ServiceAccount in the release
namespace. All three objects share the "netassert.fullname" name.
*/}}
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: {{ template "netassert.fullname" . }}
  annotations:
    {{- include "netassert.hookAnnotations" . | nindent 4 }}
  labels:
    app: {{ template "netassert.name" . }}
    chart: {{ template "netassert.chart" . }}
    release: {{ .Release.Name }}
    heritage: {{ .Release.Service }}
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: {{ template "netassert.fullname" . }}
subjects:
  - kind: ServiceAccount
    name: {{ template "netassert.fullname" . }}
    namespace: {{ .Release.Namespace }}
================================================
FILE: helm/templates/configmap.yaml
================================================
{{/*
ConfigMap carrying the netassert test file, mounted by the Job at
/tests/test.yaml. Rendered only when .Values.mode is NOT "post-deploy".
NOTE(review): .Values.testFile is not defined in values.yaml — confirm that
every caller rendering this template supplies it.
*/}}
{{- if ne .Values.mode "post-deploy" }}
apiVersion: v1
kind: ConfigMap
metadata:
  name: {{ template "netassert.fullname" . }}
  annotations:
    {{- include "netassert.hookAnnotations" . | nindent 4 }}
  labels:
    app: {{ template "netassert.name" . }}
    chart: {{ template "netassert.chart" . }}
    release: {{ .Release.Name }}
    heritage: {{ .Release.Service }}
data:
  test.yaml: |-
{{ .Values.testFile | indent 4 }}
{{- end }}
================================================
FILE: helm/templates/job.yaml
================================================
{{/*
Job that runs the netassert test suite. When .Values.mode is "post-deploy",
the included hook annotations turn it into a Helm post-install/post-upgrade
hook (see netassert.hookAnnotations in _helpers.tpl).
*/}}
apiVersion: batch/v1
kind: Job
metadata:
  name: {{ template "netassert.fullname" . }}
  annotations:
    {{- include "netassert.hookAnnotations" . | nindent 4 }}
  labels:
    app: {{ template "netassert.name" . }}
    chart: {{ template "netassert.chart" . }}
    release: {{ .Release.Name }}
    heritage: {{ .Release.Service }}
spec:
  parallelism: {{ .Values.job.parallelism }}
  completions: {{ .Values.job.completions }}
  activeDeadlineSeconds: {{ .Values.job.activeDeadlineSeconds }}
  backoffLimit: {{ .Values.job.backoffLimit }}
  ttlSecondsAfterFinished: {{ .Values.job.ttlSecondsAfterFinished }}
  template:
    metadata:
      labels:
        app: {{ template "netassert.name" . }}
        release: {{ .Release.Name }}
        component: job
    spec:
      restartPolicy: {{ default "Never" .Values.job.restartPolicy }}
      # "serviceAccountName" is the supported PodSpec field; the previously
      # used "serviceAccount" is a deprecated alias.
      serviceAccountName: {{ template "netassert.fullname" . }}
      securityContext:
        {{- toYaml .Values.securityContext | nindent 8 }}
      {{- if .Values.priorityClassName }}
      priorityClassName: "{{ .Values.priorityClassName }}"
      {{- end }}
      terminationGracePeriodSeconds: {{ .Values.terminationGracePeriodSeconds }}
      containers:
        - name: netassert
          image: "{{ .Values.image.repository }}:{{ .Values.image.tag }}"
          imagePullPolicy: {{ .Values.image.pullPolicy }}
          args:
            {{- toYaml .Values.args | nindent 12 }}
          env:
            # Env keys are upper-cased and dots become underscores, e.g.
            # "log.level" -> LOG_LEVEL.
            {{- range $key, $value := .Values.env }}
            - name: {{ $key | upper | replace "." "_" }}
              value: {{ $value | quote }}
            {{- end }}
          resources:
            {{- toYaml .Values.resources | nindent 12 }}
          volumeMounts:
            # Test file rendered from the chart's ConfigMap (see configmap.yaml).
            - name: testfile
              mountPath: /tests
            {{- if .Values.volumeMounts }}
            {{- toYaml .Values.volumeMounts | nindent 12 }}
            {{- end }}
      {{- with .Values.nodeSelector }}
      nodeSelector:
        {{- toYaml . | nindent 8 }}
      {{- end }}
      {{- with .Values.affinity }}
      affinity:
        {{- toYaml . | nindent 8 }}
      {{- end }}
      {{- with .Values.tolerations }}
      tolerations:
        {{- toYaml . | nindent 8 }}
      {{- end }}
      volumes:
        - name: testfile
          configMap:
            name: {{ template "netassert.fullname" . }}
        {{- if .Values.volumes }}
        {{- toYaml .Values.volumes | nindent 8 }}
        {{- end }}
        {{- range $key, $value := .Values.secretMounts }}
        - name: {{ $key }}
          secret:
            secretName: {{ $value.secretName }}
            defaultMode: {{ $value.defaultMode }}
        {{- end }}
================================================
FILE: helm/templates/serviceaccount.yaml
================================================
{{/*
ServiceAccount used by the netassert Job; bound to the matching ClusterRole
via the ClusterRoleBinding of the same name.
*/}}
apiVersion: v1
kind: ServiceAccount
metadata:
  name: {{ template "netassert.fullname" . }}
  annotations:
    {{- include "netassert.hookAnnotations" . | nindent 4 }}
  labels:
    app: {{ template "netassert.name" . }}
    chart: {{ template "netassert.chart" . }}
    release: {{ .Release.Name }}
    heritage: {{ .Release.Service }}
================================================
FILE: helm/values.yaml
================================================
# Operating mode. "post-deploy" makes the chart's resources Helm
# post-install/post-upgrade hooks and skips rendering the test ConfigMap;
# any other value renders the ConfigMap from .Values.testFile instead.
mode: post-deploy

# Tuning for the Job object; consumed verbatim by templates/job.yaml.
job:
  parallelism: 1
  completions: 1
  activeDeadlineSeconds: 900
  backoffLimit: 0
  ttlSecondsAfterFinished: 3600
  restartPolicy: Never

terminationGracePeriodSeconds: 30

# Explicit empty mapping instead of a bare key: a bare "serviceAccount:"
# parses as null, which is ambiguous (yamllint empty-values).
serviceAccount: {}

image:
  repository: controlplane/netassert
  tag: 1.0.0-dev
  pullPolicy: IfNotPresent

# Arguments passed to the netassert container.
args:
  - run
  - --input-file
  - /tests/test.yaml

resources: {}
priorityClassName: ""
nodeSelector: {}
tolerations: []
affinity: {}
securityContext: {}
env: {}

# Extra volumes / volumeMounts appended to the Job pod. Explicit empty lists
# instead of bare keys (null) — both are falsy in the templates' `if` guards,
# so rendering is unchanged, but the intended type (a sequence) is now clear.
volumes: []
volumeMounts: []
================================================
FILE: internal/data/read.go
================================================
package data
import (
"fmt"
"os"
"path/filepath"
)
// List of file extensions we support
const (
	fileExtensionYAML = `.yaml` // includes the leading dot, matching filepath.Ext output
	fileExtensionYML  = `.yml`
)
// ReadTestsFromDir - Reads tests cases from .yaml and .yml files present in a
// directory. It does not recurse into sub-directories. Every file is validated
// as it is merged in, so duplicate test names across files are reported against
// the file that introduced them. Returns an error if the path is empty, cannot
// be read, or any file fails to parse or validate.
func ReadTestsFromDir(path string) (Tests, error) {
	if path == "" {
		return nil, fmt.Errorf("input dir parameter cannot be empty string")
	}

	fp, err := os.Open(path)
	if err != nil {
		return nil, fmt.Errorf("unable to open dir containing tests %q: %w", path, err)
	}
	// Close the directory handle; previously it was leaked.
	// The close error is ignored: the handle is read-only and fully consumed.
	defer fp.Close()

	// we do not recursively read all the YAML and YML files
	// the depth is only 1 level
	files, err := fp.ReadDir(0)
	if err != nil {
		return nil, fmt.Errorf("unable to read contents of the directory %q: %w", path, err)
	}

	var testCases Tests
	for _, f := range files {
		ext := filepath.Ext(f.Name())
		if ext != fileExtensionYAML && ext != fileExtensionYML {
			continue
		}

		tcFile := filepath.Join(path, f.Name())
		tc, err := ReadTestsFromFile(tcFile)
		if err != nil {
			return nil, err
		}
		testCases = append(testCases, tc...)

		// this is a multi-files validation each time new tests are added,
		// so a cross-file conflict is attributed to the file just merged
		if err := testCases.Validate(); err != nil {
			return nil, fmt.Errorf("validation of tests from file %q failed: %w", tcFile, err)
		}
	}

	return testCases, nil
}
// ReadTestsFromFile - reads tests from a file containing a list of Test.
// A fileName of "-" reads the tests from standard input instead.
// The result parameters are named so the deferred Close can report its error:
// previously the defer assigned to a shadowing local err, which was silently
// discarded because the function's results were unnamed.
func ReadTestsFromFile(fileName string) (tests Tests, err error) {
	if fileName == "" {
		return nil, fmt.Errorf("input fileName parameter can not be empty string")
	}

	if fileName == "-" {
		return NewFromReader(os.Stdin)
	}

	fp, err := os.Open(fileName)
	if err != nil {
		return nil, fmt.Errorf("unable to open file %q containing tests: %w", fileName, err)
	}

	defer func() {
		// Surface the close error only when no earlier error is pending,
		// so a parse failure from NewFromReader is not masked.
		if closeErr := fp.Close(); closeErr != nil && err == nil {
			err = fmt.Errorf("unable to close file: %q, %w", fileName, closeErr)
		}
	}()

	return NewFromReader(fp)
}
================================================
FILE: internal/data/read_test.go
================================================
package data
import (
"strings"
"testing"
"github.com/stretchr/testify/require"
)
// TestReadTestFile exercises ReadTestsFromFile against valid, invalid,
// missing, and empty paths. The original implementation encoded the
// error expectation as an obscure XOR via require.NotEqualf; this uses
// explicit Error/NoError assertions instead.
func TestReadTestFile(t *testing.T) {
	t.Parallel()

	tests := map[string]struct {
		confFilepath   string
		wantErrMatches []string // substrings the returned error must contain; empty means no error expected
	}{
		"existing valid file": {
			confFilepath: "./testdata/valid/empty.yaml",
		},
		"existing invalid file": {
			confFilepath:   "./testdata/invalid/not-a-list.yaml",
			wantErrMatches: []string{"failed to unmarshal tests"},
		},
		"not existing file": {
			confFilepath:   "./testdata/fake-dir/fake-file.yaml",
			wantErrMatches: []string{"no such file or directory"},
		},
		"empty file path": {
			confFilepath:   "",
			wantErrMatches: []string{"input fileName parameter can not be empty string"},
		},
	}

	for name, tt := range tests {
		tc := tt
		t.Run(name, func(t *testing.T) {
			t.Parallel()

			_, err := ReadTestsFromFile(tc.confFilepath)
			if len(tc.wantErrMatches) == 0 {
				require.NoError(t, err)
				return
			}

			// Assert the error exists before dereferencing it.
			require.Error(t, err)
			for _, wem := range tc.wantErrMatches {
				require.Containsf(t, err.Error(), wem,
					"expecting error to contain: %s, got: %v", wem, err)
			}
		})
	}
}
// TestReadTestsFromDir covers ReadTestsFromDir for valid directories,
// directories without YAML files, invalid test content, cross-file duplicate
// names, missing directories, and the empty-path guard.
func TestReadTestsFromDir(t *testing.T) {
	t.Parallel()

	cases := map[string]struct {
		confDir string
		errMsg  string
		wantErr bool
	}{
		"existing dir valid tests": {
			confDir: "./testdata/valid",
		},
		"dir without yaml files": {
			confDir: "./testdata/dir-without-yaml-files",
		},
		"existing dir with invalid tests": {
			confDir: "./testdata/invalid",
			errMsg:  "failed to unmarshal tests",
			wantErr: true,
		},
		"not existing dir": {
			confDir: "./testdata/fake-dir",
			errMsg:  "no such file or directory",
			wantErr: true,
		},
		"duplicated test names in different files": {
			confDir: "./testdata/invalid-duplicated-names",
			errMsg:  "duplicate test name found",
			wantErr: true,
		},
		"empty file path": {
			confDir: "",
			errMsg:  "input dir parameter cannot be empty string",
			wantErr: true,
		},
	}

	for name, c := range cases {
		c := c // capture range variable for the parallel subtest
		t.Run(name, func(t *testing.T) {
			r := require.New(t)
			t.Parallel()

			_, err := ReadTestsFromDir(c.confDir)
			if !c.wantErr {
				r.NoError(err)
				return
			}

			// An error is expected; it must carry the anticipated message.
			r.Error(err)
			r.Contains(err.Error(), c.errMsg)
		})
	}
}
================================================
FILE: internal/data/tap.go
================================================
package data
import (
"fmt"
"io"
"gopkg.in/yaml.v2"
)
// TAPResult - outputs result of tests into a TAP format.
//
// Writes a TAP (Test Anything Protocol) version 14 report to w: a version
// header, the plan line ("1..N"), then one "ok"/"not ok" line per test.
// Failing tests additionally get a YAML diagnostic block carrying the
// failure reason. Returns an error for a nil or empty receiver, on YAML
// marshalling failure, or on any write error.
func (ts *Tests) TAPResult(w io.Writer) error {
	if ts == nil {
		return fmt.Errorf("empty ts")
	}

	// NOTE(review): message grammar — likely intended "no tests were found".
	if len(*ts) < 1 {
		return fmt.Errorf("no test were found")
	}

	// TAP preamble: protocol version plus the 1-based plan range.
	header := "TAP version 14\n"
	header += fmt.Sprintf("1..%v\n", len(*ts))

	_, err := fmt.Fprint(w, header)
	if err != nil {
		return err
	}

	for index, test := range *ts {
		result := ""
		switch test.Pass {
		case true:
			// TAP numbering is 1-based, hence index+1.
			result = fmt.Sprintf("ok %v - %v", index+1, test.Name)
		case false:
			// Marshal the reason through YAML so special characters are
			// escaped safely inside the TAP YAML diagnostic block.
			frEscaped, err := yaml.Marshal(&test.FailureReason) // frEscaped ends with "\n"
			if err != nil {
				return err
			}
			result = fmt.Sprintf("not ok %v - %v", index+1, test.Name)
			result += fmt.Sprintf("\n ---\n reason: %s ...", frEscaped)
		}

		if _, err := fmt.Fprintln(w, result); err != nil {
			return err
		}
	}

	return nil
}
================================================
FILE: internal/data/tap_test.go
================================================
package data
import (
"bytes"
"testing"
"github.com/stretchr/testify/require"
)
// TestTests_TAPResult verifies the TAP v14 rendering of a mixed pass/fail
// test list and the error paths for empty and nil receivers.
// Fixes over the original: the wantErr cases never asserted that an error
// actually occurred, and the Equalf message had %v verbs with no arguments
// (printing "%!v(MISSING)" on failure).
func TestTests_TAPResult(t *testing.T) {
	tests := []struct {
		name    string
		tests   Tests
		want    string
		wantErr bool
	}{
		{
			name: "multiple tests",
			tests: Tests{
				&Test{Name: "test1", Pass: false, FailureReason: "example failure reason"},
				&Test{Name: "pod2pod", Pass: true},
				&Test{Name: "---", Pass: true},
				&Test{Name: "don'tknow", Pass: false},
			},
			want: `TAP version 14
1..4
not ok 1 - test1
 ---
 reason: example failure reason
 ...
ok 2 - pod2pod
ok 3 - ---
not ok 4 - don'tknow
 ---
 reason: ""
 ...
`,
			wantErr: false,
		},
		{
			name:    "emptytests",
			tests:   Tests{},
			wantErr: true,
		},
		{
			name:    "niltests",
			tests:   nil,
			wantErr: true,
		},
	}

	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			w := &bytes.Buffer{}
			err := tt.tests.TAPResult(w)
			if tt.wantErr {
				require.Errorf(t, err, "Tests.TAPResult() expected an error, got nil")
			} else {
				require.NoErrorf(t, err, "Tests.TAPResult() error = %v, wantErr %v", err, tt.wantErr)
			}

			gotW := w.String()
			require.Equalf(t, tt.want, gotW, "Tests.TAPResult() = %v, want %v", gotW, tt.want)
		})
	}
}
================================================
FILE: internal/data/testdata/dir-without-yaml-files/.gitkeep
================================================
================================================
FILE: internal/data/testdata/invalid/duplicated-names.yaml
================================================
- name: testname
type: k8s
targetPort: 80
exitCode: 0
src:
k8sResource:
kind: deployment
name: deployment1
namespace: ns1
dst:
host:
name: "1.1.1.1"
- name: testname
type: k8s
targetPort: 80
exitCode: 0
src:
k8sResource:
kind: deployment
name: deployment1
namespace: ns1
dst:
host:
name: "1.1.1.1"
================================================
FILE: internal/data/testdata/invalid/empty-resources.yaml
================================================
- name: testname
type: k8s
targetPort: 100000
exitCode: 0
src:
k8sResource:
dumb: 44
dst:
host:
================================================
FILE: internal/data/testdata/invalid/host-as-dst-udp.yaml
================================================
- name: testname
type: k8s
protocol: udp
targetPort: 80
exitCode: 0
src:
k8sResource:
kind: deployment
name: deployment1
namespace: ns1
dst:
host:
name: "1.1.1.1"
================================================
FILE: internal/data/testdata/invalid/host-as-source.yaml
================================================
- name: testname
type: k8s
targetPort: 80
exitCode: 0
src:
host:
name: "1.1.1.1"
dst:
k8sResource:
kind: deployment
name: deployment1
namespace: ns1
================================================
FILE: internal/data/testdata/invalid/missing-fields.yaml
================================================
- attempts: -2
timeoutSeconds: -2
targetPort: 100000
exitCode: 0
src:
wrong: 44
dst:
================================================
FILE: internal/data/testdata/invalid/multiple-dst-blocks.yaml
================================================
- name: testname2
type: k8s
protocol: udp
timeoutSeconds: 50
attempts: 15
targetPort: 8080
exitCode: 1
src:
k8sResource:
kind: statefulset
name: statefulset1
namespace: ns1
dst:
k8sResource:
kind: pod
name: mypod
namespace: ns2
host:
name: "1.1.1.1"
================================================
FILE: internal/data/testdata/invalid/not-a-list.yaml
================================================
this:
should: "be"
a: "list"
================================================
FILE: internal/data/testdata/invalid/wrong-test-values.yaml
================================================
- name: testname
type: k8s
protocol: nonexisting
attempts: -2
timeoutSeconds: -2
targetPort: 100000
exitCode: 0
src:
k8sResource:
kind: newkind
name: deployment1
namespace: ns1
dst:
host:
name: ""
================================================
FILE: internal/data/testdata/invalid-duplicated-names/input1.yaml
================================================
- name: testname
type: k8s
targetPort: 80
exitCode: 0
src:
k8sResource:
kind: deployment
name: deployment1
namespace: ns1
dst:
host:
name: "1.1.1.1"
================================================
FILE: internal/data/testdata/invalid-duplicated-names/input2.yaml
================================================
- name: testname
type: k8s
targetPort: 80
exitCode: 0
src:
k8sResource:
kind: deployment
name: deployment1
namespace: ns1
dst:
host:
name: "1.1.1.1"
================================================
FILE: internal/data/testdata/valid/empty.yaml
================================================
================================================
FILE: internal/data/testdata/valid/multi.yaml
================================================
- name: testname
type: k8s
targetPort: 80
exitCode: 0
src:
k8sResource:
kind: deployment
name: deployment1
namespace: ns1
dst:
host:
name: "1.1.1.1"
- name: testname2
type: k8s
protocol: udp
timeoutSeconds: 50
attempts: 20
targetPort: 8080
exitCode: 1
src:
k8sResource:
kind: statefulset
name: statefulset1
namespace: ns1
dst:
k8sResource:
kind: pod
name: mypod
namespace: ns2
================================================
FILE: internal/data/types.go
================================================
package data
import (
"errors"
"fmt"
"io"
"gopkg.in/yaml.v3"
)
// Protocol - represents the Layer 4 protocol used by a test.
type Protocol string

const (
	// ProtocolTCP - represents the TCP protocol
	ProtocolTCP Protocol = "tcp"
	// ProtocolUDP - represents the UDP protocol
	ProtocolUDP Protocol = "udp"
)

// K8sResourceKind represents the Kind of K8sResource
type K8sResourceKind string

const (
	// KindDeployment - a Kubernetes Deployment workload
	KindDeployment K8sResourceKind = "deployment"
	// KindStatefulSet - a Kubernetes StatefulSet workload
	KindStatefulSet K8sResourceKind = "statefulset"
	// KindDaemonSet - a Kubernetes DaemonSet workload
	KindDaemonSet K8sResourceKind = "daemonset"
	// KindPod - a plain Kubernetes Pod
	KindPod K8sResourceKind = "pod"
)

// ValidK8sResourceKinds - holds a map of valid K8sResourceKind;
// used as a membership set during validation.
var ValidK8sResourceKinds = map[K8sResourceKind]bool{
	KindDeployment:  true,
	KindStatefulSet: true,
	KindDaemonSet:   true,
	KindPod:         true,
}

// TestType - represents a K8s test type, right now
// we only support k8s type
type TestType string

const (
	// K8sTest - the only TestType currently supported
	K8sTest TestType = "k8s"
)

// TestTypes - holds a map of valid NetAssert TestTypes;
// used as a membership set during validation.
var TestTypes = map[TestType]bool{
	K8sTest: true,
}
// K8sResource - Resource hold a Kubernetes Resource that can act as the
// source or destination of a test.
type K8sResource struct {
	Kind      K8sResourceKind `yaml:"kind"`      // workload kind; must be one of ValidK8sResourceKinds
	Name      string          `yaml:"name"`      // name of the Kubernetes resource
	Namespace string          `yaml:"namespace"` // namespace the resource lives in
	// Clone bool `yaml:"clone"`
}

// Src represents a source in the K8s test; currently only a
// K8sResource may be used as a source.
type Src struct {
	K8sResource *K8sResource `yaml:"k8sResource"`
}

// Host represents a host that can be used as Dst in a K8s test
type Host struct {
	Name string `yaml:"name"` // hostname or IP address of the target
}

// Dst holds the destination or the target resource of the test.
// Exactly one of K8sResource or Host may be set, never both.
type Dst struct {
	K8sResource *K8sResource `yaml:"k8sResource,omitempty"`
	Host        *Host        `yaml:"host,omitempty"`
}

// Test holds a single netAssert test
type Test struct {
	Name           string   `yaml:"name"`           // unique name of the test
	Type           TestType `yaml:"type"`           // test type; must be present in TestTypes
	Protocol       Protocol `yaml:"protocol"`       // tcp or udp; defaults to tcp (see setDefaults)
	TargetPort     int      `yaml:"targetPort"`     // destination port, 1-65535
	TimeoutSeconds int      `yaml:"timeoutSeconds"` // per-test timeout; defaults to 15 (see setDefaults)
	Attempts       int      `yaml:"attempts"`       // number of connection attempts; defaults to 3 (see setDefaults)
	ExitCode       int      `yaml:"exitCode"`       // expected exit code of the scanner container
	Src            *Src     `yaml:"src"`            // source of the connection
	Dst            *Dst     `yaml:"dst"`            // destination of the connection
	Pass           bool     `yaml:"pass"`           // set after execution: whether the test passed
	FailureReason  string   `yaml:"failureReason"`  // set after execution: why the test failed
}

// Tests - holds a slice of NetAssertTests
type Tests []*Test
// validate - checks that the K8sResource has a name, a namespace and a
// recognised kind; all detected problems are joined into a single error.
func (r *K8sResource) validate() error {
	if r == nil {
		return fmt.Errorf("K8sResource is empty")
	}

	var errs []error
	if r.Name == "" {
		errs = append(errs, fmt.Errorf("k8sResource name is missing"))
	}
	if r.Kind == "" {
		errs = append(errs, fmt.Errorf("k8sResource kind is missing"))
	}
	if r.Namespace == "" {
		errs = append(errs, fmt.Errorf("k8sResource namespace is missing"))
	}
	if _, known := ValidK8sResourceKinds[r.Kind]; !known {
		errs = append(errs, fmt.Errorf("k8sResource invalid kind '%s'", r.Kind))
	}

	// errors.Join ignores nil entries and returns nil for an empty slice
	return errors.Join(errs...)
}
// validate - validates the Host type, ensuring it is present and has a
// non-empty name.
func (h *Host) validate() error {
	switch {
	case h == nil:
		return fmt.Errorf("host field is nil")
	case h.Name == "":
		return fmt.Errorf("host field is set to empty string")
	default:
		return nil
	}
}
// validate - validates the Dst type: exactly one of K8sResource or Host
// may be set, and whichever is set must itself be valid.
func (d *Dst) validate() error {
	if d == nil {
		return fmt.Errorf("dst field cannot be nil")
	}
	if d.K8sResource != nil && d.Host != nil {
		return fmt.Errorf("dst field only supports K8sResource or Host but not both")
	}

	switch {
	case d.K8sResource != nil:
		return d.K8sResource.validate()
	case d.Host != nil:
		return d.Host.validate()
	default:
		// neither destination set; callers treat this as valid here
		return nil
	}
}
// validate - validates the Src type; only a K8sResource source is
// supported, and it must itself be valid.
func (d *Src) validate() error {
	switch {
	case d == nil:
		return fmt.Errorf("src field cannot be nil")
	case d.K8sResource == nil:
		return fmt.Errorf("k8sResource field in src is currently the only source allowed")
	default:
		return d.K8sResource.validate()
	}
}
// validate - validates the Test case. Every check is performed and all
// failures are aggregated with errors.Join, so a caller sees the complete
// list of problems at once rather than one at a time.
func (te *Test) validate() error {
	if te == nil {
		return fmt.Errorf("test is pointing to nil")
	}

	var nameErr error
	if te.Name == "" {
		nameErr = fmt.Errorf("name field is missing")
	}

	var invalidProtocolErr error
	// only TCP and UDP are supported protocols
	if te.Protocol != ProtocolUDP && te.Protocol != ProtocolTCP {
		invalidProtocolErr = fmt.Errorf("invalid protocol %s", te.Protocol)
	}

	var targetPortErr error
	// valid TCP/UDP port numbers are 1-65535
	if te.TargetPort < 1 || te.TargetPort > 65535 {
		targetPortErr = fmt.Errorf("targetPort out of range: %d", te.TargetPort)
	}

	var invalidAttemptsErr error
	if te.Attempts < 1 {
		invalidAttemptsErr = fmt.Errorf("attempts must be > 0")
	}

	var timeoutSecondsErr error
	if te.TimeoutSeconds < 1 {
		timeoutSecondsErr = fmt.Errorf("timeoutSeconds must be > 0")
	}

	var invalidTestTypeErr error
	// the type must be one of the registered TestTypes
	if _, ok := TestTypes[te.Type]; !ok {
		invalidTestTypeErr = fmt.Errorf("invalid test type %v", te.Type)
	}

	// src must be present; if it is, delegate to its own validation
	var missingSrcErr, k8sResourceErr error
	if te.Src == nil {
		missingSrcErr = fmt.Errorf("src block must be present")
	} else {
		k8sResourceErr = te.Src.validate()
	}

	// dst must be present; if it is, delegate to its own validation
	var missingDstErr, dstValidationErr error
	if te.Dst == nil {
		missingDstErr = fmt.Errorf("dst block must be present")
	} else {
		dstValidationErr = te.Dst.validate()
	}

	// UDP tests cannot target a plain host; they require a k8sResource
	// destination
	var notSupportedTest error
	if te.Protocol == ProtocolUDP && te.Dst != nil && te.Dst.Host != nil {
		notSupportedTest = fmt.Errorf("with udp tests the destination must be a k8sResource")
	}

	// errors.Join drops nil entries; the order here fixes the order of
	// messages in the combined error text
	return errors.Join(nameErr, invalidProtocolErr, targetPortErr,
		invalidAttemptsErr, timeoutSecondsErr, invalidTestTypeErr, k8sResourceErr,
		dstValidationErr, missingSrcErr, missingDstErr, notSupportedTest)
}
// Validate - validates the Tests type: each test must be individually
// valid and test names must be unique across the whole list. The first
// problem encountered is returned immediately.
func (ts *Tests) Validate() error {
	seen := make(map[string]struct{}, len(*ts))

	for _, tc := range *ts {
		if err := tc.validate(); err != nil {
			return err
		}
		if _, duplicate := seen[tc.Name]; duplicate {
			return fmt.Errorf("duplicate test name found %q", tc.Name)
		}
		seen[tc.Name] = struct{}{}
	}

	return nil
}
// setDefaults - sets sensible defaults on the Test for any field left at
// its zero value: 15s timeout, 3 attempts, TCP protocol.
func (te *Test) setDefaults() {
	const (
		defaultTimeoutSeconds = 15
		defaultAttempts       = 3
	)

	if te.Protocol == "" {
		te.Protocol = ProtocolTCP
	}
	if te.Attempts == 0 {
		te.Attempts = defaultAttempts
	}
	if te.TimeoutSeconds == 0 {
		te.TimeoutSeconds = defaultTimeoutSeconds
	}
}
// UnmarshalYAML - decodes the Tests type from a YAML node and runs
// Validate on the result, so an invalid document fails at decode time.
func (ts *Tests) UnmarshalYAML(node *yaml.Node) error {
	// decode into a local alias type so node.Decode does not invoke
	// this method again recursively
	type testsAlias []*Test

	var decoded testsAlias
	if err := node.Decode(&decoded); err != nil {
		return err
	}

	*ts = Tests(decoded)
	if err := ts.Validate(); err != nil {
		return fmt.Errorf("validation failed for tests: %w", err)
	}
	return nil
}
// NewFromReader - creates a new Tests value from an io.Reader carrying a
// YAML document. An empty document yields an empty (non-nil) Tests.
func NewFromReader(r io.Reader) (Tests, error) {
	raw, err := io.ReadAll(r)
	if err != nil {
		return nil, fmt.Errorf("cannot read from reader: %w", err)
	}

	var tests Tests
	if err = yaml.Unmarshal(raw, &tests); err != nil {
		return nil, fmt.Errorf("failed to unmarshal tests: %w", err)
	}

	// normalise a nil slice (empty document) to an empty Tests value
	if len(tests) == 0 {
		return Tests{}, nil
	}
	return tests, nil
}
// UnmarshalYAML - decodes a single Test from a YAML node, applies the
// defaults and validates the result before publishing it to the receiver.
func (te *Test) UnmarshalYAML(node *yaml.Node) error {
	// testAlias strips the UnmarshalYAML method from Test; this is
	// needed to prevent recursive decoding
	type testAlias Test

	var decoded testAlias
	if err := node.Decode(&decoded); err != nil {
		return err
	}

	// convert back to Test so setDefaults/validate are available
	parsed := Test(decoded)
	parsed.setDefaults()
	if err := parsed.validate(); err != nil {
		return err
	}

	// only overwrite the receiver once the value is known to be valid
	*te = parsed
	return nil
}
================================================
FILE: internal/data/types_test.go
================================================
package data
import (
"os"
"path/filepath"
"strings"
"testing"
"github.com/stretchr/testify/require"
)
// TestNewFromReader - table-driven test for NewFromReader covering valid
// documents, structurally broken YAML and every validation failure mode.
// Cases with a non-empty wantErrMatches load fixtures from
// testdata/invalid; the rest load from testdata/valid.
func TestNewFromReader(t *testing.T) {
	t.Parallel()

	tests := map[string]struct {
		confFile       string   // fixture file name
		want           Tests    // expected decoded value (success cases only)
		wantErrMatches []string // substrings the returned error must contain
	}{
		"empty": {
			confFile: "empty.yaml",
			want:     Tests{},
		},
		"not a list": {
			confFile:       "not-a-list.yaml",
			wantErrMatches: []string{"cannot unmarshal"},
		},
		"wrong test values": {
			confFile: "wrong-test-values.yaml",
			wantErrMatches: []string{
				"invalid protocol",
				"targetPort out of range",
				"attempts must",
				"timeoutSeconds must",
				"k8sResource invalid kind",
				"host field is set to empty string",
			},
		},
		"missing fields": {
			confFile: "missing-fields.yaml",
			wantErrMatches: []string{
				"name field is missing",
				"targetPort out of range",
				"attempts must",
				"timeoutSeconds must",
				"invalid",
				"src is currently",
				"dst block must",
			},
		},
		"host as a source": {
			confFile:       "host-as-source.yaml",
			wantErrMatches: []string{"k8sResource field in src is currently the only source allowed"},
		},
		"multiple destination blocks": {
			confFile:       "multiple-dst-blocks.yaml",
			wantErrMatches: []string{"dst field only supports K8sResource or Host but not both"},
		},
		"duplicated test names": {
			confFile:       "duplicated-names.yaml",
			wantErrMatches: []string{"duplicate test name found"},
		},
		"empty resources": {
			confFile: "empty-resources.yaml",
			wantErrMatches: []string{
				"k8sResource name is missing",
				"k8sResource kind is missing",
				"k8sResource namespace is missing",
				"k8sResource invalid kind",
			},
		},
		"host as a destination with udp": {
			confFile:       "host-as-dst-udp.yaml",
			wantErrMatches: []string{"with udp tests the destination must be a k8sResource"},
		},
		"multi valid": {
			confFile: "multi.yaml",
			want: Tests{
				&Test{
					Name:           "testname",
					Type:           "k8s",
					Protocol:       ProtocolTCP,
					Attempts:       3,
					TimeoutSeconds: 15,
					TargetPort:     80,
					ExitCode:       0,
					Src: &Src{
						K8sResource: &K8sResource{
							Name:      "deployment1",
							Kind:      KindDeployment,
							Namespace: "ns1",
						},
					},
					Dst: &Dst{
						Host: &Host{
							Name: "1.1.1.1",
						},
					},
				},
				&Test{
					Name:           "testname2",
					Type:           "k8s",
					Protocol:       ProtocolUDP,
					Attempts:       20,
					TimeoutSeconds: 50,
					TargetPort:     8080,
					ExitCode:       1,
					Src: &Src{
						K8sResource: &K8sResource{
							Name:      "statefulset1",
							Kind:      KindStatefulSet,
							Namespace: "ns1",
						},
					},
					Dst: &Dst{
						K8sResource: &K8sResource{
							Name:      "mypod",
							Kind:      KindPod,
							Namespace: "ns2",
						},
					},
				},
			},
		},
	}

	for name, tt := range tests {
		tc := tt
		t.Run(name, func(t *testing.T) {
			t.Parallel()

			// error cases read from testdata/invalid, success cases
			// from testdata/valid
			dir := "./testdata/valid"
			if len(tc.wantErrMatches) > 0 {
				dir = "./testdata/invalid"
			}
			testFile := filepath.Join(dir, tc.confFile)

			c, err := os.Open(filepath.Clean(testFile))
			require.NoError(t, err, "cannot open test file %s, err: %v", testFile, err)

			got, err := NewFromReader(c)
			errc := c.Close()
			require.NoError(t, errc, "cannot close test file %s, err: %v", testFile, errc)

			if len(tc.wantErrMatches) > 0 {
				// an error is expected and it must mention every
				// listed substring
				require.Errorf(t, err, "expecting an error matching %v, got none", tc.wantErrMatches)
				for _, wem := range tc.wantErrMatches {
					require.Truef(t, strings.Contains(err.Error(), wem), "expecting error to contain: %s, got: %v", wem, err)
				}
				return
			}

			require.NoErrorf(t, err, "expecting no error, got: %v", err)
			require.Equalf(t, tc.want, got, "expecting config %v, got: %v", tc.want, got)
		})
	}
}
================================================
FILE: internal/engine/engine.go
================================================
//go:generate mockgen -destination=engine_mocks_test.go -package=engine github.com/controlplaneio/netassert/internal/engine NetAssertTestRunner
package engine
import (
"context"
"fmt"
"sync"
"time"
"github.com/hashicorp/go-hclog"
corev1 "k8s.io/api/core/v1"
"github.com/controlplaneio/netassert/v2/internal/data"
)
// Engine - type responsible for running the netAssert test(s)
type Engine struct {
	Service NetAssertTestRunner // performs the pod lookups and ephemeral-container operations each test needs
	Log     hclog.Logger        // structured logger used for progress and error reporting
}

// New - Returns a new instance of Engine wired to the given test runner
// and logger; both are stored as-is.
func New(service NetAssertTestRunner, log hclog.Logger) *Engine {
	return &Engine{Service: service, Log: log}
}
// GetPod - returns a running Pod for the workload described by the
// K8sResource, dispatching to the appropriate lookup on the Service.
func (e *Engine) GetPod(ctx context.Context, res *data.K8sResource) (*corev1.Pod, error) {
	if res == nil {
		return &corev1.Pod{}, fmt.Errorf("res parameter is nil")
	}

	switch res.Kind {
	case data.KindDeployment:
		return e.Service.GetPodInDeployment(ctx, res.Name, res.Namespace)
	case data.KindStatefulSet:
		return e.Service.GetPodInStatefulSet(ctx, res.Name, res.Namespace)
	case data.KindDaemonSet:
		return e.Service.GetPodInDaemonSet(ctx, res.Name, res.Namespace)
	case data.KindPod:
		return e.Service.GetPod(ctx, res.Name, res.Namespace)
	}

	// any other kind is unsupported; log it and surface the same message
	// as an error
	e.Log.Error("", hclog.Fmt("%s is not supported K8sResource", res.Kind))
	return &corev1.Pod{}, fmt.Errorf("%s is not supported K8sResource", res.Kind)
}
// RunTests - runs a list of net assert test cases. Each test is launched
// in its own goroutine; consecutive launches are separated by `pause`
// (skipped after the last test). The function blocks until every launched
// test has finished. A failing test records its reason on the Test itself
// (FailureReason) and does not stop the other tests. If the context is
// cancelled, no further tests are launched, but tests already launched
// are still waited on.
func (e *Engine) RunTests(
	ctx context.Context, // context information
	te data.Tests, // the list of tests we are running
	snifferContainerPrefix string, // name of the sniffer container to use
	snifferContainerImage string, // image location of the sniffer Container
	scannerContainerPrefix string, // name of the scanner container to use
	scannerContainerImage string, // image location of the scanner container
	suffixLength int, // length of the random string that will be generated and appended to the container name
	pause time.Duration, // time to pause before running a test
	packetCaptureInterface string, // the network interface used to capture traffic by the sniffer container
) {
	var wg sync.WaitGroup

	for i, tc := range te {
		wg.Add(1)
		// tc is passed as a goroutine parameter so each goroutine gets
		// its own copy rather than sharing the loop variable
		go func(tc *data.Test, wg *sync.WaitGroup) {
			defer wg.Done()
			// run the test case
			err := e.RunTest(ctx, tc, snifferContainerPrefix, snifferContainerImage,
				scannerContainerPrefix, scannerContainerImage, suffixLength, packetCaptureInterface)
			if err != nil {
				e.Log.Error("Test execution failed", "Name", tc.Name, "error", err)
				tc.FailureReason = err.Error()
			}
		}(tc, &wg)

		if i < len(te)-1 { // do not pause after the last test
			cancellableDelay(ctx, pause)
		}

		// If the context is cancelled, we need to break out of the loop;
		// the wg.Wait below still waits for goroutines already launched
		if ctx.Err() != nil {
			break
		}
	}

	wg.Wait()
}
// cancellableDelay introduces a delay that can be interrupted by context cancellation.
// This function is useful when you want to pause execution for a specific duration,
// but also need the ability to respond quickly if an interrupt signal (like CTRL + C) is received.
func cancellableDelay(ctx context.Context, duration time.Duration) {
select {
case <-time.After(duration):
// The case of time.After(duration) is selected when the specified duration has elapsed.
// This means the function completes its normal delay without any interruption.
case <-ctx.Done():
// The ctx.Done() case is selected if the context is cancelled before the duration elapses.
// This could happen if an interrupt signal is received.
// Returning early from the function allows the program to quickly respond to the cancellation signal,
// such as cleaning up resources, stopping further processing, etc.
// No specific action is needed here other than returning from the function,
// as the cancellation of the context is handled by the caller.
return
}
}
// RunTest - Runs a single netAssert test case, dispatching to the TCP or
// UDP runner based on the test's protocol. Only k8s-type tests are
// supported.
func (e *Engine) RunTest(
	ctx context.Context, // context passed to this function
	te *data.Test, // test cases to execute
	snifferContainerPrefix string, // name of the sniffer container to use
	snifferContainerImage string, // image location of the sniffer Container
	scannerContainerPrefix string, // name of the scanner container to use
	scannerContainerImage string, // image location of the scanner container
	suffixLength int, // length of string that will be generated and appended to the container name
	packetCaptureInterface string, // the network interface used to capture traffic by the sniffer container
) error {
	if te.Type != data.K8sTest {
		return fmt.Errorf("only k8s test type is supported at this time: %s", te.Type)
	}

	switch te.Protocol {
	case data.ProtocolTCP:
		// TCP tests only need the scanner container
		return e.RunTCPTest(ctx, te, scannerContainerPrefix, scannerContainerImage, suffixLength)
	case data.ProtocolUDP:
		// UDP tests need both the sniffer and the scanner container
		return e.RunUDPTest(ctx, te, snifferContainerPrefix, snifferContainerImage,
			scannerContainerPrefix, scannerContainerImage, suffixLength, packetCaptureInterface)
	}

	e.Log.Error("error", hclog.Fmt("Only TCP/UDP protocol is supported at this time and not %s", te.Protocol))
	return fmt.Errorf("only TCP/UDP protocol is supported at this time and not %v", te.Protocol)
}
================================================
FILE: internal/engine/engine_daemonset_test.go
================================================
package engine
import (
"context"
"fmt"
"testing"
"github.com/hashicorp/go-hclog"
"github.com/stretchr/testify/require"
"go.uber.org/mock/gomock"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"github.com/controlplaneio/netassert/v2/internal/data"
)
// TestEngine_GetPod_DaemonSet - verifies Engine.GetPod dispatches
// DaemonSet resources to GetPodInDaemonSet and propagates both the
// returned Pod and any lookup error.
func TestEngine_GetPod_DaemonSet(t *testing.T) {
	mockCtrl := gomock.NewController(t)
	// t.Cleanup (not defer) so Finish runs after the parallel subtests
	// below have completed
	t.Cleanup(func() {
		mockCtrl.Finish()
	})

	var (
		podName       = "foo-pod"
		namespace     = "default"
		daemonSetName = "foo-ds"
	)

	t.Run("GetPod from DaemonSet when Pod exists", func(t *testing.T) {
		t.Parallel()
		ctx := context.Background()
		res := data.K8sResource{
			Kind:      data.KindDaemonSet,
			Name:      daemonSetName,
			Namespace: namespace,
		}
		// expect exactly one lookup that succeeds with a named Pod
		mockRunner := NewMockNetAssertTestRunner(mockCtrl)
		mockRunner.EXPECT().
			GetPodInDaemonSet(ctx, daemonSetName, namespace).
			Return(&corev1.Pod{
				ObjectMeta: metav1.ObjectMeta{
					Name:      podName,
					Namespace: namespace,
				},
				Spec:   corev1.PodSpec{},
				Status: corev1.PodStatus{},
			}, nil)
		eng := New(mockRunner, hclog.NewNullLogger())
		pod, err := eng.GetPod(ctx, &res)
		require.NoError(t, err)
		require.Equal(t, pod.Namespace, namespace)
		require.Equal(t, pod.Name, podName)
	})

	t.Run("GetPod from DaemonSet when Pod does not exist", func(t *testing.T) {
		t.Parallel()
		ctx := context.Background()
		res := data.K8sResource{
			Kind:      data.KindDaemonSet,
			Name:      daemonSetName,
			Namespace: namespace,
		}
		// expect the lookup to fail; GetPod must surface the error
		mockRunner := NewMockNetAssertTestRunner(mockCtrl)
		mockRunner.
			EXPECT().
			GetPodInDaemonSet(ctx, daemonSetName, namespace).
			Return(
				&corev1.Pod{},
				fmt.Errorf("pod not found"),
			)
		eng := New(mockRunner, hclog.NewNullLogger())
		_, err := eng.GetPod(ctx, &res)
		require.Error(t, err)
	})
}
================================================
FILE: internal/engine/engine_deployment_test.go
================================================
package engine
import (
"context"
"fmt"
"testing"
"github.com/hashicorp/go-hclog"
"github.com/stretchr/testify/require"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"go.uber.org/mock/gomock"
"github.com/controlplaneio/netassert/v2/internal/data"
)
// TestEngine_GetPod_Deployment - verifies Engine.GetPod dispatches
// Deployment resources to GetPodInDeployment and propagates both the
// returned Pod and any lookup error.
func TestEngine_GetPod_Deployment(t *testing.T) {
	mockCtrl := gomock.NewController(t)
	// Register Finish via t.Cleanup rather than defer: the parallel
	// subtests below only execute after this function returns, so a
	// deferred Finish would fire before any expectation is exercised.
	// t.Cleanup runs after all subtests complete (and matches the
	// sibling DaemonSet/StatefulSet tests).
	t.Cleanup(func() {
		mockCtrl.Finish()
	})

	var (
		podName        = "foo-pod"
		namespace      = "default"
		deploymentName = "foo-deploy"
	)

	t.Run("test GetPod from deployment when Pod exists", func(t *testing.T) {
		t.Parallel()
		ctx := context.Background()
		res := data.K8sResource{
			Kind:      data.KindDeployment,
			Name:      deploymentName,
			Namespace: namespace,
		}
		// expect exactly one lookup that succeeds with a named Pod
		mockRunner := NewMockNetAssertTestRunner(mockCtrl)
		mockRunner.EXPECT().
			GetPodInDeployment(ctx, deploymentName, namespace).
			Return(&corev1.Pod{
				ObjectMeta: metav1.ObjectMeta{
					Name:      podName,
					Namespace: namespace,
				},
				Spec:   corev1.PodSpec{},
				Status: corev1.PodStatus{},
			}, nil)
		eng := New(mockRunner, hclog.NewNullLogger())
		pod, err := eng.GetPod(ctx, &res)
		require.NoError(t, err)
		require.Equal(t, pod.Namespace, namespace)
		require.Equal(t, pod.Name, podName)
	})

	t.Run("test GetPod from deployment when Pod does not exist", func(t *testing.T) {
		t.Parallel()
		ctx := context.Background()
		res := data.K8sResource{
			Kind:      data.KindDeployment,
			Name:      deploymentName,
			Namespace: namespace,
		}
		// expect the lookup to fail; GetPod must surface the error
		mockRunner := NewMockNetAssertTestRunner(mockCtrl)
		mockRunner.
			EXPECT().
			GetPodInDeployment(ctx, deploymentName, namespace).
			Return(
				&corev1.Pod{},
				fmt.Errorf("pod not found"),
			)
		eng := New(mockRunner, hclog.NewNullLogger())
		_, err := eng.GetPod(ctx, &res)
		require.Error(t, err)
	})
}
================================================
FILE: internal/engine/engine_mocks_test.go
================================================
// Code generated by MockGen. DO NOT EDIT.
// Source: github.com/controlplaneio/netassert/internal/engine (interfaces: NetAssertTestRunner)
// Package engine is a generated GoMock package.
package engine
import (
context "context"
reflect "reflect"
time "time"
gomock "go.uber.org/mock/gomock"
v1 "k8s.io/api/core/v1"
)
// MockNetAssertTestRunner is a mock of NetAssertTestRunner interface.
type MockNetAssertTestRunner struct {
ctrl *gomock.Controller
recorder *MockNetAssertTestRunnerMockRecorder
}
// MockNetAssertTestRunnerMockRecorder is the mock recorder for MockNetAssertTestRunner.
type MockNetAssertTestRunnerMockRecorder struct {
mock *MockNetAssertTestRunner
}
// NewMockNetAssertTestRunner creates a new mock instance.
func NewMockNetAssertTestRunner(ctrl *gomock.Controller) *MockNetAssertTestRunner {
mock := &MockNetAssertTestRunner{ctrl: ctrl}
mock.recorder = &MockNetAssertTestRunnerMockRecorder{mock}
return mock
}
// EXPECT returns an object that allows the caller to indicate expected use.
func (m *MockNetAssertTestRunner) EXPECT() *MockNetAssertTestRunnerMockRecorder {
return m.recorder
}
// BuildEphemeralScannerContainer mocks base method.
func (m *MockNetAssertTestRunner) BuildEphemeralScannerContainer(arg0, arg1, arg2, arg3, arg4, arg5 string, arg6 int) (*v1.EphemeralContainer, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "BuildEphemeralScannerContainer", arg0, arg1, arg2, arg3, arg4, arg5, arg6)
ret0, _ := ret[0].(*v1.EphemeralContainer)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// BuildEphemeralScannerContainer indicates an expected call of BuildEphemeralScannerContainer.
func (mr *MockNetAssertTestRunnerMockRecorder) BuildEphemeralScannerContainer(arg0, arg1, arg2, arg3, arg4, arg5, arg6 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "BuildEphemeralScannerContainer", reflect.TypeOf((*MockNetAssertTestRunner)(nil).BuildEphemeralScannerContainer), arg0, arg1, arg2, arg3, arg4, arg5, arg6)
}
// BuildEphemeralSnifferContainer mocks base method.
func (m *MockNetAssertTestRunner) BuildEphemeralSnifferContainer(arg0, arg1, arg2 string, arg3 int, arg4 string, arg5 int, arg6 string, arg7 int) (*v1.EphemeralContainer, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "BuildEphemeralSnifferContainer", arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7)
ret0, _ := ret[0].(*v1.EphemeralContainer)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// BuildEphemeralSnifferContainer indicates an expected call of BuildEphemeralSnifferContainer.
func (mr *MockNetAssertTestRunnerMockRecorder) BuildEphemeralSnifferContainer(arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "BuildEphemeralSnifferContainer", reflect.TypeOf((*MockNetAssertTestRunner)(nil).BuildEphemeralSnifferContainer), arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7)
}
// GetExitStatusOfEphemeralContainer mocks base method.
func (m *MockNetAssertTestRunner) GetExitStatusOfEphemeralContainer(arg0 context.Context, arg1 string, arg2 time.Duration, arg3, arg4 string) (int, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "GetExitStatusOfEphemeralContainer", arg0, arg1, arg2, arg3, arg4)
ret0, _ := ret[0].(int)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// GetExitStatusOfEphemeralContainer indicates an expected call of GetExitStatusOfEphemeralContainer.
func (mr *MockNetAssertTestRunnerMockRecorder) GetExitStatusOfEphemeralContainer(arg0, arg1, arg2, arg3, arg4 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetExitStatusOfEphemeralContainer", reflect.TypeOf((*MockNetAssertTestRunner)(nil).GetExitStatusOfEphemeralContainer), arg0, arg1, arg2, arg3, arg4)
}
// GetPod mocks base method.
func (m *MockNetAssertTestRunner) GetPod(arg0 context.Context, arg1, arg2 string) (*v1.Pod, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "GetPod", arg0, arg1, arg2)
ret0, _ := ret[0].(*v1.Pod)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// GetPod indicates an expected call of GetPod.
func (mr *MockNetAssertTestRunnerMockRecorder) GetPod(arg0, arg1, arg2 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetPod", reflect.TypeOf((*MockNetAssertTestRunner)(nil).GetPod), arg0, arg1, arg2)
}
// GetPodInDaemonSet mocks base method.
func (m *MockNetAssertTestRunner) GetPodInDaemonSet(arg0 context.Context, arg1, arg2 string) (*v1.Pod, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "GetPodInDaemonSet", arg0, arg1, arg2)
ret0, _ := ret[0].(*v1.Pod)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// GetPodInDaemonSet indicates an expected call of GetPodInDaemonSet.
func (mr *MockNetAssertTestRunnerMockRecorder) GetPodInDaemonSet(arg0, arg1, arg2 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetPodInDaemonSet", reflect.TypeOf((*MockNetAssertTestRunner)(nil).GetPodInDaemonSet), arg0, arg1, arg2)
}
// GetPodInDeployment mocks base method.
func (m *MockNetAssertTestRunner) GetPodInDeployment(arg0 context.Context, arg1, arg2 string) (*v1.Pod, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "GetPodInDeployment", arg0, arg1, arg2)
ret0, _ := ret[0].(*v1.Pod)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// GetPodInDeployment indicates an expected call of GetPodInDeployment.
func (mr *MockNetAssertTestRunnerMockRecorder) GetPodInDeployment(arg0, arg1, arg2 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetPodInDeployment", reflect.TypeOf((*MockNetAssertTestRunner)(nil).GetPodInDeployment), arg0, arg1, arg2)
}
// GetPodInStatefulSet mocks base method.
func (m *MockNetAssertTestRunner) GetPodInStatefulSet(arg0 context.Context, arg1, arg2 string) (*v1.Pod, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "GetPodInStatefulSet", arg0, arg1, arg2)
ret0, _ := ret[0].(*v1.Pod)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// GetPodInStatefulSet indicates an expected call of GetPodInStatefulSet.
func (mr *MockNetAssertTestRunnerMockRecorder) GetPodInStatefulSet(arg0, arg1, arg2 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetPodInStatefulSet", reflect.TypeOf((*MockNetAssertTestRunner)(nil).GetPodInStatefulSet), arg0, arg1, arg2)
}
// LaunchEphemeralContainerInPod mocks base method.
func (m *MockNetAssertTestRunner) LaunchEphemeralContainerInPod(arg0 context.Context, arg1 *v1.Pod, arg2 *v1.EphemeralContainer) (*v1.Pod, string, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "LaunchEphemeralContainerInPod", arg0, arg1, arg2)
ret0, _ := ret[0].(*v1.Pod)
ret1, _ := ret[1].(string)
ret2, _ := ret[2].(error)
return ret0, ret1, ret2
}
// LaunchEphemeralContainerInPod indicates an expected call of LaunchEphemeralContainerInPod.
func (mr *MockNetAssertTestRunnerMockRecorder) LaunchEphemeralContainerInPod(arg0, arg1, arg2 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "LaunchEphemeralContainerInPod", reflect.TypeOf((*MockNetAssertTestRunner)(nil).LaunchEphemeralContainerInPod), arg0, arg1, arg2)
}
================================================
FILE: internal/engine/engine_pod_test.go
================================================
package engine
import (
"context"
"fmt"
"testing"
"github.com/hashicorp/go-hclog"
"github.com/stretchr/testify/require"
"go.uber.org/mock/gomock"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"github.com/controlplaneio/netassert/v2/internal/data"
)
// TestEngine_GetPod_Pod - verifies Engine.GetPod dispatches Pod
// resources straight to the Service's GetPod and propagates both the
// returned Pod and any lookup error.
func TestEngine_GetPod_Pod(t *testing.T) {
	mockCtrl := gomock.NewController(t)
	// Register Finish via t.Cleanup rather than defer: the parallel
	// subtests below only execute after this function returns, so a
	// deferred Finish would fire before any expectation is exercised.
	t.Cleanup(func() {
		mockCtrl.Finish()
	})

	var (
		podName   = "foo-pod"
		namespace = "default"
	)

	// subtest name fixed: this exercises a plain Pod resource, not a
	// statefulSet (the old name was a copy-paste from the StatefulSet test)
	t.Run("test GetPod when Pod exists", func(t *testing.T) {
		t.Parallel()
		ctx := context.Background()
		res := data.K8sResource{
			Kind:      data.KindPod,
			Name:      podName,
			Namespace: namespace,
		}
		// expect exactly one lookup that succeeds with a named Pod
		mockRunner := NewMockNetAssertTestRunner(mockCtrl)
		mockRunner.EXPECT().
			GetPod(ctx, podName, namespace).
			Return(&corev1.Pod{
				ObjectMeta: metav1.ObjectMeta{
					Name:      podName,
					Namespace: namespace,
				},
				Spec:   corev1.PodSpec{},
				Status: corev1.PodStatus{},
			}, nil)
		eng := New(mockRunner, hclog.NewNullLogger())
		pod, err := eng.GetPod(ctx, &res)
		require.NoError(t, err)
		require.Equal(t, pod.Namespace, namespace)
		require.Equal(t, pod.Name, podName)
	})

	t.Run("GetPod when Pod does not exist", func(t *testing.T) {
		t.Parallel()
		ctx := context.Background()
		res := data.K8sResource{
			Kind:      data.KindPod,
			Name:      podName,
			Namespace: namespace,
		}
		// expect the lookup to fail; GetPod must surface the error
		mockRunner := NewMockNetAssertTestRunner(mockCtrl)
		mockRunner.EXPECT().
			GetPod(ctx, podName, namespace).
			Return(&corev1.Pod{}, fmt.Errorf("pod does not exist"))
		eng := New(mockRunner, hclog.NewNullLogger())
		_, err := eng.GetPod(ctx, &res)
		require.Error(t, err)
	})
}
================================================
FILE: internal/engine/engine_statefulset_test.go
================================================
package engine
import (
"context"
"fmt"
"testing"
"github.com/hashicorp/go-hclog"
"github.com/stretchr/testify/require"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"go.uber.org/mock/gomock"
"github.com/controlplaneio/netassert/v2/internal/data"
)
// TestEngine_GetPod_StatefulSet - verifies Engine.GetPod dispatches
// StatefulSet resources to GetPodInStatefulSet and propagates both the
// returned Pod and any lookup error.
func TestEngine_GetPod_StatefulSet(t *testing.T) {
	mockCtrl := gomock.NewController(t)
	// Finish runs after the parallel subtests below have completed
	t.Cleanup(mockCtrl.Finish)

	const (
		podName         = "foo-pod"
		namespace       = "default"
		statefulSetName = "foo-statefulset"
	)

	t.Run("test GetPod from statefulSet when Pod exists", func(t *testing.T) {
		t.Parallel()
		ctx := context.Background()

		// expect exactly one lookup that succeeds with a named Pod
		runner := NewMockNetAssertTestRunner(mockCtrl)
		runner.EXPECT().
			GetPodInStatefulSet(ctx, statefulSetName, namespace).
			Return(&corev1.Pod{
				ObjectMeta: metav1.ObjectMeta{
					Name:      podName,
					Namespace: namespace,
				},
			}, nil)

		resource := data.K8sResource{
			Kind:      data.KindStatefulSet,
			Name:      statefulSetName,
			Namespace: namespace,
		}
		pod, err := New(runner, hclog.NewNullLogger()).GetPod(ctx, &resource)
		require.NoError(t, err)
		require.Equal(t, namespace, pod.Namespace)
		require.Equal(t, podName, pod.Name)
	})

	t.Run("test GetPod from statefulSet when Pod does not exist", func(t *testing.T) {
		t.Parallel()
		ctx := context.Background()

		// expect the lookup to fail; GetPod must surface the error
		runner := NewMockNetAssertTestRunner(mockCtrl)
		runner.EXPECT().
			GetPodInStatefulSet(ctx, statefulSetName, namespace).
			Return(&corev1.Pod{}, fmt.Errorf("pod not found"))

		resource := data.K8sResource{
			Kind:      data.KindStatefulSet,
			Name:      statefulSetName,
			Namespace: namespace,
		}
		_, err := New(runner, hclog.NewNullLogger()).GetPod(ctx, &resource)
		require.Error(t, err)
	})
}
================================================
FILE: internal/engine/interface.go
================================================
package engine
import (
"context"
"time"
corev1 "k8s.io/api/core/v1"
)
// PodGetter - gets a running Pod from various kubernetes resources.
// Each method takes the resource name followed by its namespace.
type PodGetter interface {
	GetPodInDaemonSet(context.Context, string, string) (*corev1.Pod, error)
	GetPodInDeployment(context.Context, string, string) (*corev1.Pod, error)
	GetPodInStatefulSet(context.Context, string, string) (*corev1.Pod, error)
	GetPod(context.Context, string, string) (*corev1.Pod, error)
}

// EphemeralContainerOperator - various operations related to the
// ephemeral container(s): building scanner/sniffer container specs,
// injecting them into a Pod and polling their exit status.
type EphemeralContainerOperator interface {
	// BuildEphemeralScannerContainer builds the spec of the scanner
	// (client-side) ephemeral container; it does not launch anything.
	BuildEphemeralScannerContainer(
		name string, // name of the ephemeral container
		image string, // image location of the container
		targetHost string, // host to connect to
		targetPort string, // target Port to connect to
		protocol string, // protocol to used for connection
		message string, // message to pass to the remote target
		attempts int, // Number of attempts
	) (*corev1.EphemeralContainer, error)

	// GetExitStatusOfEphemeralContainer polls until the named ephemeral
	// container terminates (or timeOut elapses) and returns its exit code.
	GetExitStatusOfEphemeralContainer(
		ctx context.Context, // context passed to the function
		containerName string, // name of the ephemeral container
		timeOut time.Duration, // maximum duration to poll for the ephemeral container status
		podName string, // name of the pod that houses the ephemeral container
		podNamespace string, // namespace of the pod that houses the ephemeral container
	) (int, error)

	// BuildEphemeralSnifferContainer builds the spec of the sniffer
	// (packet-capture) ephemeral container; it does not launch anything.
	BuildEphemeralSnifferContainer(
		name string, // name of the ephemeral container
		image string, // image location of the container
		search string, // search for this string in the captured packet
		snapLen int, // snapLength to capture
		protocol string, // protocol to capture
		numberOfmatches int, // no. of matches
		intFace string, // the network interface to read the packets from
		timeoutSec int, // timeout for the ephemeral container in seconds
	) (*corev1.EphemeralContainer, error)

	// LaunchEphemeralContainerInPod injects the ephemeral container into
	// the target Pod and returns the updated Pod plus the container name.
	LaunchEphemeralContainerInPod(
		ctx context.Context, // the context
		pod *corev1.Pod, // the target Pod
		ec *corev1.EphemeralContainer, // the ephemeralContainer that needs to be injected to the Pod
	) (*corev1.Pod, string, error)
}

// NetAssertTestRunner - runs netassert test case(s); the union of the
// Pod lookup and ephemeral-container capabilities the Engine depends on.
type NetAssertTestRunner interface {
	EphemeralContainerOperator
	PodGetter
}
================================================
FILE: internal/engine/run_tcp.go
================================================
package engine
import (
"context"
"fmt"
"strconv"
"time"
"github.com/controlplaneio/netassert/v2/internal/data"
"github.com/controlplaneio/netassert/v2/internal/kubeops"
corev1 "k8s.io/api/core/v1"
)
// RunTCPTest - runs a single TCP test case by injecting an ephemeral scanner
// container into a running Pod of the source resource and asserting that the
// container's exit code matches the one expected by the test case.
//
// The destination is either a Kubernetes resource (a running Pod IP is resolved
// from it) or a plain host - exactly one of the two must be set. On success
// te.Pass is set to true; otherwise a non-nil error is returned.
func (e *Engine) RunTCPTest(
	ctx context.Context, // context information
	te *data.Test, // test case we want to run
	scannerContainerName string, // name of the scanner container to use
	scannerContainerImage string, // docker image location of the scanner container image
	suffixLength int, // length of random string that will be appended to the ephemeral container name
) error {
	var (
		dstPod     *corev1.Pod
		targetHost string
	)

	// guard against a nil test case, consistent with RunUDPTest
	if te == nil {
		return fmt.Errorf("test case is nil object")
	}

	// exactly one of Dst.Host / Dst.K8sResource must be provided
	if te.Dst.Host != nil && te.Dst.K8sResource != nil {
		return fmt.Errorf("both Dst.Host and Dst.K8sResource cannot be set at the same time")
	}

	if te.Dst.Host == nil && te.Dst.K8sResource == nil {
		return fmt.Errorf("both Dst.Host and Dst.K8sResource are nil")
	}

	if scannerContainerName == "" {
		return fmt.Errorf("scannerContainerName parameter cannot be empty string")
	}

	if scannerContainerImage == "" {
		return fmt.Errorf("scannerContainerImage parameter cannot be empty string")
	}

	e.Log.Info("🟢 Running TCP test", "Name", te.Name)

	// find a running Pod represented by the src.K8sResource object
	srcPod, err := e.GetPod(ctx, te.Src.K8sResource)
	if err != nil {
		return err
	}

	// if Destination K8sResource is not set to nil
	if te.Dst.K8sResource != nil {
		// we need to find a running Pod with IP Address in the Dst K8sResource
		dstPod, err = e.GetPod(ctx, te.Dst.K8sResource)
		if err != nil {
			return err
		}
		targetHost = dstPod.Status.PodIP
	} else {
		targetHost = te.Dst.Host.Name
	}

	// random message carried by the scanner so its traffic is identifiable
	msg, err := kubeops.NewUUIDString()
	if err != nil {
		return fmt.Errorf("unable to generate random UUID for test %s: %w", te.Name, err)
	}

	// build the ephemeral container with details of the target host and port
	debugContainer, err := e.Service.BuildEphemeralScannerContainer(
		scannerContainerName+"-"+kubeops.RandString(suffixLength),
		scannerContainerImage,
		targetHost,
		strconv.Itoa(te.TargetPort),
		string(te.Protocol),
		msg,
		te.Attempts,
	)
	if err != nil {
		return fmt.Errorf("unable to build ephemeral scanner container for test %s: %w", te.Name, err)
	}

	// run the ephemeral/debug container, grab its exit code and make sure
	// that the exit code matches the one that is specified in the test
	srcPod, ephContainerName, err := e.Service.LaunchEphemeralContainerInPod(ctx, srcPod, debugContainer)
	if err != nil {
		return fmt.Errorf("ephemeral container launch failed for test %s: %w", te.Name, err)
	}

	err = e.CheckExitStatusOfEphContainer(
		ctx,
		ephContainerName,
		te.Name,
		srcPod.Name,
		srcPod.Namespace,
		time.Duration(te.TimeoutSeconds)*time.Second,
		te.ExitCode,
	)
	if err != nil {
		return err
	}

	te.Pass = true // set the test as pass
	return nil
}
// CheckExitStatusOfEphContainer - returns an error if the exit code of the
// ephemeral container does not match expExitCode. It blocks until the
// container terminates, the timeout expires, or the context is cancelled.
func (e *Engine) CheckExitStatusOfEphContainer(
	ctx context.Context, // context to pass to our function
	ephContainerName string, // name of the ephemeral container
	testCaseName string, // name of the test case
	podName string, // name of the pod that houses the ephemeral container
	podNamespace string, // namespace of the pod that houses the ephemeral container
	timeout time.Duration, // timeout for the exit status to reach the desired exit code
	expExitCode int, // expected exit code from the ephemeral container
) error {
	containerExitCode, err := e.Service.GetExitStatusOfEphemeralContainer(
		ctx,
		ephContainerName,
		timeout,
		podName,
		podNamespace,
	)
	if err != nil {
		return fmt.Errorf("failed to get exit code of the ephemeral container %s for test %s: %w",
			ephContainerName, testCaseName, err)
	}

	e.Log.Info("Got exit code from ephemeral container",
		"testName", testCaseName,
		"exitCode", containerExitCode,
		"container", ephContainerName,
	)

	if containerExitCode != expExitCode {
		// log at error level with a message that actually describes the failure
		// (previously this duplicated the info-level "Got exit code" message)
		e.Log.Error("Ephemeral container exit code does not match expected exit code",
			"testName", testCaseName,
			"exitCode", containerExitCode,
			"expectedExitCode", expExitCode,
			"container", ephContainerName,
		)
		return fmt.Errorf("ephemeral container %s exit code for test %v is %v instead of %v",
			ephContainerName, testCaseName, containerExitCode, expExitCode)
	}

	return nil
}
================================================
FILE: internal/engine/run_tcp_test.go
================================================
package engine
import (
"context"
"fmt"
"strings"
"testing"
"github.com/hashicorp/go-hclog"
"github.com/stretchr/testify/require"
"go.uber.org/mock/gomock"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"github.com/controlplaneio/netassert/v2/internal/data"
)
// sampleTest is a single TCP test case in the netassert YAML schema, used as
// input for the engine tests in this file: a busybox deployment connecting to
// an echoserver deployment on port 8080.
var sampleTest = `
- name: busybox-deploy-to-echoserver-deploy
  type: k8s
  protocol: tcp
  targetPort: 8080
  timeoutSeconds: 20
  attempts: 3
  exitCode: 0
  src:
    k8sResource:
      kind: deployment
      name: busybox
      namespace: busybox
  dst:
    k8sResource:
      kind: deployment
      name: echoserver
      namespace: echoserver
`
// TestEngine_RunTCPTest exercises Engine.RunTCPTest against a mocked
// NetAssertTestRunner, verifying that a failure to build the scanner
// container is propagated to the caller with a descriptive message.
func TestEngine_RunTCPTest(t *testing.T) {
	t.Run("error when BuildEphemeralScannerContainer fails", func(t *testing.T) {
		r := require.New(t)
		ctx := context.Background()

		// parse the sample YAML test case defined above
		testCases, err := data.NewFromReader(strings.NewReader(sampleTest))
		r.Nil(err)

		// we only expect a single testCase to be present
		r.Equal(len(testCases), 1)
		tc := testCases[0]

		mockCtrl := gomock.NewController(t)
		defer mockCtrl.Finish()
		mockRunner := NewMockNetAssertTestRunner(mockCtrl)

		// the engine first resolves a running Pod for the source resource...
		mockRunner.EXPECT().
			GetPodInDeployment(ctx, tc.Src.K8sResource.Name, tc.Src.K8sResource.Namespace).
			Return(&corev1.Pod{
				ObjectMeta: metav1.ObjectMeta{
					Name:      "busybox",
					Namespace: "busybox",
				},
				Spec:   corev1.PodSpec{},
				Status: corev1.PodStatus{},
			}, nil)

		// ...and then one for the destination resource
		mockRunner.EXPECT().
			GetPodInDeployment(ctx, tc.Dst.K8sResource.Name, tc.Dst.K8sResource.Namespace).
			Return(&corev1.Pod{
				ObjectMeta: metav1.ObjectMeta{
					Name:      "echoserver",
					Namespace: "echoserver",
				},
				Spec:   corev1.PodSpec{},
				Status: corev1.PodStatus{},
			}, nil)

		// building the scanner container fails, which should abort the run
		mockRunner.EXPECT().
			BuildEphemeralScannerContainer(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(),
				gomock.Any(), gomock.Any(), gomock.Any()).
			Return(&corev1.EphemeralContainer{},
				fmt.Errorf("failed to build ephemeral scanner container"))

		eng := New(mockRunner, hclog.NewNullLogger())
		err = eng.RunTCPTest(ctx, testCases[0],
			"scanner-container-name",
			"scanner-container-image", 7)
		r.Error(err)

		// the engine is expected to wrap the builder failure with test context
		wantErrMsg := `unable to build ephemeral scanner container for test ` + tc.Name
		r.Contains(err.Error(), wantErrMsg)
	})
}
================================================
FILE: internal/engine/run_udp.go
================================================
package engine
import (
"context"
"fmt"
"strconv"
"time"
"github.com/controlplaneio/netassert/v2/internal/data"
"github.com/controlplaneio/netassert/v2/internal/kubeops"
corev1 "k8s.io/api/core/v1"
)
const (
	defaultNetInt                   = `eth0` // default network interface used for packet capture
	defaultSnapLen                  = 1024   // default size of the packet snap length
	ephemeralContainersExtraSeconds = 23     // fixed extra time given for the ephemeral containers to come online
	attemptsMultiplier              = 3      // increase the attempts to ensure that we send three times the packets
)
// RunUDPTest - runs a UDP test case. Because UDP is connectionless, the test
// injects two ephemeral containers: a sniffer into a running Pod of the
// destination resource and a scanner into a running Pod of the source
// resource. The scanner sends datagrams carrying a random UUID and the
// sniffer captures packets searching for that UUID. The test passes when the
// sniffer exits with the expected exit code and the scanner exits with 0.
//
// Unlike TCP tests, the destination must be a Kubernetes resource (we cannot
// inject an ephemeral container into a plain host).
func (e *Engine) RunUDPTest(
	ctx context.Context, // context information
	te *data.Test, // the test case to run
	snifferContainerSuffix string, // name of the sniffer container to use
	snifferContainerImage string, // image location of the sniffer Container
	scannerContainerSuffix string, // name of the scanner container to use
	scannerContainerImage string, // image location of the scanner container
	suffixLength int, // length of string that will be generated and appended to the container name
	networkInterface string, // name of the network interface that will be used for packet capturing
) error {
	if te == nil {
		return fmt.Errorf("test case is nil object")
	}

	if snifferContainerSuffix == "" {
		return fmt.Errorf("snifferContainerSuffix parameter cannot be empty string")
	}

	if snifferContainerImage == "" {
		return fmt.Errorf("snifferContainerImage parameter cannot be empty string")
	}

	if scannerContainerSuffix == "" {
		return fmt.Errorf("scannerContainerSuffix parameter cannot be empty string")
	}

	if scannerContainerImage == "" {
		return fmt.Errorf("scannerContainerImage parameter cannot be empty string")
	}

	// we only run UDP tests here
	if te.Protocol != data.ProtocolUDP {
		return fmt.Errorf("test case protocol is set to %q, this function only supports %q",
			te.Protocol, data.ProtocolUDP)
	}

	var (
		srcPod, dstPod *corev1.Pod
		targetHost     string
		err            error
	)

	// as we cannot inject an ephemeral container when the Dst type is Host,
	// we return an error
	if te.Dst.Host != nil {
		return fmt.Errorf("%q: dst should not contain host object when protocol is %s", te.Name, te.Protocol)
	}

	if te.Dst.K8sResource == nil {
		return fmt.Errorf("%q: dst should contain non-nil k8sResource object", te.Name)
	}

	e.Log.Info("🟢 Running UDP test", "Name", te.Name)

	// if no capture interface was supplied, fall back to the default (eth0)
	if networkInterface == "" {
		networkInterface = defaultNetInt
	}

	// find a running Pod represented by the src.K8sResource object
	srcPod, err = e.GetPod(ctx, te.Src.K8sResource)
	if err != nil {
		return fmt.Errorf("unable to get source pod for test %s: %w", te.Name, err)
	}

	// find a running Pod in the destination kubernetes object
	dstPod, err = e.GetPod(ctx, te.Dst.K8sResource)
	if err != nil {
		return err
	}
	targetHost = dstPod.Status.PodIP

	// random message carried in the datagrams so the sniffer can identify them
	msg, err := kubeops.NewUUIDString()
	if err != nil {
		return fmt.Errorf("unable to generate random UUID for test %s: %w", te.Name, err)
	}

	// build the sniffer container, which is injected into the destination Pod
	snifferEphemeralContainer, err := e.Service.BuildEphemeralSnifferContainer(
		snifferContainerSuffix+"-"+kubeops.RandString(suffixLength),
		snifferContainerImage,
		msg,
		defaultSnapLen,
		string(te.Protocol),
		te.Attempts,
		networkInterface,
		te.TimeoutSeconds,
	)
	if err != nil {
		return fmt.Errorf("failed to build sniffer ephemeral container for test %s: %w", te.Name, err)
	}

	// build the scanner container, which is injected into the source Pod;
	// send a multiple of the required matches since UDP delivery is lossy
	scannerEphemeralContainer, err := e.Service.BuildEphemeralScannerContainer(
		scannerContainerSuffix+"-"+kubeops.RandString(suffixLength),
		scannerContainerImage,
		targetHost,
		strconv.Itoa(te.TargetPort),
		string(te.Protocol),
		msg,
		te.Attempts*attemptsMultiplier,
	)
	if err != nil {
		return fmt.Errorf("unable to build ephemeral scanner container for test %s: %w", te.Name, err)
	}

	// run the ephemeral containers on the dst Pod first and then the source Pod
	dstPod, snifferContainerName, err := e.Service.LaunchEphemeralContainerInPod(ctx, dstPod,
		snifferEphemeralContainer)
	if err != nil {
		return fmt.Errorf("sniffer ephemeral container launch failed for test %s: %w", te.Name, err)
	}

	// run the ephemeral scanner container in the source Pod after we have
	// launched the sniffer
	_, scannerContainerName, err := e.Service.LaunchEphemeralContainerInPod(ctx, srcPod, scannerEphemeralContainer)
	if err != nil {
		return fmt.Errorf("scanner ephemeral container launch failed for test %s: %w", te.Name, err)
	}

	// sniffer is successfully injected into the dstPod, now we check its exit code
	exitCodeSnifferCtr, err := e.Service.GetExitStatusOfEphemeralContainer(
		ctx,
		snifferContainerName,
		time.Duration(te.TimeoutSeconds+ephemeralContainersExtraSeconds)*time.Second,
		dstPod.Name,
		dstPod.Namespace,
	)
	if err != nil {
		return fmt.Errorf("failed to get exit code of the sniffer ephemeral container %s for test %s: %w",
			snifferContainerName, te.Name, err)
	}

	e.Log.Info("Got exit code from ephemeral sniffer container",
		"testName", te.Name,
		"exitCode", exitCodeSnifferCtr,
		"containerName", snifferContainerName)

	if exitCodeSnifferCtr != te.ExitCode {
		return fmt.Errorf("ephemeral sniffer container %s exit code for test %v is %v instead of %d",
			snifferContainerName, te.Name, exitCodeSnifferCtr, te.ExitCode)
	}

	// get the exit status of the scanner container
	exitCodeScanner, err := e.Service.GetExitStatusOfEphemeralContainer(
		ctx, scannerContainerName,
		time.Duration(te.TimeoutSeconds+ephemeralContainersExtraSeconds)*time.Second,
		srcPod.Name,
		srcPod.Namespace,
	)
	if err != nil {
		return fmt.Errorf("failed to get exit code of the scanner ephemeral container %s for test %s: %w",
			scannerContainerName, te.Name, err)
	}

	e.Log.Info("Got exit code from ephemeral scanner container",
		"testName", te.Name,
		"exitCode", exitCodeScanner,
		"containerName", scannerContainerName)

	// for UDP scanning the exit code of the scanner is always zero
	// as UDP is connectionless
	if exitCodeScanner != 0 {
		return fmt.Errorf("ephemeral scanner container %s exit code for test %v is %v instead of 0",
			scannerContainerName, te.Name, exitCodeScanner)
	}

	te.Pass = true // mark test as pass
	return nil
}
================================================
FILE: internal/kubeops/client.go
================================================
package kubeops
import (
"fmt"
"github.com/hashicorp/go-hclog"
"k8s.io/client-go/kubernetes"
"k8s.io/client-go/rest"
"k8s.io/client-go/tools/clientcmd"
)
// generateKubernetesClient - generates a kubernetes client set from the default
// locations: it first honours the KUBECONFIG environment variable and the
// default kubeconfig file locations (e.g. the user's home directory), then
// falls back to the in-cluster configuration when the program runs in a Pod.
func generateKubernetesClient() (kubernetes.Interface, error) {
	config, err := clientcmd.NewNonInteractiveDeferredLoadingClientConfig(
		clientcmd.NewDefaultClientConfigLoadingRules(), nil,
	).ClientConfig()
	if err != nil {
		// kubeconfig lookup failed, fall back to the in-cluster configuration;
		// keep the original error so neither failure cause is lost
		restConfig, inClusterErr := rest.InClusterConfig()
		if inClusterErr != nil {
			return nil, fmt.Errorf("failed to build kubeconfig (%v) or InClusterConfig: %w", err, inClusterErr)
		}
		config = restConfig
	}

	k8sClient, err := kubernetes.NewForConfig(config)
	if err != nil {
		return nil, fmt.Errorf("failed to create kubernetes client: %w", err)
	}

	return k8sClient, nil
}
// genK8sClientFromKubeConfigFile - builds a kubernetes client set from the
// kubeconfig file at the caller-supplied kubeConfigPath.
func genK8sClientFromKubeConfigFile(kubeConfigPath string) (kubernetes.Interface, error) {
	// load the REST configuration from the given kubeconfig file
	restCfg, err := clientcmd.BuildConfigFromFlags("", kubeConfigPath)
	if err != nil {
		return nil, fmt.Errorf("failed to build kubeconfig from file %s: %w", kubeConfigPath, err)
	}

	// turn the REST configuration into a typed client set
	clientSet, err := kubernetes.NewForConfig(restCfg)
	if err != nil {
		return nil, fmt.Errorf("failed to create kubernetes client from file %s: %w", kubeConfigPath, err)
	}

	return clientSet, nil
}
// Service exposes the operations on various K8s resources. All kubeops
// methods (Pod lookup, ephemeral container injection, watches) hang off it.
type Service struct {
	Client kubernetes.Interface // kubernetes client-set
	Log    hclog.Logger         // logger embedded in our service
}
// New - builds a new Service that can interface with Kubernetes using the
// supplied client set and logger.
func New(client kubernetes.Interface, l hclog.Logger) *Service {
	svc := &Service{Client: client, Log: l}
	return svc
}
// NewDefaultService - builds a new Service, discovering the kubeconfig in the
// default locations (KUBECONFIG, home directory) or falling back to in-cluster
// configuration. Returns a nil Service and a non-nil error when no
// configuration could be loaded.
func NewDefaultService(l hclog.Logger) (*Service, error) {
	clientSet, err := generateKubernetesClient()
	if err != nil {
		// return nil rather than a half-initialised Service so callers cannot
		// accidentally use a Service that has no client
		return nil, err
	}

	return &Service{
		Client: clientSet,
		Log:    l,
	}, nil
}
// NewServiceFromKubeConfigFile - builds a new Service using the KubeConfig
// file location passed by the caller. Returns a nil Service and a non-nil
// error when the file cannot be loaded or the client cannot be built.
func NewServiceFromKubeConfigFile(kubeConfigPath string, l hclog.Logger) (*Service, error) {
	clientSet, err := genK8sClientFromKubeConfigFile(kubeConfigPath)
	if err != nil {
		// return nil rather than a half-initialised Service so callers cannot
		// accidentally use a Service that has no client
		return nil, err
	}

	return &Service{
		Client: clientSet,
		Log:    l,
	}, nil
}
================================================
FILE: internal/kubeops/container_test.go
================================================
package kubeops
import (
"context"
"testing"
"github.com/hashicorp/go-hclog"
"github.com/stretchr/testify/require"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/client-go/kubernetes/fake"
)
// TestLaunchEphemeralContainerInPod_InvalidEphemeralContainer verifies
// LaunchEphemeralContainerInPod against a fake client: injecting a container
// into an existing Pod succeeds, while injecting into a non-existent Pod
// returns a "not found" error.
func TestLaunchEphemeralContainerInPod_InvalidEphemeralContainer(t *testing.T) {
	// create a fake pod with no ephemeral containers
	pod := &corev1.Pod{
		ObjectMeta: metav1.ObjectMeta{
			Name:      "fake-pod",
			Namespace: "default",
		},
		Spec: corev1.PodSpec{
			Containers: []corev1.Container{
				{
					Name:  "nginx",
					Image: "nginx",
					Ports: []corev1.ContainerPort{
						{
							Name:          "http",
							ContainerPort: 80,
							Protocol:      corev1.ProtocolTCP,
						},
					},
				},
			},
		},
	}

	ctx := context.Background()
	fakeClient := fake.NewSimpleClientset(pod)

	// initialise our Service
	svc := Service{
		Client: fakeClient,
		Log:    hclog.NewNullLogger(),
	}

	t.Run("Inject valid ephemeral container", func(t *testing.T) {
		r := require.New(t)
		ephContainerName := "foo-container"

		// create a valid ephemeral sniffer container spec
		ec, err := svc.BuildEphemeralSnifferContainer(
			ephContainerName, // name of the ephemeral container
			"foo:12",         // image location of the container
			"foo",            // search for this string in the captured packet
			1024,             // snapLength to capture
			"tcp",            // protocol to capture
			3,                // no. of matches that triggers an exit with status 0
			"eth0",           // the network interface to read the packets from
			3,                // timeout for the ephemeral container
		)
		r.NoError(err, "failed to Build Ephemeral Container ")

		// injecting into an existing Pod should succeed and the patched Pod
		// should carry exactly one ephemeral container with the given name
		pod, _, err := svc.LaunchEphemeralContainerInPod(ctx, pod, ec)
		r.NoError(err)
		gotName := pod.Spec.EphemeralContainers[0].EphemeralContainerCommon.Name
		r.Equal(ephContainerName, gotName)
		r.Equal(len(pod.Spec.EphemeralContainers), 1)
	})

	t.Run("inject valid ephemeral container in a Pod that does not exist", func(t *testing.T) {
		r := require.New(t)

		// this Pod was never created in the fake client
		tmpPod := &corev1.Pod{
			ObjectMeta: metav1.ObjectMeta{
				Name:      "test-pod",
				Namespace: "default",
			},
		}

		// create a valid ephemeral sniffer container spec
		ec, err := svc.BuildEphemeralSnifferContainer(
			"eph1",    // name of the ephemeral container
			"foo:1.2", // image location of the container
			"foo",     // search for this string in the captured packet
			1024,      // snapLength to capture
			"tcp",     // protocol to capture
			3,         // no. of matches that triggers an exit with status 0
			"eth0",    // the network interface to read the packets from
			3,         // timeout for the ephemeral container
		)
		r.NoError(err, "failed to Build Ephemeral Container ")

		// patching a non-existent Pod must fail with a "not found" error
		gotPod, _, err := svc.LaunchEphemeralContainerInPod(ctx, tmpPod, ec)
		r.Nil(gotPod)
		r.Error(err)
		r.Contains(err.Error(), `pods "test-pod" not found`)
	})
}
================================================
FILE: internal/kubeops/containers.go
================================================
package kubeops
import (
"context"
"encoding/json"
"fmt"
"strconv"
"time"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/fields"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/strategicpatch"
"k8s.io/utils/pointer"
)
// LaunchEphemeralContainerInPod - launches an ephemeral container in a running
// Pod by patching the Pod's "ephemeralcontainers" subresource with a strategic
// merge patch. Returns the patched Pod, the name of the injected container,
// and any error encountered while building or applying the patch.
func (svc *Service) LaunchEphemeralContainerInPod(
	ctx context.Context, // the context
	pod *corev1.Pod, // the target Pod
	ec *corev1.EphemeralContainer, // the ephemeralContainer that needs to be injected to the Pod
) (*corev1.Pod, string, error) {
	// grab the JSON of the original Pod
	originalPodJSON, err := json.Marshal(pod)
	if err != nil {
		svc.Log.Error("Unable to marshal the original pod into JSON object")
		return nil, "", fmt.Errorf("unable to marshal pod %s/%s to JSON: %w", pod.Namespace, pod.Name, err)
	}

	podCopy := pod.DeepCopy()
	// Add the ephemeral container to the Pod spec of existing ephemeral containers
	podCopy.Spec.EphemeralContainers = append(podCopy.Spec.EphemeralContainers, *ec)

	podCopyJSON, err := json.Marshal(podCopy)
	if err != nil {
		return nil, "", fmt.Errorf("unable to marshal modified pod %s/%s to JSON: %w", pod.Namespace, pod.Name, err)
	}

	// compute the strategic merge patch between the original and modified Pod
	patch, err := strategicpatch.CreateTwoWayMergePatch(originalPodJSON, podCopyJSON, pod)
	if err != nil {
		return nil, "", fmt.Errorf("unable to create merge patch for pod %s/%s: %w", pod.Namespace, pod.Name, err)
	}

	svc.Log.Debug("Generated JSON patch for the Pod", "JSONPatch", string(patch))
	svc.Log.Info("Patching Pod", "Pod", pod.Name, "Namespace", pod.Namespace, "Container", ec.Name)

	// we now patch the Pod's ephemeralcontainers subresource
	newPod, err := svc.Client.CoreV1().Pods(pod.Namespace).Patch(
		ctx,
		pod.Name,
		types.StrategicMergePatchType,
		patch,
		metav1.PatchOptions{},
		"ephemeralcontainers",
	)
	if err != nil {
		return nil, "", err
	}

	return newPod, ec.Name, nil
}
// BuildEphemeralSnifferContainer - builds the spec of an ephemeral sniffer
// container. All sniffer configuration is handed over via environment
// variables; the error return is always nil in the current implementation.
func (svc *Service) BuildEphemeralSnifferContainer(
	name string, // name of the ephemeral container
	image string, // image location of the container
	search string, // search for this string in the captured packet
	snapLen int, // snapLength to capture
	protocol string, // protocol to capture
	numberMatches int, // no. of matches that triggers an exit with status 0
	intFace string, // the network interface to read the packets from
	timeoutSec int, // timeout for the ephemeral container
) (*corev1.EphemeralContainer, error) {
	// the sniffer binary reads its configuration from the environment
	envVars := []corev1.EnvVar{
		{Name: "TIMEOUT_SECONDS", Value: strconv.Itoa(timeoutSec)},
		{Name: "IFACE", Value: intFace},
		{Name: "SNAPLEN", Value: strconv.Itoa(snapLen)},
		{Name: "SEARCH_STRING", Value: search},
		{Name: "PROTOCOL", Value: protocol},
		{Name: "MATCHES", Value: strconv.Itoa(numberMatches)},
	}

	// packet capture requires the NET_RAW capability; beyond that the
	// container runs as non-root without privilege escalation
	secCtx := &corev1.SecurityContext{
		Capabilities: &corev1.Capabilities{
			Add: []corev1.Capability{"NET_RAW"},
		},
		AllowPrivilegeEscalation: pointer.Bool(false),
		RunAsNonRoot:             pointer.Bool(true),
	}

	return &corev1.EphemeralContainer{
		EphemeralContainerCommon: corev1.EphemeralContainerCommon{
			Name:            name,
			Image:           image,
			Env:             envVars,
			Stdin:           false,
			StdinOnce:       false,
			TTY:             false,
			SecurityContext: secCtx,
		},
		// empty string forces the container to run in the namespace of the Pod,
		// rather than the container; this is the default value, only added here
		// for readability
		TargetContainerName: "",
	}, nil
}
// BuildEphemeralScannerContainer - builds the spec of an ephemeral scanner
// container. All scanner configuration is handed over via environment
// variables; the error return is always nil in the current implementation.
func (svc *Service) BuildEphemeralScannerContainer(
	name string, // name of the ephemeral container
	image string, // image location of the container
	targetHost string, // host to connect to
	targetPort string, // target Port to connect to
	protocol string, // protocol to used for connection
	message string, // message to pass to the remote target
	attempts int, // Number of attempts
) (*corev1.EphemeralContainer, error) {
	// the scanner binary reads its configuration from the environment
	envVars := []corev1.EnvVar{
		{Name: "TARGET_HOST", Value: targetHost},
		{Name: "TARGET_PORT", Value: targetPort},
		{Name: "PROTOCOL", Value: protocol},
		{Name: "MESSAGE", Value: message},
		{Name: "ATTEMPTS", Value: strconv.Itoa(attempts)},
	}

	// the scanner needs no extra capabilities: non-root, no privilege escalation
	secCtx := &corev1.SecurityContext{
		RunAsNonRoot:             pointer.Bool(true),
		AllowPrivilegeEscalation: pointer.Bool(false),
	}

	return &corev1.EphemeralContainer{
		EphemeralContainerCommon: corev1.EphemeralContainerCommon{
			Name:            name,
			Image:           image,
			Env:             envVars,
			Stdin:           false,
			StdinOnce:       false,
			TTY:             false,
			SecurityContext: secCtx,
		},
		// empty string forces the container to run in the namespace of the Pod,
		// rather than the container; this is the default value, only added here
		// for readability
		TargetContainerName: "",
	}, nil
}
// GetExitStatusOfEphemeralContainer - returns the exit status of an
// EphemeralContainer in a pod. It watches the Pod (filtered by name, namespace
// and Running phase) until the named ephemeral container reaches the
// Terminated state and then returns its exit code. Returns -1 and an error
// when the watch cannot be established, the watch channel is closed before the
// container terminates, the timeout expires, or the context is cancelled.
func (svc *Service) GetExitStatusOfEphemeralContainer(
	ctx context.Context, // the context
	containerName string, // name of the ephemeral container
	timeOut time.Duration, // maximum duration to poll for the container status
	podName string, // name of the pod which has the ephemeral container
	podNamespace string, // namespace of the pod which has the ephemeral container
) (int, error) {
	// we only want the Pods that are in running state
	// and are in specific namespace
	fieldSelector := fields.AndSelectors(
		fields.OneTermEqualSelector(
			"status.phase",
			"Running",
		),
		fields.OneTermEqualSelector(
			"metadata.name",
			podName,
		),
		fields.OneTermEqualSelector(
			"metadata.namespace",
			podNamespace,
		),
	)

	podWatcher, err := svc.Client.CoreV1().Pods(podNamespace).Watch(ctx, metav1.ListOptions{
		TypeMeta:      metav1.TypeMeta{},
		FieldSelector: fieldSelector.String(),
	})
	if err != nil {
		return -1, err
	}
	// podWatcher is non-nil once Watch succeeded; always release it on return
	defer podWatcher.Stop()

	timer := time.NewTimer(timeOut)
	defer timer.Stop()

	for {
		select {
		case event, open := <-podWatcher.ResultChan():
			if !open {
				// the server closed the watch channel; without this check the
				// loop would spin on zero-value events until the timer fired
				return -1, fmt.Errorf("watch channel closed before container %s in pod %s/%s terminated",
					containerName, podNamespace, podName)
			}

			pod, ok := event.Object.(*corev1.Pod)
			// if this is not a pod object, then we skip
			if !ok {
				break // breaks from the select
			}

			svc.Log.Debug("Polling the status of ephemeral container",
				"pod", pod.Name,
				"namespace", pod.Namespace,
				"container", containerName,
			)

			for _, v := range pod.Status.EphemeralContainerStatuses {
				if v.Name != containerName {
					continue
				}

				if v.State.Waiting != nil {
					svc.Log.Debug("Container state", "container", containerName, "state", "Waiting")
					continue
				}

				if v.State.Running != nil {
					svc.Log.Debug("Container state", "container", containerName, "state", "Running")
					continue
				}

				if v.State.Terminated != nil {
					svc.Log.Info("Ephemeral container has finished executing", "name", containerName)
					svc.Log.Debug("", "ContainerName", v.Name)
					svc.Log.Debug("", "ExitCode", v.State.Terminated.ExitCode)
					svc.Log.Debug("", "ContainerID", v.State.Terminated.ContainerID)
					svc.Log.Debug("", "FinishedAt", v.State.Terminated.FinishedAt)
					svc.Log.Debug("", "StartedAt", v.State.Terminated.StartedAt)
					svc.Log.Debug("", "Message", v.State.Terminated.Message)
					svc.Log.Debug("", "Reason", v.State.Terminated.Reason)
					svc.Log.Debug("", "Signal", v.State.Terminated.Signal)
					return int(v.State.Terminated.ExitCode), nil
				}
			}

		case <-timer.C:
			return -1, fmt.Errorf("container %v did not reach termination state in %v seconds", containerName, timeOut.Seconds())

		case <-ctx.Done():
			return -1, fmt.Errorf("process was cancelled: %w", ctx.Err())
		}
	}
}
================================================
FILE: internal/kubeops/daemonset.go
================================================
package kubeops
import (
"context"
"fmt"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/fields"
"k8s.io/apimachinery/pkg/labels"
)
// GetPodInDaemonSet - returns a running Pod with an IP address owned by the
// named DaemonSet. The Pod is selected from the list of Running Pods matching
// the DaemonSet's label selector.
func (svc *Service) GetPodInDaemonSet(ctx context.Context, name, namespace string) (*corev1.Pod, error) {
	// check if the DaemonSet actually exists
	ds, err := svc.Client.AppsV1().DaemonSets(namespace).Get(ctx, name, metav1.GetOptions{})
	if err != nil {
		return nil, err
	}

	if ds.Status.NumberAvailable == 0 {
		svc.Log.Error("Found zero available Pods", "DaemonSet", name, "Namespace", namespace)
		return nil, fmt.Errorf("zero Pods are available in the daemonset %s", name)
	}

	// we only want the Pods that are in the Running phase
	fieldSelector := fields.OneTermEqualSelector(
		"status.phase",
		"Running",
	).String()

	// Grab the labels from the DaemonSet selector so we only list its Pods
	podLabels := labels.FormatLabels(ds.Spec.Selector.MatchLabels)

	pods, err := svc.Client.CoreV1().Pods(namespace).List(ctx, metav1.ListOptions{
		LabelSelector: podLabels,
		FieldSelector: fieldSelector,
	})
	if err != nil {
		svc.Log.Error("Unable to list Pods", "namespace", namespace, "error", err)
		return nil, fmt.Errorf("unable to list Pods in namespace %s: %w", namespace, err)
	}

	pod, err := getRandomPodFromPodList(ds, pods)
	if err != nil {
		// wrap the underlying error instead of discarding it
		return nil, fmt.Errorf("unable to find any Pod owned by daemonset %s in namespace %s: %w",
			name, namespace, err)
	}

	return pod, nil
}
================================================
FILE: internal/kubeops/daemonset_test.go
================================================
package kubeops
import (
"context"
"testing"
"github.com/hashicorp/go-hclog"
"github.com/stretchr/testify/require"
appsv1 "k8s.io/api/apps/v1"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/uuid"
"k8s.io/client-go/kubernetes"
"k8s.io/client-go/kubernetes/fake"
)
// createDaemonSet creates a DaemonSet named name in the given namespace with
// the supplied Status.NumberAvailable, using the supplied (fake) client.
// The DaemonSet selects Pods labelled app=foobar. Fails the test on error.
func createDaemonSet(client kubernetes.Interface,
	name, namespace string,
	numberAvailable int32,
	t *testing.T,
) *appsv1.DaemonSet {
	t.Helper() // report failures at the caller's line

	ds := &appsv1.DaemonSet{
		ObjectMeta: metav1.ObjectMeta{
			Name:      name,
			Namespace: namespace,
			UID:       uuid.NewUUID(),
		},
		Status: appsv1.DaemonSetStatus{
			NumberAvailable: numberAvailable,
		},
		Spec: appsv1.DaemonSetSpec{
			Selector: &metav1.LabelSelector{
				MatchLabels: map[string]string{
					"app": "foobar",
				},
			},
			Template: corev1.PodTemplateSpec{
				ObjectMeta: metav1.ObjectMeta{
					Labels: map[string]string{
						"app": "foobar",
					},
				},
				Spec: corev1.PodSpec{
					Containers: []corev1.Container{
						{
							Name:  "nginx-ofcourse",
							Image: "nginx:latest",
						},
					},
				},
			},
		},
	}

	ctx := context.Background()
	obj, err := client.AppsV1().DaemonSets(namespace).Create(ctx, ds, metav1.CreateOptions{})
	if err != nil {
		t.Fatal("failed to create daemon set", err)
	}

	return obj
}
// deaemonSetPod creates a Pod owned by ownerDaemonSet (via an owner reference)
// in the given namespace, with the given phase and IP address, using the
// supplied (fake) client. Fails the test if ownerDaemonSet is nil or the Pod
// cannot be created.
// NOTE(review): the function name contains a typo ("deaemon"); it is kept
// unchanged so existing callers keep compiling.
func deaemonSetPod(
	client kubernetes.Interface,
	ownerDaemonSet *appsv1.DaemonSet,
	podName string,
	podNamespace string,
	t *testing.T,
	podPhase corev1.PodPhase,
	ipAddress string,
) *corev1.Pod {
	t.Helper() // report failures at the caller's line

	if ownerDaemonSet == nil {
		t.Fatal("ownerDaemonSet is set to nil")
	}

	// Create the Pod with the appropriate owner reference
	pod := &corev1.Pod{
		ObjectMeta: metav1.ObjectMeta{
			Name:      podName,
			Namespace: podNamespace,
			Labels:    ownerDaemonSet.Spec.Selector.MatchLabels,
			OwnerReferences: []metav1.OwnerReference{
				*metav1.NewControllerRef(ownerDaemonSet, appsv1.SchemeGroupVersion.WithKind("DaemonSet")),
			},
		},
		Spec: corev1.PodSpec{
			Containers: []corev1.Container{
				{
					Name:  "has-to-be-nginx",
					Image: "nginx:latest",
				},
			},
		},
		Status: corev1.PodStatus{
			Phase: podPhase,
			PodIP: ipAddress,
		},
	}

	// Create the Pod in the Kubernetes cluster.
	pod, err := client.CoreV1().Pods(podNamespace).Create(
		context.Background(), pod, metav1.CreateOptions{},
	)
	if err != nil {
		t.Fatalf("unable to create pod: %v", err)
	}

	return pod
}
// TestGetPodInDaemonSet is a table-driven test for Service.GetPodInDaemonSet,
// covering missing DaemonSets, missing/non-running/IP-less Pods, and the
// zero-NumberAvailable case, all against a fake clientset.
func TestGetPodInDaemonSet(t *testing.T) {
	t.Parallel()
	testCases := []struct {
		name            string
		dsName          string // daemonSet name
		namespace       string // namespace for both the Pod and daemonset
		createPod       bool
		createDS        bool // should the daemonset be created
		ipAddress       string
		podPhase        corev1.PodPhase
		wantErr         bool
		numberAvailable int32
		errMsg          string
	}{
		{
			name:            "when both the DaemonSet and Pod do not exist",
			dsName:          "foo",
			namespace:       "bar",
			createPod:       false,
			createDS:        false,
			ipAddress:       "192.168.168.33",
			podPhase:        corev1.PodRunning,
			wantErr:         true,
			numberAvailable: 1,
			errMsg:          `daemonsets.apps "foo" not found`,
		},
		{
			name:            "when the DaemonSet exists but the Pod does not",
			dsName:          "foo",
			namespace:       "bar",
			createPod:       false,
			createDS:        true,
			ipAddress:       "192.168.168.33",
			podPhase:        corev1.PodRunning,
			wantErr:         true,
			numberAvailable: 1,
			errMsg:          `unable to find any Pod`,
		},
		{
			name:            "when the DaemonSet and Pod exists but the Pod is not in running state",
			dsName:          "foo",
			namespace:       "bar",
			createPod:       true,
			createDS:        true,
			ipAddress:       "192.168.168.33",
			podPhase:        corev1.PodPending,
			wantErr:         true,
			numberAvailable: 1,
			errMsg:          `unable to find any Pod`,
		},
		{
			name:            "when the DaemonSet and Pod exists and the Pod is in running state",
			dsName:          "foo",
			namespace:       "bar",
			createPod:       true,
			createDS:        true,
			ipAddress:       "192.168.168.33",
			podPhase:        corev1.PodRunning,
			wantErr:         false,
			numberAvailable: 1,
			errMsg:          ``,
		},
		{
			name:            "when the DaemonSet and Pod exists, the Pod is in running state but does not have valid IP address",
			dsName:          "foo",
			namespace:       "bar",
			createPod:       true,
			createDS:        true,
			ipAddress:       "",
			podPhase:        corev1.PodRunning,
			wantErr:         true,
			numberAvailable: 1,
			errMsg:          `unable to find any Pod owned by daemonset`,
		},
		{
			name:            "when the DaemonSet exists, but has Status.NumberAvailable set to zero",
			dsName:          "foo",
			namespace:       "bar",
			createPod:       false,
			createDS:        true,
			ipAddress:       "",
			podPhase:        corev1.PodRunning,
			wantErr:         true,
			numberAvailable: 0,
			errMsg:          `zero Pods are available in the daemonset`,
		},
	}

	for _, testCase := range testCases {
		tc := testCase // capture range variable for the parallel subtest
		t.Run(tc.name, func(t *testing.T) {
			t.Parallel()
			r := require.New(t)
			k8sClient := fake.NewSimpleClientset()

			var ds *appsv1.DaemonSet // daemon set
			if tc.createDS {
				tmpDs := createDaemonSet(k8sClient, tc.dsName, tc.namespace, tc.numberAvailable, t)
				r.NotNil(tmpDs)
				ds = tmpDs
			}

			if tc.createPod {
				_ = deaemonSetPod(k8sClient, ds, "foo-pod", tc.namespace, t, tc.podPhase, tc.ipAddress)
			}

			svc := New(k8sClient, hclog.NewNullLogger())
			gotPod, err := svc.GetPodInDaemonSet(context.Background(), tc.dsName, tc.namespace)

			if !tc.wantErr { // if we do not want an error then
				r.NoError(err)
				if tc.createPod {
					r.Equal(gotPod.Status.PodIP, tc.ipAddress)
				}
				return
			}

			if tc.wantErr {
				r.Error(err)
				if tc.errMsg != "" {
					r.Contains(err.Error(), tc.errMsg)
				}
			}
		})
	}
}
================================================
FILE: internal/kubeops/deployment.go
================================================
package kubeops
import (
"context"
"fmt"
appsv1 "k8s.io/api/apps/v1"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/fields"
"k8s.io/apimachinery/pkg/labels"
)
// GetPodInDeployment - Returns a random running Pod (with an IP address)
// that belongs to the given deployment.
//
// It resolves the deployment, picks the first ReplicaSet controlled by it
// whose replica count is at least 1, and then selects a random running Pod
// matching that ReplicaSet's label selector.
func (svc *Service) GetPodInDeployment(ctx context.Context, name, namespace string) (*corev1.Pod, error) {
	// check if the deployment actually exists
	deploy, err := svc.Client.AppsV1().Deployments(namespace).Get(ctx, name, metav1.GetOptions{})
	if err != nil {
		return nil, err
	}

	// get the list of ReplicaSets in the namespace; we keep the first one
	// that is controlled by this deployment AND has a non-zero replica count
	replicaSets, err := svc.Client.AppsV1().ReplicaSets(namespace).List(ctx, metav1.ListOptions{})
	if err != nil {
		return nil, err
	}

	var rs *appsv1.ReplicaSet
	for index := range replicaSets.Items {
		candidate := &replicaSets.Items[index]
		if !metav1.IsControlledBy(candidate, deploy) {
			continue
		}
		// guard against a nil Replicas pointer: the API server defaults it,
		// but hand-built or fake objects may leave it unset; dereferencing
		// it unconditionally here used to be a potential panic
		if candidate.Spec.Replicas == nil || *candidate.Spec.Replicas < 1 {
			svc.Log.Info("replicaSet size set to zero",
				"name", candidate.Name,
				"deployment", name,
				"namespace", namespace)
			continue
		}
		rs = candidate
		break
	}
	if rs == nil {
		return nil, fmt.Errorf("could not find the replicaSet with replica count >=1 owned by the "+
			"deployment %s in namespace %s", name, namespace)
	}

	// we only want the Pods that are in running state
	fieldSelector := fields.OneTermEqualSelector(
		"status.phase",
		"Running",
	)

	// we only want to select the Pods whose labels match the selector of the
	// parent ReplicaSet
	selector, err := labels.ValidatedSelectorFromSet(rs.Spec.Selector.MatchLabels)
	if err != nil {
		return nil, err
	}

	// list the matching running Pods and pick a random one owned by the ReplicaSet
	pods, err := svc.Client.CoreV1().Pods(namespace).List(ctx, metav1.ListOptions{
		LabelSelector: selector.String(),
		FieldSelector: fieldSelector.String(),
	})
	if err != nil {
		return nil, err
	}
	pod, err := getRandomPodFromPodList(rs, pods)
	if err != nil {
		return nil, fmt.Errorf("unable to find any Pod associated with deployment %s in namespace %s: %w",
			name, namespace, err)
	}
	svc.Log.Info("Found Pod", "Parent-deployment", name, "Pod", pod.Name,
		"Namespace", pod.Namespace, "IP", pod.Status.PodIP)
	return pod, nil
}
================================================
FILE: internal/kubeops/deployment_test.go
================================================
package kubeops
import (
"context"
"fmt"
"testing"
"github.com/hashicorp/go-hclog"
"github.com/stretchr/testify/require"
appsv1 "k8s.io/api/apps/v1"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/uuid"
"k8s.io/utils/pointer"
"k8s.io/client-go/kubernetes/fake"
)
// getDeploymentObject - builds an apps/v1 Deployment spec with a single nginx
// container listening on TCP port 80 and the requested replica count.
func getDeploymentObject(name, namespace string, replicaSize int32) *appsv1.Deployment {
	const (
		image = "nginx"
		tag   = "latest"
	)

	nginxContainer := corev1.Container{
		Name:  image,
		Image: fmt.Sprintf("%s:%s", image, tag),
		Ports: []corev1.ContainerPort{
			{
				Name:          "http",
				ContainerPort: 80,
				Protocol:      corev1.ProtocolTCP,
			},
		},
	}

	return &appsv1.Deployment{
		ObjectMeta: metav1.ObjectMeta{
			Name:      name,
			Namespace: namespace,
			UID:       uuid.NewUUID(), // fake clientset does not assign UIDs on Create
		},
		Spec: appsv1.DeploymentSpec{
			Replicas: pointer.Int32(replicaSize),
			Selector: &metav1.LabelSelector{
				MatchLabels: map[string]string{"app": name},
			},
			Template: corev1.PodTemplateSpec{
				ObjectMeta: metav1.ObjectMeta{
					Labels: map[string]string{"app": name},
				},
				Spec: corev1.PodSpec{
					Containers: []corev1.Container{nginxContainer},
				},
			},
		},
	}
}
// replicaSetWithOwnerSetToDeployment - creates a ReplicaSet spec with its
// controller owner reference pointing at deploy and the given replica count.
//
// The ReplicaSet is placed in the deployment's own namespace (previously this
// was hard-coded to "default", which silently broke for any other namespace).
func replicaSetWithOwnerSetToDeployment(deploy *appsv1.Deployment, size int32) *appsv1.ReplicaSet {
	replicaSet := &appsv1.ReplicaSet{
		ObjectMeta: metav1.ObjectMeta{
			Name:      "replicaset-" + deploy.Name,
			Namespace: deploy.Namespace,
			Labels:    deploy.Spec.Selector.MatchLabels,
			OwnerReferences: []metav1.OwnerReference{
				*metav1.NewControllerRef(deploy, appsv1.SchemeGroupVersion.WithKind("Deployment")),
			},
		},
		Spec: appsv1.ReplicaSetSpec{
			Selector: deploy.Spec.Selector,
			Template: deploy.Spec.Template,
			Replicas: pointer.Int32(size),
		},
	}
	// reuse the deployment's UID so the ReplicaSet has a non-empty UID for
	// owner-reference (IsControlledBy) checks — NOTE(review): presumably
	// because the fake clientset does not generate UIDs; confirm
	replicaSet.UID = deploy.UID
	return replicaSet
}
// podWithOwnerSetToReplicaSet - builds a running Pod spec (with a fixed IP
// address) whose controller owner reference points at rs.
func podWithOwnerSetToReplicaSet(rs *appsv1.ReplicaSet) *corev1.Pod {
	ownerRef := metav1.NewControllerRef(rs, appsv1.SchemeGroupVersion.WithKind("ReplicaSet"))
	return &corev1.Pod{
		ObjectMeta: metav1.ObjectMeta{
			Name:            rs.Name + "-pod-foo",
			Namespace:       rs.Namespace,
			Labels:          rs.Labels,
			OwnerReferences: []metav1.OwnerReference{*ownerRef},
		},
		Spec: rs.Spec.Template.Spec,
		Status: corev1.PodStatus{
			Phase: corev1.PodRunning,
			PodIP: "192.168.0.1",
		},
	}
}
// TestGetPodInDeployment - walks GetPodInDeployment through its failure modes
// in sequence (no ReplicaSet, zero-sized ReplicaSet, no Pod, Pod without an
// IP) and finally through the happy path. All steps share one fake clientset,
// so the order of the sub-checks matters.
func TestGetPodInDeployment(t *testing.T) {
	r := require.New(t)
	// build the new service object backed by an in-memory fake clientset
	svc := New(fake.NewSimpleClientset(), hclog.NewNullLogger())
	name := "deploy1"
	namespace := "default"
	deploySpec := getDeploymentObject(name, namespace, 1)
	ctx := context.Background()
	deployObj, err := svc.Client.AppsV1().Deployments(namespace).Create(ctx, deploySpec, metav1.CreateOptions{})
	r.NoError(err)
	// the fake clientset will not create a replicaSet for the deployment, so
	// GetPodInDeployment should fail with the replicaSet-not-found error
	_, err = svc.GetPodInDeployment(ctx, name, namespace)
	r.Error(err)
	rsNotFoundMessage := fmt.Sprintf("could not find the replicaSet with replica count >=1 owned "+
		"by the deployment %s in namespace %s",
		name, namespace)
	r.Equal(rsNotFoundMessage, err.Error())
	// now we create the replicaSet that is owned by the deployment but has
	// zero replicas; it is skipped, so the same not-found error is expected
	rsSpec := replicaSetWithOwnerSetToDeployment(deployObj, 0)
	_, err = svc.Client.AppsV1().ReplicaSets(namespace).Create(ctx, rsSpec, metav1.CreateOptions{})
	r.NoError(err, "failed to create replicaSet with size set to zero")
	_, err = svc.GetPodInDeployment(ctx, name, namespace)
	r.Error(err)
	r.Equal(rsNotFoundMessage, err.Error())
	// we now modify the existing replicaSet and set the replica size to 1;
	// there is still no Pod, so a Pod-not-found error is expected
	rsSpec.Spec.Replicas = pointer.Int32(1)
	rsObj, err := svc.Client.AppsV1().ReplicaSets(namespace).Update(ctx, rsSpec, metav1.UpdateOptions{})
	r.NoError(err, "failed to update replicaset size set 1")
	_, err = svc.GetPodInDeployment(ctx, name, namespace)
	podNotFound := fmt.Sprintf("unable to find any Pod associated with deployment %s in namespace %s",
		name, namespace)
	r.Contains(err.Error(), podNotFound)
	// we now create a Pod that has no IP address set on it
	// but is in running stage and is owned by the replicaSet;
	// this should also trigger an error
	podSpec := podWithOwnerSetToReplicaSet(rsObj)
	podSpec.Status.PodIP = ""
	_, err = svc.Client.CoreV1().Pods(namespace).Create(ctx, podSpec, metav1.CreateOptions{})
	r.NoError(err)
	_, err = svc.GetPodInDeployment(ctx, name, namespace)
	r.Contains(err.Error(), podNotFound)
	// finally we update the Pod to simulate an IP address allocation by CNI;
	// GetPodInDeployment should now succeed and return that exact Pod
	podSpec.Status.PodIP = "192.168.0.1"
	createdPodObj, err := svc.Client.CoreV1().Pods(namespace).Update(ctx, podSpec, metav1.UpdateOptions{})
	r.NoError(err)
	gotPodObj, err := svc.GetPodInDeployment(ctx, name, namespace)
	r.NoError(err)
	r.Equal(createdPodObj, gotPodObj)
}
================================================
FILE: internal/kubeops/pod.go
================================================
package kubeops
import (
"context"
"fmt"
"log"
"math/rand"
"strings"
"time"
corev1 "k8s.io/api/core/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/client-go/kubernetes"
)
// Coordinates of the ephemeral-containers subresource, used by
// CheckEphemeralContainerSupport to probe the API server for support.
const (
	ephContainerGroup   = ""                          // api group which provides the ephemeral container resource (core group)
	ephContainerVersion = "v1"                        // api version which provides the ephemeral container resource
	ephContainerKind    = "Pod"                       // core API Kind that provides the ephemeral container resource
	ephContainerRes     = "pods/ephemeralcontainers"  // name of the ephemeral container subresource
)
// GetPod - Returns the named Pod, provided it is in the Running phase and has
// an IP address allocated to it; otherwise an error describing why not.
func (svc *Service) GetPod(ctx context.Context, name, namespace string) (*corev1.Pod, error) {
	// fetch the Pod that matches the name and namespace
	pod, err := svc.Client.CoreV1().Pods(namespace).Get(ctx, name, metav1.GetOptions{})
	switch {
	case apierrors.IsNotFound(err):
		// not-found gets a wrapped, more descriptive error
		svc.Log.Error("unable to find Pod", "namespace", namespace, "error", err)
		return nil, fmt.Errorf("unable to find Pod %s in namespace %s: %w", name, namespace, err)
	case err != nil:
		// any other lookup failure is returned as-is
		svc.Log.Error("Unable to find Pod", "namespace", namespace, "error", err)
		return nil, err
	}
	// the Pod must be running ...
	if phase := pod.Status.Phase; phase != corev1.PodRunning {
		return nil, fmt.Errorf("pod %s in namespace %s is not in running state: %s",
			name,
			namespace,
			phase,
		)
	}
	// ... and must have an IP allocated by the CNI
	if pod.Status.PodIP == "" {
		return nil, fmt.Errorf("pod %s in namespace %s does not have an IP address",
			name,
			namespace,
		)
	}
	return pod, nil
}
// getRandomPodFromPodList - picks a random Pod out of podList that is
// controlled by ownerObj, is in the Running phase and has a Pod IP.
// On failure it returns an empty (non-nil) Pod together with the error.
func getRandomPodFromPodList(ownerObj metav1.Object, podList *corev1.PodList) (*corev1.Pod, error) {
	if ownerObj == nil {
		return &corev1.Pod{}, fmt.Errorf("parameter ownerObj cannot be nil")
	}
	if podList == nil {
		return &corev1.Pod{}, fmt.Errorf("parameter podList cannot be nil")
	}
	var candidates []*corev1.Pod
	for i := range podList.Items {
		// take a copy so the returned pointer does not alias podList.Items
		p := podList.Items[i]
		// keep the pod only when it is owned by the parent object, is in
		// running state, and has an IP address allocated by the CNI
		if metav1.IsControlledBy(&p, ownerObj) &&
			p.Status.Phase == corev1.PodRunning &&
			p.Status.PodIP != "" {
			candidates = append(candidates, &p)
		}
	}
	if len(candidates) == 0 {
		return &corev1.Pod{}, fmt.Errorf("unable to find a Pod")
	}
	return candidates[rand.Intn(len(candidates))], nil
}
// CheckEphemeralContainerSupport - verifies that the connected cluster serves
// the pods/ephemeralcontainers subresource; returns an error when it does not
// (or when the discovery call itself fails).
func (svc *Service) CheckEphemeralContainerSupport(ctx context.Context) error {
	supported, err := checkResourceSupport(ctx, svc.Client, ephContainerGroup, ephContainerVersion,
		ephContainerKind, ephContainerRes)
	switch {
	case err != nil:
		return err
	case !supported:
		// discovery succeeded but the resource is not served
		return fmt.Errorf("unable to find K8s resource=%q, Group=%q, Version=%q Kind=%q",
			ephContainerRes, ephContainerGroup, ephContainerVersion, ephContainerKind)
	}
	return nil
}
// checkResourceSupport - reports whether the API server serves a resource
// matching both kind and resourceName under the given group/version, using
// the discovery client. The ctx parameter is kept for interface symmetry.
func checkResourceSupport(
	ctx context.Context,
	k8sClient kubernetes.Interface,
	group, version, kind, resourceName string,
) (bool, error) {
	gv := schema.GroupVersion{Group: group, Version: version}
	resourceList, err := k8sClient.Discovery().ServerResourcesForGroupVersion(gv.String())
	if err != nil {
		return false, fmt.Errorf("failed to get API resource list for %q: %w", gv.String(), err)
	}
	for i := range resourceList.APIResources {
		res := resourceList.APIResources[i]
		if res.Kind == kind && res.Name == resourceName {
			return true, nil
		}
	}
	return false, nil
}
// PingHealthEndpoint - issues an HTTP GET against the given absolute path of
// the API server (e.g. a health endpoint) and returns any resulting error.
func (svc *Service) PingHealthEndpoint(ctx context.Context, endpoint string) error {
	err := svc.Client.CoreV1().RESTClient().Get().AbsPath(endpoint).Do(ctx).Error()
	if err != nil {
		return fmt.Errorf("unable to HTTP ping %s of the API server: %w", endpoint, err)
	}
	return nil
}
// WaitForPodInResourceReady - polls (every poll, up to timeout) until the
// named resource — one of "deployment", "daemonset", "statefulset" or "pod"
// (case-insensitive) — has a running Pod with an IP address, as reported by
// the matching GetPod* helper. The first probe fires immediately; subsequent
// probes fire on each tick. Returns nil on success, an error on an
// unsupported resource type or on timeout.
func (svc *Service) WaitForPodInResourceReady(name, namespace, resourceType string,
	poll, timeout time.Duration,
) error {
	var fn func(context.Context, string, string) (*corev1.Pod, error)
	switch strings.ToLower(resourceType) {
	case "deployment":
		fn = svc.GetPodInDeployment
	case "daemonset":
		fn = svc.GetPodInDaemonSet
	case "statefulset":
		fn = svc.GetPodInStatefulSet
	case "pod":
		fn = svc.GetPod
	default:
		return fmt.Errorf("unsupported resource type %q", resourceType)
	}

	timeOutCh := time.After(timeout)
	ticker := time.NewTicker(poll)
	// previously the ticker was never stopped, leaking its timer resources on
	// every call; stop it on all exit paths
	defer ticker.Stop()

	// buffered channel pre-loaded with one token so that the first poll
	// happens immediately instead of waiting for the first tick
	trigger := make(chan struct{}, 1)
	trigger <- struct{}{}

	for {
		select {
		case <-timeOutCh:
			return fmt.Errorf("timed out getting pod for %q - %s/%s, timeout duration=%v", resourceType,
				namespace, name, timeout.String())
		case <-trigger:
		case <-ticker.C:
		}
		log.Println("polling for object", name, namespace, resourceType)
		svc.Log.Info("polling", "name", name, "namespace", namespace)
		_, err := fn(context.Background(), name, namespace)
		if err == nil {
			log.Printf("Found name=%s namespace=%s resourceType=%s", name, namespace, resourceType)
			return nil
		}
		log.Println("error while polling for object, retrying...", name, namespace, resourceType, err)
	}
}
================================================
FILE: internal/kubeops/pod_test.go
================================================
package kubeops
import (
"context"
"testing"
"github.com/hashicorp/go-hclog"
"github.com/stretchr/testify/require"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/client-go/kubernetes/fake"
)
// TestGetPod - table-driven test for Service.GetPod against a fake clientset.
// A single Pod object is created once and then mutated per test case to cover
// the happy path, the non-running phase, the missing-IP and the not-found
// paths.
func TestGetPod(t *testing.T) {
	// create a fake clientset that will store state in memory
	fakeClient := fake.NewSimpleClientset()
	ctx := context.Background()
	// initialise our Service
	svc := Service{
		Client: fakeClient,
		Log:    hclog.NewNullLogger(),
	}
	// create a testNamespace and testPod
	testNamespace := "foo-ns"
	testPodName := "bar-pod"
	testPodIP := "192.168.168.100"
	testPod := &corev1.Pod{
		ObjectMeta: metav1.ObjectMeta{
			Name:      testPodName,
			Namespace: testNamespace,
		},
		Status: corev1.PodStatus{
			Phase: corev1.PodRunning,
			PodIP: testPodIP,
		},
	}
	r := require.New(t)
	// create and then add the test pod to the fake clientset
	_, err := fakeClient.CoreV1().Pods(testNamespace).Create(context.Background(), testPod, metav1.CreateOptions{})
	r.NoError(err, "unable to add test Pod to the fake client")
	testCases := []struct {
		name      string          // sub-test name
		namespace string          // namespace to look the Pod up in
		podName   string          // Pod name passed to GetPod
		podIP     string          // IP assigned to the shared test Pod before the call
		phase     corev1.PodPhase // phase assigned to the shared test Pod before the call
		wantErr   bool            // whether GetPod is expected to fail
	}{
		{
			name:      "GetPod with valid inputs should return the correct pod",
			namespace: testNamespace,
			podName:   testPodName,
			podIP:     testPodIP,
			phase:     corev1.PodRunning,
			wantErr:   false,
		},
		{
			name:      "GetPod should return an error when the pod is not in running state",
			namespace: testNamespace,
			podName:   testPodName,
			podIP:     testPodIP,
			phase:     corev1.PodPending,
			wantErr:   true,
		},
		{
			name:      "GetPod should return an error when the pod does not have an IP address",
			namespace: testNamespace,
			podName:   testPodName,
			podIP:     "",
			phase:     corev1.PodRunning,
			wantErr:   true,
		},
		{
			name:      "GetPod should return an error when the pod is not found",
			namespace: testNamespace,
			podName:   "this-pod-does-not-exist",
			podIP:     "",
			phase:     corev1.PodPending,
			wantErr:   true,
		},
	}
	// NOTE: the sub-tests mutate the shared testPod object, so they must run
	// sequentially (no t.Parallel here)
	for _, testCase := range testCases {
		t.Run(testCase.name, func(t *testing.T) {
			// update the test pod with the desired phase and podIP
			testPod.Status.Phase = testCase.phase
			testPod.Status.PodIP = testCase.podIP
			// update the fake clientset with the updated pod that has some fields missing or empty
			_, err := fakeClient.CoreV1().Pods(testNamespace).Update(ctx, testPod, metav1.UpdateOptions{})
			r.NoError(err, "unable to update the Pod in the fake clientset")
			result, err := svc.GetPod(ctx, testCase.podName, testCase.namespace)
			if testCase.wantErr {
				r.Error(err, "wanted an error, but got nil")
				return
			}
			r.NoError(err, "wanted no error, but got %v", err)
			r.NotNil(result, "wanted a pod, but got nil")
			r.Equal(testCase.podName, result.Name, "wanted pod name to be %s, but got %s", testCase.podName, result.Name)
		})
	}
}
================================================
FILE: internal/kubeops/statefulset.go
================================================
package kubeops
import (
"context"
"fmt"
corev1 "k8s.io/api/core/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/fields"
"k8s.io/apimachinery/pkg/labels"
)
// GetPodInStatefulSet - Returns a random running Pod (with an IP address)
// owned by the named statefulset, selected via the statefulset's own label
// selector.
func (svc *Service) GetPodInStatefulSet(ctx context.Context, name, namespace string) (*corev1.Pod, error) {
	// check if the statefulset actually exists
	ss, err := svc.Client.AppsV1().StatefulSets(namespace).Get(ctx, name, metav1.GetOptions{})
	if err != nil {
		if apierrors.IsNotFound(err) {
			return nil, fmt.Errorf("unable to find statefulset %s in namespace %s: %w", name, namespace, err)
		}
		return nil, err
	}
	// ensure that the statefulset has replica count set to at least 1; the
	// nil check avoids a panic on objects whose Replicas field was never set
	// (the API server defaults it, but hand-built or fake objects may not)
	if ss.Spec.Replicas != nil && *ss.Spec.Replicas == 0 {
		svc.Log.Error("Stateful set has zero replicas", "Statefulset",
			name, "Namespace", namespace)
		return nil, fmt.Errorf("zero replicas were found in the statefulset %s in namespace %s",
			name, namespace)
	}
	// we only want the Pods that are in running state
	fieldSelector := fields.OneTermEqualSelector(
		"status.phase",
		"Running",
	).String()
	// select Pods using the StatefulSet's label selector
	podLabels := labels.FormatLabels(ss.Spec.Selector.MatchLabels)
	pods, err := svc.Client.CoreV1().Pods(namespace).List(ctx, metav1.ListOptions{
		LabelSelector: podLabels,
		FieldSelector: fieldSelector,
	})
	if err != nil {
		svc.Log.Error("Unable to list Pods", "Namespace", namespace, "error", err)
		return nil, fmt.Errorf("unable to list Pods in namespace: %w", err)
	}
	// pick a random matching Pod that is actually controlled by this statefulset
	pod, err := getRandomPodFromPodList(ss, pods)
	if err != nil {
		return nil, fmt.Errorf("unable to find Pod owned by Statefulset %s in namespace %s: %w",
			name, namespace, err)
	}
	svc.Log.Info("Found running Pod owned by StatefulSet", "StatefulSetName", name,
		"Namespace", namespace, "Pod", pod.Name, "PodIP", pod.Status.PodIP)
	return pod, nil
}
================================================
FILE: internal/kubeops/statefulset_test.go
================================================
package kubeops
import (
"context"
"fmt"
"testing"
"github.com/hashicorp/go-hclog"
"github.com/stretchr/testify/require"
appsv1 "k8s.io/api/apps/v1"
corev1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/client-go/kubernetes"
"k8s.io/client-go/kubernetes/fake"
"k8s.io/utils/pointer"
)
// createStatefulSet - builds an nginx StatefulSet (with a single "data" PVC
// template) and creates it via the supplied client; the test fails fatally on
// any creation error.
func createStatefulSet(client kubernetes.Interface, name, namespace string, replicas int32, t *testing.T) *appsv1.StatefulSet {
	podLabels := map[string]string{
		"app": "nginx",
	}

	nginxContainer := corev1.Container{
		Name:  "nginx",
		Image: "nginx",
		Ports: []corev1.ContainerPort{
			{
				Name:          "http",
				ContainerPort: 80,
				Protocol:      corev1.ProtocolTCP,
			},
		},
	}

	dataClaim := corev1.PersistentVolumeClaim{
		ObjectMeta: metav1.ObjectMeta{
			Name: "data",
		},
		Spec: corev1.PersistentVolumeClaimSpec{
			AccessModes: []corev1.PersistentVolumeAccessMode{corev1.ReadWriteOnce},
			Resources: corev1.VolumeResourceRequirements{
				Requests: corev1.ResourceList{
					corev1.ResourceStorage: resource.MustParse("1Gi"),
				},
			},
		},
	}

	statefulSet := &appsv1.StatefulSet{
		ObjectMeta: metav1.ObjectMeta{
			Name:      name,
			Namespace: namespace,
		},
		Spec: appsv1.StatefulSetSpec{
			Replicas:    pointer.Int32(replicas),
			ServiceName: "nginx-service",
			Selector: &metav1.LabelSelector{
				MatchLabels: podLabels,
			},
			Template: corev1.PodTemplateSpec{
				ObjectMeta: metav1.ObjectMeta{
					Labels: podLabels,
				},
				Spec: corev1.PodSpec{
					Containers: []corev1.Container{nginxContainer},
				},
			},
			VolumeClaimTemplates: []corev1.PersistentVolumeClaim{dataClaim},
		},
	}

	obj, err := client.AppsV1().StatefulSets(namespace).Create(context.Background(), statefulSet, metav1.CreateOptions{})
	if err != nil {
		t.Fatalf("Failed to create stateful set %s: %v", name, err)
	}
	return obj
}
// createStatefulSetPod - creates a running Pod named "<statefulset>-<index>"
// (mimicking the StatefulSet ordinal naming scheme) that is controlled by
// statefulSet and carries the given Pod IP; the test fails fatally on error.
func createStatefulSetPod(
	client kubernetes.Interface,
	statefulSet *appsv1.StatefulSet,
	ipAddress string,
	index int,
	t *testing.T,
) *corev1.Pod {
	controllerRef := metav1.NewControllerRef(statefulSet, appsv1.SchemeGroupVersion.WithKind("StatefulSet"))
	pod := &corev1.Pod{
		ObjectMeta: metav1.ObjectMeta{
			Name:            fmt.Sprintf("%s-%d", statefulSet.Name, index),
			Namespace:       statefulSet.Namespace,
			Labels:          statefulSet.Spec.Template.ObjectMeta.Labels,
			OwnerReferences: []metav1.OwnerReference{*controllerRef},
		},
		Spec: statefulSet.Spec.Template.Spec,
		Status: corev1.PodStatus{
			Phase: corev1.PodRunning,
			PodIP: ipAddress,
		},
	}
	podObj, err := client.CoreV1().Pods(pod.Namespace).Create(context.Background(), pod, metav1.CreateOptions{})
	if err != nil {
		t.Fatalf("Failed to create pod owned by statefulset %v: %v", statefulSet.Name, err)
	}
	return podObj
}
// TestGetPodInStatefulSet - table-driven test for GetPodInStatefulSet covering
// a missing StatefulSet, a zero-replica StatefulSet and the happy path.
func TestGetPodInStatefulSet(t *testing.T) {
	testCases := []struct {
		name              string // sub-test name
		ipAddress         string // Pod IP assigned when createPod is true
		ssName            string
		ssNamespace       string
		replicas          int32
		createPod         bool // pre-create a Pod owned by the StatefulSet
		createStatefulset bool // pre-create the StatefulSet itself
		wantErr           bool
		errMsg            string // substring expected in the returned error
	}{
		{
			name:              "when both StatefulSet and Pod does not exist",
			ipAddress:         "192.168.0.1",
			ssName:            "web",
			ssNamespace:       "default",
			createPod:         false,
			replicas:          0,
			createStatefulset: false,
			wantErr:           true,
			errMsg:            `unable to find statefulset`,
		},
		{
			name:              "when StatefulSet exists but the Pod does not exist",
			ipAddress:         "192.168.0.1",
			ssName:            "web",
			ssNamespace:       "default",
			createPod:         false,
			replicas:          0,
			createStatefulset: true,
			wantErr:           true,
			errMsg:            `zero replicas were found in the statefulset`,
		},
		{
			name:              "when both StatefulSet and Pod exist",
			ipAddress:         "192.168.0.1",
			ssName:            "web",
			ssNamespace:       "default",
			createPod:         true,
			replicas:          1,
			createStatefulset: true,
			wantErr:           false,
		},
	}
	for _, testCase := range testCases {
		// capture the loop variable for the parallel sub-test
		tc := testCase
		t.Run(tc.name, func(t *testing.T) {
			t.Parallel()
			r := require.New(t)
			// each sub-test gets its own isolated in-memory clientset, so the
			// parallel sub-tests cannot interfere with each other
			client := fake.NewSimpleClientset()
			svc := New(client, hclog.NewNullLogger())
			var (
				ss  *appsv1.StatefulSet
				pod *corev1.Pod
			)
			if tc.createStatefulset {
				ss = createStatefulSet(client, tc.ssName, tc.ssNamespace, tc.replicas, t)
			}
			if tc.createPod {
				pod = createStatefulSetPod(client, ss, tc.ipAddress, 0, t)
			}
			gotPod, err := svc.GetPodInStatefulSet(context.Background(), tc.ssName, tc.ssNamespace)
			// if we do not expect an error in the test case
			if !tc.wantErr {
				r.NoError(err)
				// if we have created both pods and statefulset
				if tc.createPod && tc.createStatefulset {
					r.Equal(gotPod, pod)
				}
				return
			}
			// we are expecting an error
			r.Error(err)
			// check error message with the one defined in the test case
			if tc.errMsg != "" {
				r.Contains(err.Error(), tc.errMsg)
			}
		})
	}
}
================================================
FILE: internal/kubeops/string_gen.go
================================================
package kubeops
import (
"math/rand"
"github.com/google/uuid"
)
const (
	// charset - alphabet RandString draws from: lowercase ascii letters plus
	// the digits 1-9. NOTE(review): '0' is absent — presumably deliberate
	// (e.g. to avoid confusion with 'o'); confirm before "fixing".
	charset = "abcdefghijklmnopqrstuvwxyz123456789"
)
// NewUUIDString - generates a new UUID via uuid.NewUUID and returns its
// canonical string form, or an error when generation fails.
func NewUUIDString() (string, error) {
	newID, err := uuid.NewUUID()
	if err != nil {
		return "", err
	}
	return newID.String(), nil
}
// RandString - returns a string of the given length whose characters are
// drawn uniformly at random from charset. It relies on math/rand, which is
// seeded automatically as of Go 1.20, so it is not cryptographically secure.
func RandString(length int) string {
	out := make([]byte, length)
	for i := 0; i < length; i++ {
		out[i] = charset[rand.Intn(len(charset))]
	}
	return string(out)
}
================================================
FILE: internal/logger/hclog.go
================================================
package logger
import (
"fmt"
"io"
"strings"
"github.com/hashicorp/go-hclog"
)
// NewHCLogger - builds an hclog.Logger named after appName that writes
// human-readable (non-JSON) output to w. Unrecognised log levels fall back
// to INFO; DEBUG and more verbose levels also record the caller location.
func NewHCLogger(logLevel, appName string, w io.Writer) hclog.Logger {
	level := hclog.LevelFromString(strings.ToUpper(logLevel))
	if level == hclog.NoLevel {
		level = hclog.Info
	}
	return hclog.New(&hclog.LoggerOptions{
		Name:            fmt.Sprintf("[%s]", appName),
		Level:           level,
		Output:          w,
		JSONFormat:      false,
		IncludeLocation: level <= hclog.Debug, // DEBUG or more verbose: include file:line
	})
}
================================================
FILE: justfile
================================================
# list the available recipes
default:
    just --list

version := "0.0.1"

# build the binary in ./bin folder
build:
    go build -o bin/netassert cmd/netassert/cli/*.go

# build and run the binary
run: build
    bin/netassert

# run go test(s)
test:
    go test -v -race ./...

# run the linter
lint:
    golangci-lint run ./...

# remove the binary from ./bin folder
clean:
    @rm -rf ./bin

# create a new kind k8s cluster called packet-test
kind-up:
    kind create cluster --name packet-test --config ./e2e/clusters/kind/kind-config.yaml

# delete the kind k8s cluster called packet-test
kind-down:
    kind delete clusters packet-test

# deploy the e2e workload kubernetes manifests
k8s-apply:
    kubectl apply -f ./e2e/manifests/workload.yaml

# delete the e2e workload kubernetes manifests
k8s-rm-apply:
    kubectl delete -f ./e2e/manifests/workload.yaml

# apply the e2e network policies
netpol-apply:
    kubectl apply -f ./e2e/manifests/networkpolicies.yaml

# delete the e2e network policies
netpol-rm-apply:
    kubectl delete -f ./e2e/manifests/networkpolicies.yaml

# install the calico manifests into the current cluster
calico-apply:
    kubectl apply -f https://raw.githubusercontent.com/projectcalico/calico/v3.31.3/manifests/calico.yaml

# remove the calico manifests from the current cluster
calico-rm-apply:
    kubectl delete -f https://raw.githubusercontent.com/projectcalico/calico/v3.31.3/manifests/calico.yaml

# build docker image and tag it with {{version}}
docker-build:
    docker build -f Dockerfile --no-cache --tag packet-capture:{{version}} .

# import image into the local kind cluster called packet-test
kind-import-image:
    kind load docker-image packet-capture:{{version}} --name packet-test && kind load docker-image netassert-client:{{version}} --name packet-test
================================================
FILE: rbac/cluster-role.yaml
================================================
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  name: netassert
rules:
  # read access to the workload controllers (and pods) that netassert
  # resolves its test targets from
  - apiGroups:
      - ""
      - "apps"
    resources:
      - deployments
      - statefulsets
      - daemonsets
      - pods
    verbs:
      - get
  # list access so netassert can walk the replicasets/pods owned by a
  # controller when picking a target Pod
  - apiGroups:
      - ""
      - "apps"
    resources:
      - replicasets
      - pods
    verbs:
      - list
  # watch/patch pods and the ephemeralcontainers subresource so netassert
  # can manage its ephemeral (debug) containers
  - apiGroups:
      - ""
    resources:
      - pods
      - pods/ephemeralcontainers
    verbs:
      - watch
      - patch
================================================
FILE: rbac/cluster-rolebinding.yaml
================================================
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: netassert
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: netassert
subjects:
  # grant the netassert ClusterRole to the netassert-user user
  - apiGroup: rbac.authorization.k8s.io
    kind: User
    name: netassert-user