Repository: chaosblade-io/chaosblade-operator Branch: master Commit: 7609cdab1287 Files: 179 Total size: 1.1 MB Directory structure: gitextract_bd7o2tq4/ ├── .gitattributes ├── .github/ │ ├── PULL_REQUEST_TEMPLATE.md │ ├── issue_template.md │ └── workflows/ │ ├── ci.yml │ └── release.yml ├── .gitignore ├── BUILD.md ├── CHANGELOG.md ├── CONTRIBUTING.md ├── LICENSE ├── Makefile ├── README.md ├── README_CN.md ├── build/ │ ├── bin/ │ │ ├── entrypoint │ │ └── user_setup │ ├── image/ │ │ ├── amd/ │ │ │ └── Dockerfile │ │ └── arm/ │ │ └── Dockerfile │ ├── musl/ │ │ └── Dockerfile │ └── spec.go ├── channel/ │ ├── client.go │ └── client_test.go ├── cmd/ │ ├── hookfs/ │ │ └── main.go │ └── manager/ │ └── main.go ├── deploy/ │ ├── crds/ │ │ └── chaosblade.io_chaosblades_crd.yaml │ ├── helm/ │ │ ├── chaosblade-operator/ │ │ │ ├── .helmignore │ │ │ ├── Chart.yaml │ │ │ ├── crds/ │ │ │ │ └── crd.yaml │ │ │ ├── templates/ │ │ │ │ ├── NOTES.txt │ │ │ │ ├── _helpers.tpl │ │ │ │ ├── daemonset.yaml │ │ │ │ ├── deployment.yaml │ │ │ │ ├── rbac.yaml │ │ │ │ ├── secret.yaml │ │ │ │ └── service.yaml │ │ │ └── values.yaml │ │ └── chaosblade-operator-arm64/ │ │ ├── .helmignore │ │ ├── Chart.yaml │ │ ├── crds/ │ │ │ └── crd.yaml │ │ ├── templates/ │ │ │ ├── NOTES.txt │ │ │ ├── _helpers.tpl │ │ │ ├── daemonset.yaml │ │ │ ├── deployment.yaml │ │ │ ├── rbac.yaml │ │ │ ├── secret.yaml │ │ │ └── service.yaml │ │ └── values.yaml │ ├── olm/ │ │ ├── Makefile │ │ └── deploy/ │ │ ├── crd.yaml │ │ ├── crds/ │ │ │ └── chaosblade_v1alpha1_chaosblade_crd.yaml │ │ ├── olm-catalog/ │ │ │ └── chaosblade-operator/ │ │ │ ├── 0.5.1/ │ │ │ │ ├── chaosblade-operator.v0.5.1.clusterserviceversion.yaml │ │ │ │ └── chaosblade_v1alpha1_chaosblade_crd.yaml │ │ │ ├── 0.6.0/ │ │ │ │ ├── chaosblade-operator.v0.6.0.clusterserviceversion.yaml │ │ │ │ └── chaosblade_v1alpha1_chaosblade_crd.yaml │ │ │ └── chaosblade-operator.package.yaml │ │ ├── operator.yaml │ │ ├── role.yaml │ │ ├── role_binding.yaml │ │ └── 
service_account.yaml │ └── oss/ │ ├── crd.yaml │ ├── operator.yaml │ ├── rbac.yaml │ ├── service.yaml │ └── webhook-cert-job.yaml ├── examples/ │ ├── create_services_in_batch.yaml │ ├── delay_pod_network_by_names.yaml │ ├── delete_pod_by_labels.yaml │ ├── delete_pod_by_names.yaml │ ├── fail_pod_by_labels.yaml │ ├── increase_container_cpu_load_by_id.yaml │ ├── kill_container_process_by_id.yaml │ ├── modify_service_traffic_policy.yaml │ ├── node-cpu-load.yml │ ├── node-disk-load-burn-read.yml │ ├── node-disk-load-burn-write.yml │ ├── node-disk-load-fill.yml │ ├── node-mem-load.yml │ ├── node-network-delay-by-names.yml │ ├── node-network-loss-by-names.yml │ ├── pod-bad-resource-size-cpu-mem.yaml │ ├── pod-bad-resource-size-cpu.yaml │ ├── pod-bad-resource-size-mem.yaml │ ├── pod-configmap-delete.yaml │ ├── pod-containercreating-by-pvc-error.yaml │ ├── pod-containercreating-disk.yaml │ ├── pod-cpu-load-by-names.yml │ ├── pod-delete_by_names.yaml │ ├── pod-failedmount-configmap.yaml │ ├── pod-failedmount-pvc.yaml │ ├── pod-failedmount-secret.yaml │ ├── pod-imagepullsecretserror-by-auth-corruption.yaml │ ├── pod-scheduling-failure.yaml │ ├── pod-taint-node.yaml │ ├── pod-terminating-by-finalizer.yaml │ ├── remove_container_by_id.yaml │ ├── tamper_container_dns_by_id.yaml │ └── test-configmap-delete.yaml ├── exec/ │ ├── container/ │ │ ├── application.go │ │ ├── container.go │ │ └── controller.go │ ├── controller.go │ ├── model/ │ │ ├── category.go │ │ ├── context.go │ │ ├── controller.go │ │ ├── controller_test.go │ │ ├── copy.go │ │ ├── deploy.go │ │ ├── download.go │ │ ├── executor.go │ │ ├── executor_copy.go │ │ ├── executor_nsexec.go │ │ ├── filter.go │ │ ├── filter_pod.go │ │ ├── filter_pod_test.go │ │ ├── model.go │ │ ├── osexp.go │ │ └── parallelizer.go │ ├── node/ │ │ ├── cniexp.go │ │ ├── controller.go │ │ ├── exec_helper.go │ │ ├── filter.go │ │ └── node.go │ ├── pod/ │ │ ├── badresourcesize.go │ │ ├── configmapdeleteexp.go │ │ ├── configmapdeleteexp_test.go │ │ 
├── containercreating.go │ │ ├── containercreatingdisk.go │ │ ├── containercreatingdisk_test.go │ │ ├── controller.go │ │ ├── delete.go │ │ ├── failedmount.go │ │ ├── failexp.go │ │ ├── fsexp.go │ │ ├── imageconfigexp.go │ │ ├── imageconfigexp_test.go │ │ ├── imagepullsecretserror.go │ │ ├── imagepullsecretserror_test.go │ │ ├── pod.go │ │ ├── schedulingfailure.go │ │ ├── taintnode.go │ │ ├── taintnode_test.go │ │ └── terminating.go │ └── service/ │ ├── controller.go │ ├── create.go │ ├── modify.go │ └── service.go ├── go.mod ├── go.sum ├── hack/ │ ├── init.sh │ ├── update-gofmt.sh │ ├── update-imports.sh │ ├── verify-gofmt.sh │ └── verify-imports.sh ├── licenserc.toml ├── pkg/ │ ├── apis/ │ │ ├── addtoscheme_chaosblade_v1alpha1.go │ │ ├── apis.go │ │ └── chaosblade/ │ │ ├── group.go │ │ └── v1alpha1/ │ │ ├── doc.go │ │ ├── register.go │ │ ├── types.go │ │ └── zz_generated.deepcopy.go │ ├── controller/ │ │ ├── add_chaosblade.go │ │ ├── chaosblade/ │ │ │ ├── controller.go │ │ │ ├── daemonset.go │ │ │ └── predicate.go │ │ └── controller.go │ ├── hookfs/ │ │ ├── client.go │ │ ├── hook.go │ │ ├── server.go │ │ └── types.go │ ├── runtime/ │ │ ├── chaosblade/ │ │ │ └── chaosblade.go │ │ ├── product/ │ │ │ ├── aliyun/ │ │ │ │ └── aliyun.go │ │ │ └── community/ │ │ │ └── community.go │ │ └── runtime.go │ └── webhook/ │ ├── pod/ │ │ ├── mutator.go │ │ └── mutator_test.go │ └── webhook.go ├── scripts/ │ ├── show-version.sh │ └── version.sh └── version/ └── version.go ================================================ FILE CONTENTS ================================================ ================================================ FILE: .gitattributes ================================================ * text=auto eol=lf ================================================ FILE: .github/PULL_REQUEST_TEMPLATE.md ================================================ ### Describe what this PR does / why we need it ### Does this pull request fix one issue? 
### Describe how you did it ### Describe how to verify it ### Special notes for reviews ================================================ FILE: .github/issue_template.md ================================================ ## Issue Description Type: *bug report* or *feature request* ### Describe what happened (or what feature you want) ### Describe what you expected to happen ### How to reproduce it (as minimally and precisely as possible) 1. 2. 3. ### Tell us your environment ### Anything else we need to know? ================================================ FILE: .github/workflows/ci.yml ================================================ # Copyright 2025 The ChaosBlade Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
name: CI on: push: branches: [ main, master ] pull_request: branches: [ main, master ] workflow_dispatch: jobs: test: name: ${{ matrix.os }} - Test - Go ${{ matrix.go_version }} runs-on: ${{ matrix.os }} strategy: matrix: go_version: - "1.25" os: - ubuntu-latest steps: - name: Set Up Go ${{ matrix.go_version }} uses: actions/setup-go@v5 with: go-version: ${{ matrix.go_version }} id: go - name: Checkout id: checkout uses: actions/checkout@v4 - name: Cache Go modules uses: actions/cache@v4 with: path: | ~/.cache/go-build ~/go/pkg/mod key: ${{ runner.os }}-go-${{ hashFiles('**/go.sum') }} restore-keys: | ${{ runner.os }}-go- - name: Run code style and import order verification run: make verify - name: Check License Header uses: korandoru/hawkeye@v6 - name: Tests id: test run: | make test build-matrix: name: Build on ${{ matrix.os }} (${{ matrix.goos }}/${{ matrix.goarch }}) runs-on: ${{ matrix.os }} strategy: matrix: include: - os: ubuntu-latest goos: linux goarch: amd64 platform: linux_amd64 runner_arch: x64 - os: ubuntu-latest goos: linux goarch: arm64 platform: linux_arm64 runner_arch: arm64 steps: - uses: actions/checkout@v4 with: fetch-depth: 0 # 获取完整的 Git 历史用于版本信息 - name: Set up Go uses: actions/setup-go@v5 with: go-version: '1.25' - name: Cache Go modules uses: actions/cache@v4 with: path: | ~/.cache/go-build ~/go/pkg/mod key: ${{ runner.os }}-go-${{ hashFiles('**/go.sum') }} restore-keys: | ${{ runner.os }}-go- - name: Install build dependencies run: | sudo apt-get update sudo apt-get install -y musl-tools build-essential # Install ARM64 cross-compilation tools if needed if [ "${{ matrix.goarch }}" = "arm64" ] && [ "$(uname -m)" != "aarch64" ]; then sudo apt-get install -y gcc-aarch64-linux-gnu g++-aarch64-linux-gnu fi - name: Build ${{ matrix.platform }} id: build run: | make ${{ matrix.platform }} - name: Verify Build Output id: verify run: | echo "Verifying build output for ${{ matrix.platform }}..." 
echo "Build architecture: $(uname -m)" # 检查 chaosblade-operator 二进制文件 if [ -f "build/_output/bin/chaosblade-operator" ]; then echo "✅ chaosblade-operator binary found" file build/_output/bin/chaosblade-operator ls -lh build/_output/bin/chaosblade-operator else echo "❌ chaosblade-operator binary not found" exit 1 fi # 检查 chaos_fuse 二进制文件 BLADE_VERSION=$(git describe --tags --abbrev=0 2>/dev/null | sed 's/^v//' || echo '0.0.0') BUILD_DIR="target/chaosblade-${BLADE_VERSION}" if [ -f "${BUILD_DIR}/bin/chaos_fuse" ]; then echo "✅ chaos_fuse binary found in ${BUILD_DIR}/bin/" file "${BUILD_DIR}/bin/chaos_fuse" ls -lh "${BUILD_DIR}/bin/chaos_fuse" else echo "❌ chaos_fuse binary not found in ${BUILD_DIR}/bin/" ls -la "${BUILD_DIR}/bin/" || echo "Build directory not found" exit 1 fi # 检查 YAML 文件 if [ -f "${BUILD_DIR}/yaml/chaosblade-k8s-spec-${BLADE_VERSION}.yaml" ]; then echo "✅ YAML specification file found" ls -lh "${BUILD_DIR}/yaml/" else echo "❌ YAML specification file not found" ls -la "${BUILD_DIR}/yaml/" || echo "YAML directory not found" exit 1 fi echo "=== Build Summary for ${{ matrix.platform }} ===" echo "All required files built successfully!" - name: Upload build artifacts uses: actions/upload-artifact@v4 with: name: ${{ matrix.platform }}-binaries path: | build/_output/bin/chaosblade-operator target/chaosblade-*/bin/ target/chaosblade-*/yaml/ retention-days: 7 ================================================ FILE: .github/workflows/release.yml ================================================ # Copyright 2025 The ChaosBlade Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. name: Release on: push: tags: - 'v*' workflow_dispatch: inputs: version: description: 'Version number (e.g: 1.8.0)' required: true default: '1.8.0' draft: description: 'Create as draft release' required: false type: boolean default: false env: REGISTRY: ghcr.io IMAGE_NAME: chaosblade-io/chaosblade-operator GO_VERSION: '1.25' permissions: contents: write packages: write jobs: build-and-push-images: name: Build and Push Images runs-on: ${{ matrix.runs-on }} strategy: matrix: include: - os: linux arch: amd64 runs-on: ubuntu-latest dockerfile: build/image/amd/Dockerfile image_suffix: "" image_tag: "ghcr.io/chaosblade-io/chaosblade-operator" - os: linux arch: arm64 runs-on: ubuntu-24.04-arm dockerfile: build/image/arm/Dockerfile image_suffix: "-arm64" image_tag: "ghcr.io/chaosblade-io/chaosblade-operator-arm64" outputs: version: ${{ steps.version.outputs.version }} steps: - name: Checkout code uses: actions/checkout@v4 - name: Extract version from tag id: version run: | if [ "${{ github.event_name }}" = "workflow_dispatch" ]; then VERSION="${{ github.event.inputs.version }}" else VERSION="${GITHUB_REF#refs/tags/}" VERSION="${VERSION#v}" fi echo "version=${VERSION}" >> $GITHUB_OUTPUT echo "tag=v${VERSION}" >> $GITHUB_OUTPUT - name: Set up Go uses: actions/setup-go@v4 with: go-version: ${{ env.GO_VERSION }} cache: true - name: Set up Docker Buildx uses: docker/setup-buildx-action@v3 - name: Log in to Container Registry uses: docker/login-action@v3 with: registry: ${{ env.REGISTRY }} username: ${{ secrets.GHCR_USER }} password: ${{ secrets.GHCR_PASSWORD }} - name: Build and 
Push ${{ matrix.arch }} image env: BLADE_VERSION: ${{ steps.version.outputs.version }} run: | echo "Building ${{ matrix.arch }} image..." echo "BLADE_VERSION: $BLADE_VERSION" echo "Image tag: ${{ matrix.image_tag }}:${{ steps.version.outputs.version }}" make build_linux_${{ matrix.arch }}_image echo "Built images:" podman images | grep chaosblade-operator echo "Pushing ${{ matrix.arch }} image..." podman push ${{ matrix.image_tag }}:${{ steps.version.outputs.version }} # Only push latest tag if version doesn't contain 'dev' if [[ "${{ steps.version.outputs.version }}" != *"dev"* ]]; then echo "Pushing latest tag..." podman tag ${{ matrix.image_tag }}:${{ steps.version.outputs.version }} ${{ matrix.image_tag }}:latest podman push ${{ matrix.image_tag }}:latest else echo "Skipping latest tag push for dev version: ${{ steps.version.outputs.version }}" fi package-and-upload-helm: name: Package and Upload Helm Charts runs-on: ubuntu-latest needs: build-and-push-images steps: - name: Checkout code uses: actions/checkout@v4 - name: Get version from previous job id: version run: | echo "version=${{ needs.build-and-push-images.outputs.version }}" >> $GITHUB_OUTPUT - name: Install Helm uses: azure/setup-helm@v3 with: version: v3.9.3 - name: Package Helm Charts env: BLADE_VERSION: ${{ steps.version.outputs.version }} run: | echo "Packaging Helm charts for version ${BLADE_VERSION}..." # Use Makefile tasks to build and package Helm charts make build_linux_amd64_helm make build_linux_arm64_helm echo "Generated Helm packages:" ls -la target/*.tgz - name: Setup OSSUTIL environment uses: yizhoumo/setup-ossutil@v1.1.3 env: BINARY_TAG: ${{ steps.version.outputs.version }} with: endpoint: ${{ secrets.OSS_ENDPOINT }} access-key-id: ${{ secrets.OSS_ACCESS_KEY_ID }} access-key-secret: ${{ secrets.OSS_ACCESS_KEY_SECRET }} ossutil-version: '1.7.14' - name: Upload Helm packages to OSS env: BINARY_TAG: ${{ steps.version.outputs.version }} run: | echo "Uploading Helm packages to OSS..." 
ossutil cp -f target/chaosblade-operator-amd64-${BINARY_TAG}.tgz oss://chaosblade/agent/github/${BINARY_TAG}/chaosblade-operator-amd64-${BINARY_TAG}.tgz ossutil cp -f target/chaosblade-operator-arm64-${BINARY_TAG}.tgz oss://chaosblade/agent/github/${BINARY_TAG}/chaosblade-operator-arm64-${BINARY_TAG}.tgz echo "Helm packages uploaded successfully" - name: Upload Helm packages as artifacts uses: actions/upload-artifact@v4 with: name: helm-packages path: | target/chaosblade-operator-amd64-${{ steps.version.outputs.version }}.tgz target/chaosblade-operator-arm64-${{ steps.version.outputs.version }}.tgz retention-days: 30 create-release: name: Create GitHub Release needs: [build-and-push-images, package-and-upload-helm] runs-on: ubuntu-latest steps: - name: Checkout code uses: actions/checkout@v4 - name: Get version from previous job id: version run: | echo "version=${{ needs.build-and-push-images.outputs.version }}" >> $GITHUB_OUTPUT - name: Download Helm packages uses: actions/download-artifact@v4 with: name: helm-packages - name: List downloaded files for debugging run: | echo "Current directory contents:" ls -la echo "Looking for .tgz files:" find . 
-name "*.tgz" -type f - name: Create GitHub Release uses: softprops/action-gh-release@v1 with: tag_name: v${{ steps.version.outputs.version }} name: v${{ steps.version.outputs.version }} generate_release_notes: true body: | ### Helm Package Downloads - [chaosblade-operator-amd64 (AMD64)](https://github.com/${{ github.repository }}/releases/download/v${{ steps.version.outputs.version }}/chaosblade-operator-amd64-${{ steps.version.outputs.version }}.tgz) - [chaosblade-operator-arm64 (ARM64)](https://github.com/${{ github.repository }}/releases/download/v${{ steps.version.outputs.version }}/chaosblade-operator-arm64-${{ steps.version.outputs.version }}.tgz) ### Docker Images ```bash # AMD64 docker pull ghcr.io/chaosblade-io/chaosblade-operator:v${{ steps.version.outputs.version }} # ARM64 docker pull ghcr.io/chaosblade-io/chaosblade-operator-arm64:v${{ steps.version.outputs.version }} ``` ### Helm Installation ```bash # AMD64 helm install chaosblade-operator ./chaosblade-operator-amd64-${{ steps.version.outputs.version }}.tgz # ARM64 helm install chaosblade-operator ./chaosblade-operator-arm64-${{ steps.version.outputs.version }}.tgz ``` ### OSS Download Links - [chaosblade-operator-amd64-${{ steps.version.outputs.version }}.tgz](https://chaosblade.oss-cn-hangzhou.aliyuncs.com/agent/github/${{ steps.version.outputs.version }}/chaosblade-operator-amd64-${{ steps.version.outputs.version }}.tgz) - [chaosblade-operator-arm64-${{ steps.version.outputs.version }}.tgz](https://chaosblade.oss-cn-hangzhou.aliyuncs.com/agent/github/${{ steps.version.outputs.version }}/chaosblade-operator-arm64-${{ steps.version.outputs.version }}.tgz) files: | chaosblade-operator-amd64-${{ steps.version.outputs.version }}.tgz chaosblade-operator-arm64-${{ steps.version.outputs.version }}.tgz draft: ${{ github.event.inputs.draft == 'true' }} prerelease: false ================================================ FILE: .gitignore ================================================ # Temporary Build Files 
build/_output build/_test target # Created by https://www.gitignore.io/api/go,vim,emacs,visualstudiocode ### Emacs ### # -*- mode: gitignore; -*- *~ \#*\# /.emacs.desktop /.emacs.desktop.lock *.elc auto-save-list tramp .\#* # Org-mode .org-id-locations *_archive # flymake-mode *_flymake.* # eshell files /eshell/history /eshell/lastdir # elpa packages /elpa/ # reftex files *.rel # AUCTeX auto folder /auto/ # cask packages .cask/ dist/ # Flycheck flycheck_*.el # server auth directory /server/ # projectiles files .projectile projectile-bookmarks.eld deploy/helm/chaosblade-operator-*.tgz # directory configuration .dir-locals.el # saveplace places # url cache url/cache/ # cedet ede-projects.el # smex smex-items # company-statistics company-statistics-cache.el # anaconda-mode anaconda-mode/ ### Go ### # Binaries for programs and plugins *.exe *.exe~ *.dll *.so *.dylib .DS_Store # Test binary, build with 'go test -c' *.test # Output of the go coverage tool, specifically when used with LiteIDE *.out vendor coverage.txt ### Vim ### # swap .sw[a-p] .*.sw[a-p] # session Session.vim # temporary .netrwhist # auto-generated tag files tags ### VisualStudioCode ### .vscode/* .history # End of https://www.gitignore.io/api/go,vim,emacs,visualstudiocode ### Goland ### .idea ### vendor ### vendor ### project ### build/cache ================================================ FILE: BUILD.md ================================================ # ChaosBlade Operator Build Guide This document describes how to build the ChaosBlade Operator project. The project supports multi-platform builds, including Linux AMD64 and ARM64 architectures. 
## Table of Contents - [Prerequisites](#prerequisites) - [Environment Variables](#environment-variables) - [Build Targets](#build-targets) - [Container Image Building](#container-image-building) - [Testing](#testing) - [Cleanup](#cleanup) - [Troubleshooting](#troubleshooting) ## Prerequisites ### Basic Requirements - **Go 1.21+**: For compiling Go code - **Git**: For retrieving version information - **Make**: For executing build scripts ### Container Runtime (Optional) The project automatically detects available container runtimes: - **Docker**: Recommended - **Podman**: As an alternative to Docker ### Cross-compilation Toolchain (Optional) For Linux builds of the `chaos_fuse` component, one of the following tools is required: #### Linux Systems ```bash # Ubuntu/Debian - Basic build tools sudo apt-get install musl-tools build-essential # For ARM64 cross-compilation (when building on x86_64 for ARM64) sudo apt-get install gcc-aarch64-linux-gnu g++-aarch64-linux-gnu # For native ARM64 compilation (when building on ARM64 for ARM64) # The standard gcc from build-essential is sufficient ``` #### macOS Systems ```bash # Install musl cross-compiler using Homebrew brew install FiloSottile/musl-cross/musl-cross ``` ### Platform Compatibility - **Build Platforms**: macOS, Linux - **Target Platforms**: Linux AMD64, Linux ARM64 - **Container Platforms**: Multi-architecture container builds supported ## Environment Variables ### Version-related | Variable | Description | Default | Example | |----------|-------------|---------|----------| | `BLADE_VERSION` | Version number | Git tag (e.g., `1.7.4`) | `1.8.0` | | `BLADE_VENDOR` | Vendor identifier | `community` | `enterprise` | ### Build-related | Variable | Description | Default | Example | |----------|-------------|---------|----------| | `CONTAINER_RUNTIME` | Container runtime | Auto-detected | `docker`, `podman` | | `JVM_SPEC_PATH` | JVM specification file path | None | `/path/to/jvm/spec` | ### Example Configuration ```bash 
export BLADE_VERSION=1.8.0 export BLADE_VENDOR=community export CONTAINER_RUNTIME=docker ``` ## Build Targets ### View Help Information ```bash make help ``` Displays all available build targets and environment variable descriptions. ### View Version Information ```bash make show-version ``` Displays current build version information, including: - Version number - Vendor - Git commit hash - Git branch - Build time - Go version - Target platform ### Complete Platform Builds #### Linux AMD64 Platform ```bash make linux_amd64 ``` Builds: - `chaosblade-operator` (Linux AMD64) - `chaos_fuse` (Linux AMD64) - YAML specification files #### Linux ARM64 Platform ```bash make linux_arm64 ``` Builds: - `chaosblade-operator` (Linux ARM64) - `chaos_fuse` (Linux ARM64) - YAML specification files ### Individual Component Builds #### Build Operator ```bash make operator GOOS=linux GOARCH=amd64 ``` #### Build chaos_fuse ```bash make chaos_fuse GOOS=linux GOARCH=amd64 ``` #### Generate YAML Specifications ```bash make yaml # Or generate YAML only make only_yaml ``` #### Build Local Binary ```bash make build_binary ``` ## Container Image Building ### Linux AMD64 Image ```bash make build_linux_amd64_image ``` Builds and tags as: `ghcr.io/chaosblade-io/chaosblade-operator:${BLADE_VERSION}` ### Linux ARM64 Image ```bash make build_linux_arm64_image ``` Builds and tags as: `ghcr.io/chaosblade-io/chaosblade-operator-arm64:${BLADE_VERSION}` ### Push Images ```bash make push_image ``` Pushes the built images to GitHub Container Registry. 
## Testing Run the project test suite: ```bash make test ``` This command will: - Run all test cases - Enable race detection - Generate code coverage report (`coverage.txt`) ## Cleanup Clean all build artifacts: ```bash make clean ``` Cleans: - Go build cache - `target/` directory - Build image directories ## Build Output ### Directory Structure ``` target/chaosblade-${BLADE_VERSION}/ ├── bin/ │ └── chaos_fuse # File system hook tool └── yaml/ └── chaosblade-k8s-spec-${BLADE_VERSION}.yaml # Kubernetes specification file ``` ### Temporary Build Files ``` build/_output/bin/ ├── chaosblade-operator # Temporarily built operator file └── spec # Specification generator tool ``` ### File Description * `chaos_fuse` and `chaosblade-k8s-spec-${BLADE_VERSION}.yaml` need to be packaged into chaosblade for use (can be compiled and packaged directly in the chaosblade project); * `chaosblade-operator` needs to be packaged into the chaosblade-operator image for use (can be compiled directly using build_linux_xxx_image tasks); ## Troubleshooting ### chaos_fuse Build Failure **Issue**: Missing cross-compilation toolchain **Solutions**: 1. **Preferred approach**: Install appropriate cross-compiler ```bash # macOS brew install FiloSottile/musl-cross/musl-cross # Linux sudo apt-get install musl-tools ``` 2. **Alternative approach**: Use container build ```bash # Ensure Docker/Podman is running docker info # or podman info ``` 3. **Manually specify container runtime**: ```bash CONTAINER_RUNTIME=podman make chaos_fuse GOOS=linux GOARCH=amd64 ``` ### Container Build Failure **Issue**: Container runtime unavailable **Solutions**: 1. Check Docker/Podman status ```bash docker info # or podman info ``` 2. Start Docker service ```bash # macOS/Linux sudo systemctl start docker # or use Docker Desktop ``` ### Version Information Retrieval Failure **Issue**: Git repository information unavailable **Solutions**: 1. Ensure running build within Git repository 2. 
Manually specify version: ```bash BLADE_VERSION=1.8.0 make linux_amd64 ``` ### Permission Issues **Issue**: File permission errors **Solutions**: 1. Check directory permissions 2. Run build with appropriate user permissions 3. For container builds, ensure SELinux compatibility (`:Z` flag) ## Advanced Usage ### Custom Build Flags ```bash # Add custom ldflags GO_FLAGS="-ldflags '-X main.customFlag=value'" make operator ``` ### Parallel Builds ```bash # Use parallel builds for acceleration make -j4 linux_amd64 ``` ### Debug Builds ```bash # Enable verbose output make V=1 linux_amd64 ``` ## Contributing Guidelines When building new features or fixes: 1. Ensure all build targets work properly 2. Run complete test suite: `make test` 3. Verify cross-platform builds: `make linux_amd64 linux_arm64` 4. Check code coverage reports ## Related Documentation - [Contributing Guide](CONTRIBUTING.md) - [Changelog](CHANGELOG.md) ================================================ FILE: CHANGELOG.md ================================================ # ChaosBlade Operator 变更日志 本文档记录了 ChaosBlade Operator 的所有重要变更。 格式基于 [Keep a Changelog](https://keepachangelog.com/zh-CN/1.0.0/), 并且本项目遵循 [语义化版本](https://semver.org/lang/zh-CN/)。 ## [未发布] ### 新增 - 支持 Git Tag 自动化构建流程 - 增强版本信息展示,包含构建时间、Git提交等 - 新增版本命令支持 (`--version` 标志) ### 变更 - 优化 Makefile 构建流程 - 改进版本信息注入机制 ### 修复 - 修复版本信息显示问题 ## [1.7.4] - 2024-01-01 ### 新增 - 初始版本发布 - 支持基本的混沌工程功能 - 支持 Kubernetes 环境 ### 变更 - 基础架构搭建 - 核心功能实现 --- ## 变更类型说明 - **新增**: 新功能 - **变更**: 现有功能的变更 - **弃用**: 即将移除的功能 - **移除**: 已移除的功能 - **修复**: Bug修复 - **安全**: 安全相关修复 ## 如何贡献 1. 在 `## [未发布]` 部分添加你的变更 2. 使用上述变更类型标签 3. 提供清晰的变更描述 4. 如果是重大变更,请提供迁移指南 ## 版本发布流程 1. 创建 Git 标签: `git tag v1.8.0` 2. 推送标签: `git push origin v1.8.0` 3. GitHub Actions 自动触发构建 4. 自动创建 Release 和上传构建产物 5. 
更新 CHANGELOG.md 中的版本信息 ================================================ FILE: CONTRIBUTING.md ================================================ # Contributing to chaosblade Welcome to ChaosBlade world, here is a contributing guide for you. If you find something incorrect or missing content in the page, please submit an issue or PR to fix it. ## What can you do Every action to make the project better is encouraged. On GitHub, every improvement for the project could be via a PR (short for pull request). * If you find a typo, try to fix it! * If you find a bug, try to fix it! * If you find some redundant codes, try to remove them! * If you find some test cases missing, try to add them! * If you could enhance a feature, please **DO NOT** hesitate! * If you find code unclear, try to add comments to make it clear! * If you find code ugly, try to refactor that! * If you can help to improve documents, it could not be better! * If you find a document incorrect, just fix it! * ... Actually, it is impossible to list them completely. Just remember one principle: **WE ARE LOOKING FORWARD TO ANY PR FROM YOU.** ## Contributing ### Preparation Before you contribute, you need to register a GitHub ID. Prepare the following environment: * go * git ### Workflow We use the `master` branch as the development branch, which indicates that this is an unstable branch. Here is the workflow for contributors: 1. Fork to your own repository 2. Clone the fork to your local repository 3. Create a new branch and work on it 4. Keep your branch in sync 5. Commit your changes (make sure your commit message is concise) 6. Push your commits to your forked repository 7. Create a pull request Please follow [the pull request template](./.github/PULL_REQUEST_TEMPLATE.md). Please make sure the PR has a corresponding issue. After creating a PR, one or more reviewers will be assigned to the pull request. The reviewers will review the code. 
Before merging a PR, squash any fix-review-feedback, typo, merge, and rebase sorts of commits. The final commit message should be clear and concise. ### Compile Go to the project root directory which you cloned and execute compile: ```bash make ``` If you compile the Linux package on the Mac operating system, you can do: ```bash make build_linux ``` If you compile the chaosblade image, you can do: ```bash make build_image ``` Clean compilation artifacts: ```bash make clean ``` ### Code Style See details of [CODE STYLE](./docs/code_styles.md) ### Commit Rules #### Commit Message Commit messages could help reviewers better understand the purpose of the submitted PR. They could help accelerate the code review procedure as well. We encourage contributors to use an **EXPLICIT** commit message rather than an ambiguous one. In general, we advocate the following commit message types: * feat: A new feature * fix: A bug fix * docs: Documentation only changes * style: Changes that do not affect the meaning of the code (white-space, formatting, missing semi-colons, etc) * refactor: A code change that neither fixes a bug nor adds a feature * perf: A code change that improves performance * test: Adding missing or correcting existing tests * chore: Changes to the build process or auxiliary tools and libraries such as documentation generation On the other side, we discourage commit messages like the following: * ~~fix bug~~ * ~~update~~ * ~~add doc~~ If you get lost, please see [How to Write a Git Commit Message](http://chris.beams.io/posts/git-commit/) for a start. #### Commit Content Commit content represents all content changes included in one commit. We had better include things in one single commit which could support the reviewer's complete review without any other commits' help. In other words, contents in one single commit can pass the CI to avoid code mess. 
In brief, there are two minor rules for us to keep in mind: * avoid very large change in a commit; * complete and reviewable for each commit. No matter commit message or commit content, we do take more emphasis on code review. ### Pull Request We use [GitHub Issues](https://github.com/chaosblade-io/chaosblade-exec-jvm/issues) and [Pull Requests](https://github.com/chaosblade-io/chaosblade-exec-jvm/pulls) for trackers. If you find a typo in document, find a bug in code, or want new features, or want to give suggestions, you can [open an issue on GitHub](https://github.com/chaosblade-io/chaosblade-exec-jvm/issues/new) to report it. Please follow the guideline message in the issue template. If you want to contribute, please follow the [contribution workflow](#Workflow) and create a new pull request. If your PR contains large changes, e.g. component refactor or new components, please write detailed documents about its design and usage. Note that a single PR should not be too large. If heavy changes are required, it's better to separate the changes to a few individual PRs. ### Code Review All code should be well reviewed by one or more committers. Some principles: - Readability: Important code should be well-documented. Comply with our code style. - Elegance: New functions, classes or components should be well designed. - Testability: Important code should be well-tested (high unit test coverage). 
## Others ### Code of Conduct *"In the interest of fostering an open and welcoming environment, we as contributors and maintainers pledge to make participation in our project and our community a harassment-free experience for everyone, regardless of age, body size, disability, ethnicity, sex characteristics, gender identity and expression, level of experience, education, socio-economic status, nationality, personal appearance, race, religion, or sexual identity and orientation..."* See details of [CONTRIBUTOR COVENANT CODE OF CONDUCT](https://github.com/chaosblade-io/chaosblade-exec-jvm/blob/master/CODE_OF_CONDUCT.md) ### Sign your work The sign-off is a simple line at the end of the explanation for the patch, which certifies that you wrote it or otherwise have the right to pass it on as an open-source patch. The rules are pretty simple: if you can certify the below (from [developercertificate.org](http://developercertificate.org/)): ``` Developer Certificate of Origin Version 1.1 Copyright (C) 2004, 2006 The Linux Foundation and its contributors. 660 York Street, Suite 102, San Francisco, CA 94110 USA Everyone is permitted to copy and distribute verbatim copies of this license document, but changing it is not allowed. Developer's Certificate of Origin 1.1 By making a contribution to this project, I certify that: (a) The contribution was created in whole or in part by me and I have the right to submit it under the open source license indicated in the file; or (b) The contribution is based upon previous work that, to the best of my knowledge, is covered under an appropriate open source license and I have the right under that license to submit that work with modifications, whether created in whole or in part by me, under the same open source license (unless I am permitted to submit under a different license), as indicated in the file; or (c) The contribution was provided directly to me by some other person who certified (a), (b) or (c) and I have not modified it. 
(d) I understand and agree that this project and the contribution are public and that a record of the contribution (including all personal information I submit with it, including my sign-off) is maintained indefinitely and may be redistributed consistent with this project or the open source license(s) involved. ``` Then you just add a line to every git commit message: ``` Signed-off-by: Joe Smith ``` Use your real name (sorry, no pseudonyms or anonymous contributions.) If you set your `user.name` and `user.email` git configs, you can sign your commit automatically with `git commit -s`. ================================================ FILE: LICENSE ================================================ Apache License Version 2.0, January 2004 http://www.apache.org/licenses/ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 1. Definitions. "License" shall mean the terms and conditions for use, reproduction, and distribution as defined by Sections 1 through 9 of this document. "Licensor" shall mean the copyright owner or entity authorized by the copyright owner that is granting the License. "Legal Entity" shall mean the union of the acting entity and all other entities that control, are controlled by, or are under common control with that entity. For the purposes of this definition, "control" means (i) the power, direct or indirect, to cause the direction or management of such entity, whether by contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the outstanding shares, or (iii) beneficial ownership of such entity. "You" (or "Your") shall mean an individual or Legal Entity exercising permissions granted by this License. "Source" form shall mean the preferred form for making modifications, including but not limited to software source code, documentation source, and configuration files. 
"Object" form shall mean any form resulting from mechanical transformation or translation of a Source form, including but not limited to compiled object code, generated documentation, and conversions to other media types. "Work" shall mean the work of authorship, whether in Source or Object form, made available under the License, as indicated by a copyright notice that is included in or attached to the work (an example is provided in the Appendix below). "Derivative Works" shall mean any work, whether in Source or Object form, that is based on (or derived from) the Work and for which the editorial revisions, annotations, elaborations, or other modifications represent, as a whole, an original work of authorship. For the purposes of this License, Derivative Works shall not include works that remain separable from, or merely link (or bind by name) to the interfaces of, the Work and Derivative Works thereof. "Contribution" shall mean any work of authorship, including the original version of the Work and any modifications or additions to that Work or Derivative Works thereof, that is intentionally submitted to Licensor for inclusion in the Work by the copyright owner or by an individual or Legal Entity authorized to submit on behalf of the copyright owner. For the purposes of this definition, "submitted" means any form of electronic, verbal, or written communication sent to the Licensor or its representatives, including but not limited to communication on electronic mailing lists, source code control systems, and issue tracking systems that are managed by, or on behalf of, the Licensor for the purpose of discussing and improving the Work, but excluding communication that is conspicuously marked or otherwise designated in writing by the copyright owner as "Not a Contribution." "Contributor" shall mean Licensor and any individual or Legal Entity on behalf of whom a Contribution has been received by Licensor and subsequently incorporated within the Work. 2. 
Grant of Copyright License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable copyright license to reproduce, prepare Derivative Works of, publicly display, publicly perform, sublicense, and distribute the Work and such Derivative Works in Source or Object form. 3. Grant of Patent License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable (except as stated in this section) patent license to make, have made, use, offer to sell, sell, import, and otherwise transfer the Work, where such license applies only to those patent claims licensable by such Contributor that are necessarily infringed by their Contribution(s) alone or by combination of their Contribution(s) with the Work to which such Contribution(s) was submitted. If You institute patent litigation against any entity (including a cross-claim or counterclaim in a lawsuit) alleging that the Work or a Contribution incorporated within the Work constitutes direct or contributory patent infringement, then any patent licenses granted to You under this License for that Work shall terminate as of the date such litigation is filed. 4. Redistribution. 
You may reproduce and distribute copies of the Work or Derivative Works thereof in any medium, with or without modifications, and in Source or Object form, provided that You meet the following conditions: (a) You must give any other recipients of the Work or Derivative Works a copy of this License; and (b) You must cause any modified files to carry prominent notices stating that You changed the files; and (c) You must retain, in the Source form of any Derivative Works that You distribute, all copyright, patent, trademark, and attribution notices from the Source form of the Work, excluding those notices that do not pertain to any part of the Derivative Works; and (d) If the Work includes a "NOTICE" text file as part of its distribution, then any Derivative Works that You distribute must include a readable copy of the attribution notices contained within such NOTICE file, excluding those notices that do not pertain to any part of the Derivative Works, in at least one of the following places: within a NOTICE text file distributed as part of the Derivative Works; within the Source form or documentation, if provided along with the Derivative Works; or, within a display generated by the Derivative Works, if and wherever such third-party notices normally appear. The contents of the NOTICE file are for informational purposes only and do not modify the License. You may add Your own attribution notices within Derivative Works that You distribute, alongside or as an addendum to the NOTICE text from the Work, provided that such additional attribution notices cannot be construed as modifying the License. You may add Your own copyright statement to Your modifications and may provide additional or different license terms and conditions for use, reproduction, or distribution of Your modifications, or for any such Derivative Works as a whole, provided Your use, reproduction, and distribution of the Work otherwise complies with the conditions stated in this License. 5. 
Submission of Contributions. Unless You explicitly state otherwise, any Contribution intentionally submitted for inclusion in the Work by You to the Licensor shall be under the terms and conditions of this License, without any additional terms or conditions. Notwithstanding the above, nothing herein shall supersede or modify the terms of any separate license agreement you may have executed with Licensor regarding such Contributions. 6. Trademarks. This License does not grant permission to use the trade names, trademarks, service marks, or product names of the Licensor, except as required for reasonable and customary use in describing the origin of the Work and reproducing the content of the NOTICE file. 7. Disclaimer of Warranty. Unless required by applicable law or agreed to in writing, Licensor provides the Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, including, without limitation, any warranties or conditions of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are solely responsible for determining the appropriateness of using or redistributing the Work and assume any risks associated with Your exercise of permissions under this License. 8. Limitation of Liability. 
In no event and under no legal theory, whether in tort (including negligence), contract, or otherwise, unless required by applicable law (such as deliberate and grossly negligent acts) or agreed to in writing, shall any Contributor be liable to You for damages, including any direct, indirect, special, incidental, or consequential damages of any character arising as a result of this License or out of the use or inability to use the Work (including but not limited to damages for loss of goodwill, work stoppage, computer failure or malfunction, or any and all other commercial damages or losses), even if such Contributor has been advised of the possibility of such damages. 9. Accepting Warranty or Additional Liability. While redistributing the Work or Derivative Works thereof, You may choose to offer, and charge a fee for, acceptance of support, warranty, indemnity, or other liability obligations and/or rights consistent with this License. However, in accepting such obligations, You may act only on Your own behalf and on Your sole responsibility, not on behalf of any other Contributor, and only if You agree to indemnify, defend, and hold each Contributor harmless for any liability incurred by, or claims asserted against, such Contributor by reason of your accepting any such warranty or additional liability. END OF TERMS AND CONDITIONS APPENDIX: How to apply the Apache License to your work. To apply the Apache License to your work, attach the following boilerplate notice, with the fields enclosed by brackets "{}" replaced with your own identifying information. (Don't include the brackets!) The text should be enclosed in the appropriate comment syntax for the file format. We also recommend that a file or class name and description of purpose be included on the same "printed page" as the copyright notice for easier identification within third-party archives. Copyright 1999-2019 Alibaba Group Holding Ltd. 
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ================================================ FILE: Makefile ================================================ # Copyright 2025 The ChaosBlade Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
.PHONY: show-version linux_amd64 linux_arm64 pre_build operator chaos_fuse yaml build_binary build_linux_amd64_image build_linux_arm64_image build_linux_amd64_helm build_linux_arm64_helm build_linux_amd64_release build_linux_arm64_release push_image test clean help # Default target - show help when no target is specified .DEFAULT_GOAL := help # Container runtime configuration - compatible with Docker and Podman # Auto-detect available container runtime ifeq ($(CONTAINER_RUNTIME),) ifeq ($(shell command -v podman >/dev/null 2>&1 && podman info >/dev/null 2>&1 && echo "podman"),podman) CONTAINER_RUNTIME := podman else ifeq ($(shell command -v docker >/dev/null 2>&1 && docker info >/dev/null 2>&1 && echo "docker"),docker) CONTAINER_RUNTIME := docker else CONTAINER_RUNTIME := docker endif endif # Get current platform information CURRENT_OS := $(shell uname -s | tr '[:upper:]' '[:lower:]') CURRENT_ARCH := $(shell uname -m) ifeq ($(CURRENT_ARCH),x86_64) CURRENT_ARCH := amd64 else ifeq ($(CURRENT_ARCH),aarch64) CURRENT_ARCH := arm64 endif GOOS := $(shell go env GOOS) GOARCH := $(shell go env GOARCH) UNAME := $(shell uname) # Version information retrieval ifeq ($(BLADE_VERSION), ) BLADE_VERSION := $(shell git describe --tags --abbrev=0 2>/dev/null | sed 's/^v//' || echo "0.0.0") endif ifeq ($(BLADE_VENDOR), ) BLADE_VENDOR=community endif # Dynamically get Git information GIT_COMMIT := $(shell git rev-parse --short HEAD 2>/dev/null || echo "unknown") GIT_BRANCH := $(shell git rev-parse --abbrev-ref HEAD 2>/dev/null || echo "unknown") BUILD_TIME := $(shell date -u '+%Y-%m-%dT%H:%M:%SZ') GO_VERSION := $(shell go version | awk '{print $$3}') PLATFORM := $(shell echo "$(GOOS)/$(GOARCH)") BUILD_TARGET=target BUILD_TARGET_DIR_NAME=chaosblade-$(BLADE_VERSION) BUILD_TARGET_PKG_DIR=$(BUILD_TARGET)/chaosblade-$(BLADE_VERSION) BUILD_TARGET_BIN=$(BUILD_TARGET_PKG_DIR)/bin BUILD_TARGET_YAML=$(BUILD_TARGET_PKG_DIR)/yaml BUILD_IMAGE_PATH=build/image/blade 
OS_YAML_FILE_NAME=chaosblade-k8s-spec-$(BLADE_VERSION).yaml
OS_YAML_FILE_PATH=$(BUILD_TARGET_YAML)/$(OS_YAML_FILE_NAME)
VERSION_PKG=github.com/chaosblade-io/chaosblade-operator/version

# Complete version information ldflags
VERSION_LDFLAGS=-X=$(VERSION_PKG).Version=$(BLADE_VERSION) \
	-X=$(VERSION_PKG).Product=$(BLADE_VENDOR) \
	-X=$(VERSION_PKG).BuildTime=$(BUILD_TIME) \
	-X=$(VERSION_PKG).GitCommit=$(GIT_COMMIT) \
	-X=$(VERSION_PKG).GitBranch=$(GIT_BRANCH) \
	-X=$(VERSION_PKG).GoVersion=$(GO_VERSION) \
	-X=$(VERSION_PKG).Platform=$(PLATFORM) \
	-X=$(VERSION_PKG).CombinedVersion=$(BLADE_VERSION),$(BLADE_VENDOR)

GO_FLAGS=-ldflags "$(VERSION_LDFLAGS)"
GO_FLAGS_WITH_STATIC=-ldflags="-linkmode external -extldflags -static $(VERSION_LDFLAGS)"

# Cross-compilation CC detection for chaos_fuse.
# Resolution order: musl toolchains first (static linking), then plain gcc
# cross-compilers, finally falling back to a container build ("container").
define detect_cc
$(strip $(if $(and $(filter amd64,$(GOARCH)),$(shell command -v musl-gcc 2>/dev/null)),musl-gcc,\
$(if $(and $(filter amd64,$(GOARCH)),$(wildcard /usr/local/musl/bin/musl-gcc)),/usr/local/musl/bin/musl-gcc,\
$(if $(and $(filter amd64,$(GOARCH)),$(shell command -v x86_64-linux-musl-gcc 2>/dev/null)),x86_64-linux-musl-gcc,\
$(if $(and $(filter arm64,$(GOARCH)),$(shell command -v aarch64-linux-musl-gcc 2>/dev/null)),aarch64-linux-musl-gcc,\
$(if $(and $(filter amd64,$(GOARCH)),$(shell command -v gcc 2>/dev/null)),gcc,\
$(if $(and $(filter arm64,$(GOARCH)),$(shell command -v aarch64-linux-gnu-gcc 2>/dev/null)),aarch64-linux-gnu-gcc,\
$(if $(and $(filter arm64,$(GOARCH)),$(shell command -v gcc 2>/dev/null)),gcc,\
container))))))))
endef
CC_FOR_CHAOS_FUSE := $(call detect_cc)

# Display version information
show-version:
	@echo "=== Build Version Information ==="
	@echo "Version: $(BLADE_VERSION)"
	@echo "Vendor: $(BLADE_VENDOR)"
	@echo "Git Commit: $(GIT_COMMIT)"
	@echo "Git Branch: $(GIT_BRANCH)"
	@echo "Build Time: $(BUILD_TIME)"
	@echo "Go Version: $(GO_VERSION)"
	@echo "Platform: $(PLATFORM)"
	@echo "=================="

# Linux AMD64 platform build
linux_amd64: show-version pre_build
	@echo "Building Linux AMD64 platform components..."
	$(MAKE) operator GOOS=linux GOARCH=amd64
	@echo "chaosblade-operator build completed"
	$(MAKE) chaos_fuse GOOS=linux GOARCH=amd64
	@echo "chaos_fuse build completed"
	$(MAKE) yaml GOOS=linux GOARCH=amd64
	@echo "YAML specification file generation completed"
	@echo "Linux AMD64 platform build completed"

# Linux ARM64 platform build
linux_arm64: show-version pre_build
	@echo "Building Linux ARM64 platform components..."
	$(MAKE) operator GOOS=linux GOARCH=arm64
	@echo "chaosblade-operator build completed"
	$(MAKE) chaos_fuse GOOS=linux GOARCH=arm64
	@echo "chaos_fuse build completed"
	$(MAKE) yaml GOOS=linux GOARCH=arm64
	@echo "YAML specification file generation completed"
	@echo "Linux ARM64 platform build completed"

pre_build:
	@mkdir -p $(BUILD_TARGET_BIN) $(BUILD_TARGET_YAML) build/_output/bin

operator:
	@echo "Building chaosblade-operator for $(GOOS)/$(GOARCH)..."
	@CGO_ENABLED=0 GOOS=$(GOOS) GOARCH=$(GOARCH) go build $(GO_FLAGS) -o build/_output/bin/chaosblade-operator cmd/manager/main.go

chaos_fuse: ## Build chaos_fuse for Linux (supports cross-compilation from macOS)
ifeq ($(GOOS),linux)
	@echo "Detected CC for chaos_fuse: $(CC_FOR_CHAOS_FUSE)"
	@if [ "$(CC_FOR_CHAOS_FUSE)" != "container" ]; then \
		echo "Building chaos_fuse for Linux $(GOARCH) using $(CC_FOR_CHAOS_FUSE)..."; \
		CC=$(CC_FOR_CHAOS_FUSE) CGO_ENABLED=1 go build $(GO_FLAGS) -o $(BUILD_TARGET_BIN)/chaos_fuse cmd/hookfs/main.go; \
	elif command -v $(CONTAINER_RUNTIME) >/dev/null 2>&1 && $(CONTAINER_RUNTIME) info >/dev/null 2>&1; then \
		echo "Building chaos_fuse for Linux $(GOARCH) using $(CONTAINER_RUNTIME)..."; \
		if [ "$(GOARCH)" = "amd64" ]; then \
			$(CONTAINER_RUNTIME) run --rm -v $(PWD):/src:Z -w /src --platform linux/amd64 golang:1.21-alpine sh -c "apk add --no-cache musl-dev gcc && cd /src && CGO_ENABLED=1 go build $(GO_FLAGS) -o /src/$(BUILD_TARGET_BIN)/chaos_fuse cmd/hookfs/main.go" >/dev/null 2>&1; \
		elif [ "$(GOARCH)" = "arm64" ]; then \
			$(CONTAINER_RUNTIME) run --rm -v $(PWD):/src:Z -w /src golang:1.21-alpine sh -c "apk add --no-cache musl-dev gcc && cd /src && CGO_ENABLED=1 GOARCH=arm64 GOOS=linux go build $(GO_FLAGS) -o /src/$(BUILD_TARGET_BIN)/chaos_fuse cmd/hookfs/main.go" >/dev/null 2>&1; \
		else \
			echo "Unsupported architecture $(GOARCH) for chaos_fuse"; \
		fi; \
	else \
		echo "Warning: No suitable cross-compilation toolchain found for chaos_fuse"; \
		echo "Available options:"; \
		echo " 1. Install musl-tools: apt-get install musl-tools (Ubuntu/Debian)"; \
		echo " 2. Install musl-gcc: brew install FiloSottile/musl-cross/musl-cross (macOS)"; \
		echo " 3. Install specific cross-compilers for ARM64: apt-get install gcc-aarch64-linux-gnu g++-aarch64-linux-gnu"; \
		echo " 4. Use Docker/Podman with proper platform emulation"; \
	fi
else
	@echo "Skipping chaos_fuse build on $(GOOS) for target - Linux only"
endif

# Generate the k8s spec YAML; the generator itself runs on the host, so it is
# built for CURRENT_OS/CURRENT_ARCH regardless of the target GOOS/GOARCH.
yaml: build/spec.go
	@echo "Building spec generator..."
	@GOOS=$(CURRENT_OS) GOARCH=$(CURRENT_ARCH) go build $(GO_FLAGS) -o build/_output/bin/spec $<
	@echo "Generating YAML specifications..."
	@GOOS=$(CURRENT_OS) GOARCH=$(CURRENT_ARCH) build/_output/bin/spec $(OS_YAML_FILE_PATH) $(if $(JVM_SPEC_PATH),$(JVM_SPEC_PATH),)

only_yaml: pre_build yaml

# Build binary files and display version information
build_binary: show-version
	CGO_ENABLED=0 go build $(GO_FLAGS) -o $(BUILD_TARGET_BIN)/chaosblade-operator cmd/manager/main.go
	@echo "Binary file build completed: $(BUILD_TARGET_BIN)/chaosblade-operator"
	@echo "Version information:"
	@$(BUILD_TARGET_BIN)/chaosblade-operator version 2>/dev/null || echo "Unable to get version information"

##----------------------------------------------------------------------------
# build image
build_linux_amd64_image:
	CGO_ENABLED=0 GOOS="linux" GOARCH="amd64" go build $(GO_FLAGS) -o build/_output/bin/chaosblade-operator cmd/manager/main.go
	$(CONTAINER_RUNTIME) buildx build -f build/image/amd/Dockerfile --platform=linux/amd64 -t ghcr.io/chaosblade-io/chaosblade-operator:${BLADE_VERSION} .
build_linux_arm64_image: CGO_ENABLED=0 GOOS="linux" GOARCH="arm64" go build $(GO_FLAGS) -o build/_output/bin/chaosblade-operator cmd/manager/main.go $(CONTAINER_RUNTIME) buildx build -f build/image/arm/Dockerfile --platform=linux/arm64 -t ghcr.io/chaosblade-io/chaosblade-operator-arm64:${BLADE_VERSION} . push_image: $(CONTAINER_RUNTIME) push ghcr.io/chaosblade-io/chaosblade-operator:${BLADE_VERSION} $(CONTAINER_RUNTIME) push ghcr.io/chaosblade-io/chaosblade-operator-arm64:${BLADE_VERSION} # Build Helm packages with version updates build_linux_amd64_helm: show-version pre_build @echo "Building Linux AMD64 Helm package..." @# Update Chart.yaml versions @sed -i.bak 's/^appVersion: ".*"/appVersion: "$(BLADE_VERSION)"/' deploy/helm/chaosblade-operator/Chart.yaml @sed -i.bak 's/^version: .*/version: $(BLADE_VERSION)/' deploy/helm/chaosblade-operator/Chart.yaml @# Update values.yaml versions @sed -i.bak 's/^ version: .*/ version: $(BLADE_VERSION)/' deploy/helm/chaosblade-operator/values.yaml @sed -i.bak 's/^ version: .*/ version: $(BLADE_VERSION)/' deploy/helm/chaosblade-operator/values.yaml @# Clean up backup files @rm -f deploy/helm/chaosblade-operator/Chart.yaml.bak deploy/helm/chaosblade-operator/values.yaml.bak @# Package Helm chart @helm package deploy/helm/chaosblade-operator --destination $(BUILD_TARGET) --version $(BLADE_VERSION) --app-version $(BLADE_VERSION) @# Rename the package to include architecture @mv $(BUILD_TARGET)/chaosblade-operator-$(BLADE_VERSION).tgz $(BUILD_TARGET)/chaosblade-operator-amd64-$(BLADE_VERSION).tgz @echo "Linux AMD64 Helm package created: $(BUILD_TARGET)/chaosblade-operator-amd64-$(BLADE_VERSION).tgz" build_linux_arm64_helm: show-version pre_build @echo "Building Linux ARM64 Helm package..." 
@# Update Chart.yaml versions @sed -i.bak 's/^appVersion: ".*"/appVersion: "$(BLADE_VERSION)"/' deploy/helm/chaosblade-operator-arm64/Chart.yaml @sed -i.bak 's/^version: .*/version: $(BLADE_VERSION)/' deploy/helm/chaosblade-operator-arm64/Chart.yaml @# Update values.yaml versions @sed -i.bak 's/^ version: .*/ version: $(BLADE_VERSION)/' deploy/helm/chaosblade-operator-arm64/values.yaml @sed -i.bak 's/^ version: .*/ version: $(BLADE_VERSION)/' deploy/helm/chaosblade-operator-arm64/values.yaml @# Clean up backup files @rm -f deploy/helm/chaosblade-operator-arm64/Chart.yaml.bak deploy/helm/chaosblade-operator-arm64/values.yaml.bak @# Package Helm chart @helm package deploy/helm/chaosblade-operator-arm64 --destination $(BUILD_TARGET) --version $(BLADE_VERSION) --app-version $(BLADE_VERSION) @echo "Linux ARM64 Helm package created: $(BUILD_TARGET)/chaosblade-operator-arm64-$(BLADE_VERSION).tgz" ##---------------------------------------------------------------------------- build_linux_amd64_release: build_linux_amd64_image build_linux_amd64_helm build_linux_arm64_release: build_linux_arm64_image build_linux_arm64_helm # test test: go test -race -coverprofile=coverage.txt -covermode=atomic ./... # clean all build result clean: go clean ./... rm -rf $(BUILD_TARGET) rm -rf $(BUILD_IMAGE_PATH)/$(BUILD_TARGET_DIR_NAME) .PHONY: format format: license-format @echo "Running goimports and gofumpt to format Go code..." @./hack/update-imports.sh @./hack/update-gofmt.sh .PHONY: verify verify: @echo "Verifying Go code formatting and import order..." @./hack/verify-gofmt.sh @./hack/verify-imports.sh .PHONY: license-check license-check: @echo "Checking license headers..." docker run -it --rm -v $(shell pwd):/github/workspace ghcr.io/korandoru/hawkeye check .PHONY: license-format license-format: @echo "Formatting license headers..." 
docker run -it --rm -v $(shell pwd):/github/workspace ghcr.io/korandoru/hawkeye format # Help information help: @echo "Available build targets:" @echo " linux_amd64 - Build Linux AMD64 platform components (operator + chaos_fuse + yaml)" @echo " linux_arm64 - Build Linux ARM64 platform components (operator + chaos_fuse + yaml)" @echo " build_linux_amd64_image - Build Linux AMD64 Docker image" @echo " build_linux_arm64_image - Build Linux ARM64 Docker image" @echo " build_linux_amd64_helm - Build and package Linux AMD64 Helm chart" @echo " build_linux_arm64_helm - Build and package Linux ARM64 Helm chart" @echo " build_linux_amd64_release - Build image and Helm package for AMD64" @echo " build_linux_arm64_release - Build image and Helm package for ARM64" @echo " push_image - Push images to image registry" @echo " show-version - Display current version information" @echo " format - Format Go code using goimports and gofumpt" @echo " verify - Verify Go code formatting and import order" @echo " license-check - Check license headers in source files" @echo " clean - Clean build artifacts" @echo "" @echo "Version-related environment variables:" @echo " BLADE_VERSION - Specify version number (default: Git tag)" @echo " BLADE_VENDOR - Specify vendor (default: community)" @echo "" @echo "Build-related environment variables:" @echo " JVM_SPEC_PATH - Specify JVM specification file path (for container.JvmSpecFileForYaml)" @echo " CONTAINER_RUNTIME - Specify container runtime (docker or podman, auto-detected by default)" @echo "" ================================================ FILE: README.md ================================================ # Chaosblade-operator: A Chaos Engineering Tool for Cloud-native ![license](https://img.shields.io/github/license/chaosblade-io/chaosblade.svg) 中文版 [README](README_CN.md) ## Introduction Chaosblade Operator is a chaos experiments injection tool for cloud-native on kubernetes platform. 
By defining Kubernetes CRD to manage chaos experiments, each experiment has a very clear execution status. The tool has the characteristics of simple deployment, convenient execution, standardized implementation, and rich experiments. The chaos experimental model in chaosblade is well integrated with Kubernetes, which can realize the reuse of experiments such as basic resources, application services, and containers on the Kubernetes platform, which facilitates the expansion of resource experiments under Kubernetes, and can be executed uniformly through chaosblade cli tool. ## Supported experiments (continuously adding ...) The current experimental scenarios involve resources including Node, Pod, and Container. The specific supported experimental scenarios are as follows: * Node: * CPU: specify CPU usage * Network: specify network card, port, IP, etc. packet delay, packet loss, packet blocking, packet duplication, packet re-ordering, packet corruption, etc. * Process: specify process Hang, kill process, etc. * Disk: specify the directory disk occupation, disk IO read and write load, etc. * Memory: specify memory usage * Pod: * Network: specify network card, port, IP, etc. packet delay, packet loss, packet blocking, packet duplication, packet re-ordering, packet corruption, etc. * Disk: specify the directory disk occupation, disk IO read and write load, etc. * Memory: specify memory usage * Pod: kill pod, make pod stuck in ContainerCreating state by PVC mount failure, make pod stuck in ContainerCreating state by cloud disk PVC creation failure, make pod stuck in Terminating state by finalizer, make pod scheduling fail by injecting unreachable affinity rules, modify workload (Deployment/DaemonSet/StatefulSet) CPU/Memory resource limits to simulate bad resource sizing, mount non-existent ConfigMap/Secret/PVC volume to workload (Deployment/DaemonSet/StatefulSet) to simulate volume mount failure * IO: specify the file system io exception. 
Supports 31 file operations and 11 exception scenarios, such as "Too many open files", "Device or resource busy" and so on. * Container: * CPU: specify CPU usage * Network: specify network card, port, IP, etc. packet delay, packet loss, packet blocking, packet duplication, packet re-ordering, packet corruption, etc. * Process: specify process Hang, kill process, etc. * Disk: specify the directory disk occupation, disk IO read and write load, etc. * Memory: specify memory usage * Container: remove container * Service: * Service: create, modify service ## Local Build & Installation ## Build images ```shell # Under operator's root directory # For linux/amd64 make build_linux_amd64_image # For linux/arm64 make build_linux_arm64_image ``` ### Build and install Helm Chart ```shell # Under operator's root directory cd deploy/helm # For linux/amd64 helm package ./chaosblade-operator kubectl create ns chaosblade helm install chaosblade chaosblade-operator-${version}.tgz --namespace chaosblade # For linux/arm64 helm package ./chaosblade-operator-arm64 kubectl create ns chaosblade helm install chaosblade chaosblade-operator-arm64-${version}.tgz --namespace chaosblade ``` ## Install and uninstall The lowest version of kubernetes supported is 1.12. Chaosblade operator can be installed through kubectl or helm, the installation method is as follows: Note: For the following `VERSION`, please use the latest version number instead ### Helm v2 * Download the latest `chaosblade-operator-VERSION-v2.tgz` package at [Release](https://github.com/chaosblade-io/chaosblade-operator/releases) * Install using `helm install --namespace chaosblade --name chaosblade-operator chaosblade-operator-VERSION-v2.tgz` * Use `kubectl get pod -l part-of=chaosblade -n chaosblade` to check the installation status of the Pod. 
If both are running, the installation was successful * Use the following command to uninstall, pay attention to the execution order: ```shell script kubectl delete crd chaosblades.chaosblade.io helm del --purge chaosblade-operator ``` ### Helm v3 * Download the latest `chaosblade-operator-VERSION-v3.tgz` package at [Release](https://github.com/chaosblade-io/chaosblade-operator/releases) * Use `helm install chaosblade-operator chaosblade-operator-VERSION-v3.tgz --namespace chaosblade` command to install * Use `kubectl get pod -l part-of=chaosblade -n chaosblade` to check the installation status of the Pod. If both are running, the installation was successful * Use the following command to uninstall, pay attention to the execution order: ```shell script kubectl delete crd chaosblades.chaosblade.io helm uninstall chaosblade-operator -n chaosblade ``` ### Kubectl * Download the latest `chaosblade-operator-yaml-VERSION.tar.gz` package at [Release](https://github.com/chaosblade-io/chaosblade-operator/releases) * After decompression, execute `kubectl apply -f chaosblade-operator-yaml-VERSION/` installation * Use `kubectl get pod -l part-of=chaosblade -n chaosblade` to check the installation status of the Pod. If both are running, the installation was successful * Use the following command to uninstall, pay attention to the execution order: ```shell script kubectl delete crd chaosblades.chaosblade.io kubectl delete -f chaosblade-operator-yaml-VERSION/ ``` ## How to use You can run chaos experiments after installing the chaosblade operator. There are three ways to execute chaos experiments: * By configuring yaml file, use kubectl to execute * Executed using chaosblade cli tool * Use Kubernetes API to execute by writing code The following uses a specific case to illustrate the use of chaosblade-operator: simulate cn-hangzhou.192.168.0.205 node local port 40690 60% network packet loss. 
### By configuring the yaml file, use kubectl to execute ``` apiVersion: chaosblade.io/v1alpha1 kind: ChaosBlade metadata: name: loss-node-network-by-names spec: experiments: - scope: node target: network action: loss desc: "node network loss" matchers: - name: names value: ["cn-hangzhou.192.168.0.205"] - name: percent value: ["60"] - name: interface value: ["eth0"] - name: local-port value: ["40690"] ``` Execute experiment: ``` kubectl apply -f loss-node-network-by-names.yaml ``` Query the experimental status, the returned information is as follows (spec and other contents are omitted): ``` ~ » kubectl get blade loss-node-network-by-names -o json { "apiVersion": "chaosblade.io/v1alpha1", "kind": "ChaosBlade", "metadata": { "creationTimestamp": "2019-11-04T09:56:36Z", "finalizers": [ "finalizer.chaosblade.io" ], "generation": 1, "name": "loss-node-network-by-names", "resourceVersion": "9262302", "selfLink": "/apis/chaosblade.io/v1alpha1/chaosblades/loss-node-network-by-names", "uid": "63a926dd-fee9-11e9-b3be-00163e136d88" }, "status": { "expStatuses": [ { "action": "loss", "resStatuses": [ { "id": "057acaa47ae69363", "kind": "node", "name": "cn-hangzhou.192.168.0.205", "nodeName": "cn-hangzhou.192.168.0.205", "state": "Success", "success": true, "uid": "e179b30d-df77-11e9-b3be-00163e136d88" } ], "scope": "node", "state": "Success", "success": true, "target": "network" } ], "phase": "Running" } } ``` From the above, you can clearly see the running status of the chaos experiment. Run the following command to stop the experiment: ``` kubectl delete -f loss-node-network-by-names.yaml ``` Or delete this blade resource directly: ``` kubectl delete blade loss-node-network-by-names ``` You can also edit the yaml file to update the content of the experiment and the chaosblade operator will complete the update of the experiment. 
See more examples: [Examples](https://github.com/chaosblade-io/chaosblade-operator/tree/master/examples) ### Execute with chaosblade cli tool ``` blade create k8s node-network loss --percent 60 --interface eth0 --local-port 40690 --names cn-hangzhou.192.168.0.205 --kubeconfig config ``` If the execution fails, a detailed error message is returned; if the execution is successful, the experiment UID is returned: ``` {"code":200,"success":true,"result":"e647064f5f20953c"} ``` You can query the status of the experiment with the following command: ``` blade query k8s create e647064f5f20953c --kubeconfig config { "code": 200, "success": true, "result": { "uid": "e647064f5f20953c", "success": true, "error": "", "statuses": [ { "id": "fa471a6285ec45f5", "uid": "e179b30d-df77-11e9-b3be-00163e136d88", "name": "cn-hangzhou.192.168.0.205", "state": "Success", "kind": "node", "success": true, "nodeName": "cn-hangzhou.192.168.0.205" } ] } } ``` Destroy experiment: ``` blade destroy e647064f5f20953c ``` In addition to the above two methods, you can also use the Kubernetes client-go API for execution. For details, please refer to: [executor.go](https://github.com/chaosblade-io/chaosblade/blob/master/exec/kubernetes/executor.go) code implementation. [Chinese documentation](https://chaosblade-io.gitbook.io/chaosblade-help-zh-cn/blade-create-k8s) ## Questions & Suggestions If you encounter problems during installation and use, or have suggestions or new feature requests, you can submit issues for any project (including other projects) to [GitHub Issues](https://github.com/chaosblade-io/chaosblade/issues) You can also contact us via: * Dingding group: 23177705 * Gitter room: [chaosblade community](https://gitter.im/chaosblade-io/community) * Email: chaosblade.io.01@gmail.com * Twitter: [chaosblade.io](https://twitter.com/ChaosbladeI) ## Contributions We welcome every issue and PR. 
Even a punctuation mark, how to participate in the contribution please read the [CONTRIBUTING](CONTRIBUTING.md) document, or contact us through the above method. ## Open source license Chaosblade-operator is licensed under the Apache 2.0 license. For details, please read [LICENSE](LICENSE) ================================================ FILE: README_CN.md ================================================ # Chaosblade Operator: 面向云原生的混沌工程执行工具 ![license](https://img.shields.io/github/license/chaosblade-io/chaosblade.svg) ## 介绍 Chaosblade Operator 是混沌工程实验工具 ChaosBlade 下的一款面向云原生领域的混沌实验注入工具,可单独部署使用。通过定义 Kubernetes CRD 来管理混沌实验,每个实验都有非常明确的执行状态。工具具有部署简单、执行便捷、标准化实现、场景丰富等特点。将 ChaosBlade 混沌实验模型与 Kubernetes CRD 很好的结合在一起,可以实现基础资源、应用服务、容器等场景在 Kubernetes 平台上场景复用,方便了 Kubernetes 下资源场景的扩展,而且可通过 chaosblade cli 统一执行调用。 ## 支持的场景(持续新增中...) 目前实验场景涉及到资源包含 Node、Pod、Container,具体支持的场景如下: * Node: * CPU: 指定 CPU 使用率 * 网络: 指定网卡、端口、IP 等包延迟、丢包、包阻塞、包重复、包乱序、包损坏等 * 进程:指定进程 Hang、强杀指定进程等 * 磁盘:指定目录磁盘填充、磁盘 IO 读写负载等 * 内存:指定内存使用率 * Pod: * 网络:指定网卡、端口、IP 等包延迟、丢包、包阻塞、包重复、包乱序、包损坏等 * 磁盘:指定目录磁盘填充、磁盘 IO 读写负载等 * 内存:指定内存使用率 * Pod:杀 Pod、通过 PVC 挂载失败使 Pod 卡在 ContainerCreating 状态、通过云盘 PVC 创建失败使 Pod 卡在 ContainerCreating 状态、通过 finalizer 使 Pod 卡在 Terminating 状态、通过注入无法满足的亲和性规则使 Pod 调度失败、修改工作负载(Deployment/DaemonSet/StatefulSet)的 CPU/Memory 资源限制模拟资源配置异常、挂载不存在的 ConfigMap/Secret/PVC 类型 Volume 到工作负载(Deployment/DaemonSet/StatefulSet)模拟卷挂载失败 * Container: * CPU: 指定 CPU 使用率 * 网络: 指定网卡、端口、IP 等包延迟、丢包、包阻塞、包重复、包乱序、包损坏等 * 进程:指定进程 Hang、强杀指定进程等 * 磁盘:指定目录磁盘填充、磁盘 IO 读写负载等 * 内存:指定内存使用率 * Container: 杀 Container * Service: * Service: 创建、修改Service ## 本地构建&安装 ### 构造镜像 ```shell # operator 根目录下 # linux/amd64 make build_all # linux/arm64 make build_all_amr64 ``` ### 构造并安装 Helm Chart ```shell # operator 根目录下 cd deploy/helm # linux/amd64 helm package ./chaosblade-operator kubectl create ns chaosblade helm install chaosblade chaosblade-operator-${version}.tgz --namespace chaosblade # linux/arm64 helm package ./chaosblade-operator-arm64 kubectl create 
ns chaosblade helm install chaosblade chaosblade-operator-arm64-${version}.tgz --namespace chaosblade ``` ## 安装&卸载 支持的 Kubernetes 最小版本是 v1.12,chaosblade operator 可通过 kubectl 或者 helm 进行安装,安装方式如下: 注意:以下的 `VERSION` 请使用最新的版本号替代 ### Helm v2 * 在 [Release](https://github.com/chaosblade-io/chaosblade-operator/releases) 地址下载最新的 `chaosblade-operator-VERSION-v2.tgz` 包 * 使用 `helm install --namespace kube-system --name chaosblade-operator chaosblade-operator-VERSION-v2.tgz` 命令安装 * 使用 `kubectl get pod -l part-of=chaosblade -n kube-system` 查看 Pod 的安装状态,如果都是 running 状态,说明安装成功 * 使用以下命令进行卸载,注意执行顺序: ```shell script kubectl delete crd chaosblades.chaosblade.io helm del --purge chaosblade-operator ``` ### Helm v3 * 在 [Release](https://github.com/chaosblade-io/chaosblade-operator/releases) 地址下载最新的 `chaosblade-operator-VERSION-v3.tgz` 包 * 使用 `helm install chaosblade-operator chaosblade-operator-VERSION-v3.tgz --namespace kube-system` 命令安装 * 使用 `kubectl get pod -l part-of=chaosblade -n kube-system` 查看 Pod 的安装状态,如果都是 running 状态,说明安装成功 * 使用以下命令卸载,注意执行顺序: ```shell script kubectl delete crd chaosblades.chaosblade.io helm uninstall chaosblade-operator -n kube-system ``` ### Kubectl * 在 [Release](https://github.com/chaosblade-io/chaosblade-operator/releases) 地址下载最新的 `chaosblade-operator-yaml-VERSION.tar.gz` 包 * 解压后执行 `kubectl apply -f chaosblade-operator-yaml-VERSION/` 安装 * 使用 `kubectl get pod -l part-of=chaosblade -n kube-system` 查看 Pod 的安装状态,如果都是 running 状态,说明安装成功 * 使用以下命令卸载,注意执行顺序: ```shell script kubectl delete crd chaosblades.chaosblade.io kubectl delete -f chaosblade-operator-yaml-VERSION/ ``` ## 使用 安装 chaosblade operator 后即可执行混沌实验,执行方式有以下三种: * 通过配置 yaml 方式,使用 kubectl 执行 * 使用 chaosblade cli 工具执行 * 通过编写代码调用 Kubernetes API 执行 下面通过一个具体的案例来说明 chaosblade-operator 的使用:模拟 cn-hangzhou.192.168.0.205 节点本地端口 40690 60% 的网络丢包。 ### 通过配置 yaml 方式,使用 kubectl 执行 ``` apiVersion: chaosblade.io/v1alpha1 kind: ChaosBlade metadata: name: loss-node-network-by-names spec: experiments: - scope: node target: 
network action: loss desc: "node network loss" matchers: - name: names value: ["cn-hangzhou.192.168.0.205"] - name: percent value: ["60"] - name: interface value: ["eth0"] - name: local-port value: ["40690"] ``` 执行实验: ``` kubectl apply -f loss-node-network-by-names.yaml ``` 查询实验状态,返回信息如下(省略了 spec 等内容): ``` ~ » kubectl get blade loss-node-network-by-names -o json { "apiVersion": "chaosblade.io/v1alpha1", "kind": "ChaosBlade", "metadata": { "creationTimestamp": "2019-11-04T09:56:36Z", "finalizers": [ "finalizer.chaosblade.io" ], "generation": 1, "name": "loss-node-network-by-names", "resourceVersion": "9262302", "selfLink": "/apis/chaosblade.io/v1alpha1/chaosblades/loss-node-network-by-names", "uid": "63a926dd-fee9-11e9-b3be-00163e136d88" }, "status": { "expStatuses": [ { "action": "loss", "resStatuses": [ { "id": "057acaa47ae69363", "kind": "node", "name": "cn-hangzhou.192.168.0.205", "nodeName": "cn-hangzhou.192.168.0.205", "state": "Success", "success": true, "uid": "e179b30d-df77-11e9-b3be-00163e136d88" } ], "scope": "node", "state": "Success", "success": true, "target": "network" } ], "phase": "Running" } } ``` 通过以上内容可以很清晰的看出混沌实验的运行状态,执行以下命令停止实验: ``` kubectl delete -f loss-node-network-by-names.yaml ``` 或者直接删除此 blade 资源 ``` kubectl delete blade loss-node-network-by-names ``` 还可以编辑 yaml 文件,更新实验内容执行,chaosblade operator 会完成实验的更新操作。更多案例请查看 [Examples](https://github.com/chaosblade-io/chaosblade-operator/tree/master/examples) ### 使用 chaosblade cli 工具执行 ``` blade create k8s node-network loss --percent 60 --interface eth0 --local-port 40690 --names cn-hangzhou.192.168.0.205 --kubeconfig config ``` 如果执行失败,会返回详细的错误信息;如果执行成功,会返回实验的 UID: ``` {"code":200,"success":true,"result":"e647064f5f20953c"} ``` 可通过以下命令查询实验状态: ``` blade query k8s create e647064f5f20953c --kubeconfig config { "code": 200, "success": true, "result": { "uid": "e647064f5f20953c", "success": true, "error": "", "statuses": [ { "id": "fa471a6285ec45f5", "uid": "e179b30d-df77-11e9-b3be-00163e136d88", "name": 
"cn-hangzhou.192.168.0.205", "state": "Success", "kind": "node", "success": true, "nodeName": "cn-hangzhou.192.168.0.205" } ] } } ``` 销毁实验: ``` blade destroy e647064f5f20953c ``` 除了上述两种方式调用外,还可以使用 kubernetes client-go 方式执行,具体可参考:[executor.go](https://github.com/chaosblade-io/chaosblade/blob/master/exec/kubernetes/executor.go) 代码实现。 [中文使用文档](https://chaosblade-io.gitbook.io/chaosblade-help-zh-cn/blade-create-k8s) ## 问题&建议 如果在安装使用过程中遇到问题,或者建议和新功能,所有项目(包含其他项目)的问题都可以提交到[Github Issues](https://github.com/chaosblade-io/chaosblade/issues) 你也可以通过以下方式联系我们: * 钉钉群(推荐):23177705 * Gitter room: [chaosblade community](https://gitter.im/chaosblade-io/community) * 邮箱:chaosblade.io.01@gmail.com * Twitter: [chaosblade.io](https://twitter.com/ChaosbladeI) ## 参与贡献 我们非常欢迎每个 Issue 和 PR,即使一个标点符号,如何参加贡献请阅读 [CONTRIBUTING](CONTRIBUTING.md) 文档,或者通过上述的方式联系我们。 ## 开源许可证 Chaosblade-operator 遵循 Apache 2.0 许可证,详细内容请阅读 [LICENSE](LICENSE) ================================================ FILE: build/bin/entrypoint ================================================ #!/bin/sh -e # This is documented here: # https://docs.openshift.com/container-platform/3.11/creating_images/guidelines.html#openshift-specific-guidelines #if ! 
whoami &>/dev/null; then # if [ -w /etc/passwd ]; then # echo "${USER_NAME:-chaosblade-operator}:x:$(id -u):$(id -g):${USER_NAME:-chaosblade-operator} user:${HOME}:/sbin/nologin" >> /etc/passwd # fi #fi exec ${OPERATOR} $@ ================================================ FILE: build/bin/user_setup ================================================ #!/bin/sh set -x # ensure $HOME exists and is accessible by group 0 (we don't know what the runtime UID will be) mkdir -p ${HOME} chown ${USER_UID}:0 ${HOME} chmod ug+rwx ${HOME} # runtime user will need to be able to self-insert in /etc/passwd chmod g+rw /etc/passwd # no need for this script to remain in the image after running rm $0 ================================================ FILE: build/image/amd/Dockerfile ================================================ # Copyright 2025 The ChaosBlade Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
FROM --platform=linux/amd64 alpine:latest as builder ENV OPERATOR=/usr/local/bin/chaosblade-operator COPY build/_output/bin/chaosblade-operator /usr/local/bin/ # Build platform-specific scripts for AMD64 FROM --platform=linux/amd64 alpine:latest as script_builder RUN apk add --no-cache bash COPY build/bin /tmp/scripts RUN chmod +x /tmp/scripts/* FROM --platform=linux/amd64 registry.access.redhat.com/ubi8/ubi-minimal:latest ENV OPERATOR=/usr/local/bin/chaosblade-operator \ CHAOSBLADE_HOME=/opt/chaosblade COPY --from=builder ${OPERATOR} /usr/local/bin/ COPY --from=script_builder /tmp/scripts /usr/local/bin RUN chmod 777 /usr/local/bin/user_setup RUN /usr/local/bin/user_setup ENTRYPOINT ["/usr/local/bin/entrypoint"] ================================================ FILE: build/image/arm/Dockerfile ================================================ # Copyright 2025 The ChaosBlade Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
FROM alpine:latest as builder ENV OPERATOR=/usr/local/bin/chaosblade-operator COPY build/_output/bin/chaosblade-operator /usr/local/bin/ # Build platform-specific scripts for ARM64 FROM alpine:latest as script_builder RUN apk add --no-cache bash COPY build/bin /tmp/scripts RUN chmod +x /tmp/scripts/* FROM registry.access.redhat.com/ubi8/ubi-minimal:latest ENV OPERATOR=/usr/local/bin/chaosblade-operator \ CHAOSBLADE_HOME=/opt/chaosblade COPY --from=builder ${OPERATOR} /usr/local/bin/ COPY --from=script_builder /tmp/scripts /usr/local/bin RUN chmod 777 /usr/local/bin/user_setup RUN /usr/local/bin/user_setup ENTRYPOINT ["/usr/local/bin/entrypoint"] ================================================ FILE: build/musl/Dockerfile ================================================ # Copyright 2025 The ChaosBlade Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
FROM golang:1.20.13 LABEL maintainer="Changjun Xiao" # # The image is used to build chaosblade for musl RUN wget http://www.musl-libc.org/releases/musl-1.1.21.tar.gz \ && tar -zxvf musl-1.1.21.tar.gz \ && rm musl-1.1.21.tar.gz \ && cd musl* \ && ./configure \ && make \ && make install \ && rm -rf musl* ENV CC /usr/local/musl/bin/musl-gcc ENV GOOS linux ENTRYPOINT [ "make" ] ================================================ FILE: build/spec.go ================================================ /* * Copyright 2025 The ChaosBlade Authors * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. 
*/ package main import ( "log" "os" "github.com/chaosblade-io/chaosblade-operator/exec/container" "github.com/chaosblade-io/chaosblade-operator/exec/node" "github.com/chaosblade-io/chaosblade-operator/exec/pod" "github.com/chaosblade-io/chaosblade-operator/exec/service" "github.com/chaosblade-io/chaosblade-spec-go/spec" "github.com/chaosblade-io/chaosblade-spec-go/util" ) // main creates the yaml file of the experiments about kubernetes func main() { if len(os.Args) < 2 { log.Panicln("less yaml file path") } if len(os.Args) == 3 { container.JvmSpecPathForYaml = os.Args[2] } err := util.CreateYamlFile(getModels(), os.Args[1]) if err != nil { log.Panicf("create yaml file error, %v", err) } } func getModels() *spec.Models { models := make([]*spec.Models, 0) nodeResourceModelSpec := node.NewResourceModelSpec(nil) for _, modelSpec := range nodeResourceModelSpec.ExpModels() { model := util.ConvertSpecToModels(modelSpec, spec.ExpPrepareModel{}, nodeResourceModelSpec.Scope()) models = append(models, model) } podResourceModelSpec := pod.NewResourceModelSpec(nil) for _, modelSpec := range podResourceModelSpec.ExpModels() { model := util.ConvertSpecToModels(modelSpec, spec.ExpPrepareModel{}, podResourceModelSpec.Scope()) models = append(models, model) } containerResourceModelSpec := container.NewResourceModelSpec(nil) for _, modelSpec := range containerResourceModelSpec.ExpModels() { model := util.ConvertSpecToModels(modelSpec, spec.ExpPrepareModel{}, containerResourceModelSpec.Scope()) models = append(models, model) } serviceResourceModelSpec := service.NewResourceModelSpec(nil) for _, modelSpec := range serviceResourceModelSpec.ExpModels() { model := util.ConvertSpecToModels(modelSpec, spec.ExpPrepareModel{}, serviceResourceModelSpec.Scope()) models = append(models, model) } return util.MergeModels(models...) 
} ================================================ FILE: channel/client.go ================================================ /* * Copyright 2025 The ChaosBlade Authors * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package channel import ( "bytes" "fmt" "io" "net/url" "strings" "github.com/sirupsen/logrus" corev1 "k8s.io/api/core/v1" "k8s.io/client-go/kubernetes" "k8s.io/client-go/kubernetes/scheme" "k8s.io/client-go/rest" "k8s.io/client-go/tools/remotecommand" "sigs.k8s.io/controller-runtime/pkg/client" ) // Client contains the kubernetes client, operator client and kubeconfig type Client struct { kubernetes.Interface client.Client Config *rest.Config } // NewClientFunc returns the controller client func NewClientFunc() client.NewClientFunc { return func(config *rest.Config, options client.Options) (client.Client, error) { c, err := client.New(config, options) if err != nil { return nil, err } return &Client{ Interface: kubernetes.NewForConfigOrDie(config), Client: c, Config: config, }, nil } } type IOStreams struct { In io.Reader Out io.Writer ErrOut io.Writer } type StreamOptions struct { IOStreams Stdin bool TTY bool OutDecoder func(bytes []byte) interface{} ErrDecoder func(bytes []byte) interface{} } type ExecOptions struct { StreamOptions PodName string PodNamespace string ContainerName string Command []string IgnoreOutput bool } // Exec command in pod func (c *Client) Exec(options *ExecOptions) interface{} { logFields := logrus.WithFields(logrus.Fields{ "command": 
options.Command, "podName": options.PodName, "podNamespace": options.PodNamespace, "container": options.ContainerName, }) logFields.Infof("Exec command in pod") request := c.CoreV1().RESTClient().Post(). Resource("pods"). Name(options.PodName). Namespace(options.PodNamespace). SubResource("exec"). VersionedParams( &corev1.PodExecOptions{ Container: options.ContainerName, Command: options.Command, Stdin: options.Stdin, Stdout: true, Stderr: true, TTY: options.TTY, }, scheme.ParameterCodec, ) output := bytes.NewBuffer([]byte{}) options.Out = output errput := bytes.NewBuffer([]byte{}) options.ErrOut = errput err := execute("POST", request.URL(), c.Config, options) errMsg := strings.TrimSpace(errput.String()) outMsg := strings.TrimSpace(output.String()) execLog := logFields.WithFields(logrus.Fields{ "err": errMsg, "out": outMsg, }) if errMsg != "" { execLog.Infof("get err message") return options.ErrDecoder(errput.Bytes()) } if err != nil { execLog.WithError(err).Errorln("Invoke exec command error") return options.ErrDecoder([]byte(err.Error())) } if outMsg != "" { execLog.Infof("get output message") return options.OutDecoder(output.Bytes()) } if options.IgnoreOutput { return nil } return options.ErrDecoder([]byte(fmt.Sprintf("cannot get output of pods/%s/exec, maybe kubelet cannot be accessed or container not found", options.PodName))) } // "172.21.1.11:8080/api/v1/namespaces/default/pods/my-nginx-3855515330-l1uqk/exec // ?container=my-nginx&stdin=1&stdout=1&stderr=1&tty=1&command=%2Fbin%2Fbash" func execute(method string, url *url.URL, config *rest.Config, options *ExecOptions) error { exec, err := remotecommand.NewSPDYExecutor(config, method, url) if err != nil { return err } return exec.Stream(remotecommand.StreamOptions{ Stdin: options.StreamOptions.In, Stdout: options.StreamOptions.Out, Stderr: options.StreamOptions.ErrOut, Tty: options.StreamOptions.TTY, }) } ================================================ FILE: channel/client_test.go 
================================================ /* * Copyright 2025 The ChaosBlade Authors * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package channel import ( "testing" ) func TestClient_Exec(t *testing.T) { // Skip this test as it requires a real Kubernetes cluster connection t.Skip("Skipping TestClient_Exec: requires Kubernetes cluster connectivity") } ================================================ FILE: cmd/hookfs/main.go ================================================ /* * Copyright 2025 The ChaosBlade Authors * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. 
*/ package main import ( "context" "flag" "fmt" "math/rand" "os" "os/exec" "time" "github.com/chaosblade-io/chaosblade-spec-go/util" "github.com/ethercflow/hookfs/hookfs" "github.com/sirupsen/logrus" "sigs.k8s.io/controller-runtime/pkg/manager/signals" chaosbladehook "github.com/chaosblade-io/chaosblade-operator/pkg/hookfs" ) var ( address string pidFile string original string mountpoint string ) func main() { flag.StringVar(&address, "address", ":65534", "The address to bind") flag.StringVar(&original, "original", "", "Mapping of the original disk, not affected by the drill") flag.StringVar(&mountpoint, "mountpoint", "", "The disk of the drill. The affected directories are controlled by the path flag.") rand.Seed(time.Now().UnixNano()) flag.Parse() logFields := logrus.WithFields(logrus.Fields{ "address": address, "original": original, "mountpoint": mountpoint, }) stopCtx := signals.SetupSignalHandler() chaosbladeHookServer := chaosbladehook.NewChaosbladeHookServer(address) logFields.Infoln("Start chaosblade hook server.") go chaosbladeHookServer.Start(stopCtx) logFields.Infoln("Start fuse server.") if err := startFuseServer(stopCtx); err != nil { logFields.WithError(err).Fatalln("Start fuse server failed") } } // startFuseServer starts hookfs server func startFuseServer(stop context.Context) error { if !util.IsExist(original) { if err := os.MkdirAll(original, os.FileMode(755)); err != nil { return fmt.Errorf("create original directory error, %v", err) } } if !util.IsExist(mountpoint) { if err := os.MkdirAll(mountpoint, os.FileMode(755)); err != nil { return fmt.Errorf("create mountpoint directory error, %v", err) } } fs, err := hookfs.NewHookFs(original, mountpoint, &chaosbladehook.ChaosbladeHook{MountPoint: mountpoint}) if err != nil { return fmt.Errorf("create hookfs error, %v", err) } errCh := make(chan error) go func() { errCh <- fs.Serve() }() for { select { case <-stop.Done(): logFields := logrus.WithFields(logrus.Fields{ "address": address, "original": 
original, "mountpoint": mountpoint, }) ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) defer cancel() cmd := exec.CommandContext(ctx, "fusermount", "-zu", mountpoint) logFields.Infof("Start unmount fuse volume, cmd: %v", cmd) if err := cmd.Run(); err != nil { logFields.WithError(err).Errorln("Failed to execute fusermount") } return err case err := <-errCh: if err != nil { return err } } } } ================================================ FILE: cmd/manager/main.go ================================================ /* * Copyright 2025 The ChaosBlade Authors * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package main import ( "context" "flag" "net/http" "runtime" "strings" "github.com/operator-framework/operator-sdk/pkg/k8sutil" "github.com/operator-framework/operator-sdk/pkg/leader" "github.com/operator-framework/operator-sdk/pkg/log/zap" sdkVersion "github.com/operator-framework/operator-sdk/version" "github.com/sirupsen/logrus" "github.com/spf13/pflag" appsv1 "k8s.io/api/apps/v1" corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/meta" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtimeutil "k8s.io/apimachinery/pkg/util/runtime" "sigs.k8s.io/controller-runtime/pkg/webhook" apiruntime "k8s.io/apimachinery/pkg/runtime" // Import all Kubernetes client auth plugins (e.g. Azure, GCP, OIDC, etc.) 
_ "k8s.io/client-go/plugin/pkg/client/auth" "k8s.io/client-go/rest" "sigs.k8s.io/controller-runtime/pkg/cache" "sigs.k8s.io/controller-runtime/pkg/client/apiutil" "sigs.k8s.io/controller-runtime/pkg/client/config" "sigs.k8s.io/controller-runtime/pkg/log" "sigs.k8s.io/controller-runtime/pkg/manager" "sigs.k8s.io/controller-runtime/pkg/manager/signals" "github.com/chaosblade-io/chaosblade-operator/channel" "github.com/chaosblade-io/chaosblade-operator/pkg/apis" "github.com/chaosblade-io/chaosblade-operator/pkg/controller" operator "github.com/chaosblade-io/chaosblade-operator/pkg/runtime" "github.com/chaosblade-io/chaosblade-operator/pkg/runtime/chaosblade" webhookcfg "github.com/chaosblade-io/chaosblade-operator/pkg/webhook" mutator "github.com/chaosblade-io/chaosblade-operator/pkg/webhook/pod" "github.com/chaosblade-io/chaosblade-operator/version" ) func printVersion() { logrus.Infof("Go Version: %s", runtime.Version()) logrus.Infof("Go OS/Arch: %s/%s", runtime.GOOS, runtime.GOARCH) logrus.Infof("Version of operator-sdk: %v", sdkVersion.Version) logrus.Infof("Operator Version: %v", version.Version) logrus.Infof("Operator Product: %v", version.Product) logrus.Infof("Build Time: %v", version.BuildTime) logrus.Infof("Git Commit: %v", version.GitCommit) logrus.Infof("Git Branch: %v", version.GitBranch) logrus.Infof("Platform: %v", version.Platform) logrus.Infof("Daemonset Enable: %t", chaosblade.DaemonsetEnable) } func main() { pflag.CommandLine.AddFlagSet(zap.FlagSet()) pflag.CommandLine.AddFlagSet(operator.FlagSet()) pflag.CommandLine.AddFlagSet(webhookcfg.FlagSet()) pflag.CommandLine.AddGoFlagSet(flag.CommandLine) // 添加版本标志 showVersion := pflag.Bool("version", false, "显示版本信息") pflag.Parse() // 如果只是查看版本,则显示后退出 if *showVersion { printVersion() return } initLogger() printVersion() cfg, err := config.GetConfig() if err != nil { logrus.Fatalf("Get apiserver config error, %v", err) } err = leader.Become(context.Background(), "chaosblade-operator-lock") if err != nil { 
logrus.Fatalf("Become leader error, %v", err) } cfg.QPS = operator.QPS mgr, err := createManager(cfg) if err != nil { logrus.Fatalf("Create operator manager error, %v", err) } addComponentsToManager(mgr) logrus.Infoln("Starting the manager.") if err := mgr.Start(signals.SetupSignalHandler()); err != nil { logrus.Fatalf("Manager exited non-zero, %v", err) } } func addComponentsToManager(mgr manager.Manager) { logrus.Infof("Add all resources to scheme") // Setup Scheme for all resources if err := apis.AddToScheme(mgr.GetScheme()); err != nil { logrus.Fatalf("Add all resources to scheme error, %v", err) } logrus.Infof("Add all controllers to manager") // Setup all Controllers if err := controller.AddToManager(mgr); err != nil { logrus.Fatalf("Add all controllers to manager error, %v", err) } if webhookcfg.Enable { logrus.Infof("Webhook enabled, add it to manager") if err := addWebhook(mgr); err != nil { logrus.Fatalf("Add webhook to manager error, %v", err) } } } // Init logrus and controller-runtime log func initLogger() { level, err := logrus.ParseLevel(operator.LogLevel) if err != nil { level = logrus.InfoLevel } logrus.SetLevel(level) log.SetLogger(zap.Logger()) } func addWebhook(m manager.Manager) error { server := webhook.NewServer(webhook.Options{ Port: webhookcfg.Port, }) if err := m.Add(server); err != nil { return err } logrus.Infof("registering %s to the webhook server", "mutating-pods") server.Register("/mutating-pods", &webhook.Admission{Handler: &mutator.Mutator{}}) return nil } // createManager supports multi namespaces configuration func createManager(cfg *rest.Config) (manager.Manager, error) { scheme := apiruntime.NewScheme() runtimeutil.Must(metav1.AddMetaToScheme(scheme)) runtimeutil.Must(corev1.AddToScheme(scheme)) runtimeutil.Must(appsv1.AddToScheme(scheme)) runtimeutil.Must(apis.AddToScheme(scheme)) watchNamespace, err := k8sutil.GetWatchNamespace() if err != nil { return nil, err } logrus.Infof("Get watch namespace is %s", watchNamespace) if 
strings.Contains(watchNamespace, ",") { defaultNsps := make(map[string]cache.Config) for _, nsp := range strings.Split(watchNamespace, ",") { defaultNsps[nsp] = cache.Config{} } return manager.New(cfg, manager.Options{ Cache: cache.Options{ Scheme: scheme, DefaultNamespaces: defaultNsps, }, Scheme: scheme, MapperProvider: func(c *rest.Config, httpClient *http.Client) (meta.RESTMapper, error) { return apiutil.NewDynamicRESTMapper(c, httpClient) }, NewClient: channel.NewClientFunc(), }) } return manager.New(cfg, manager.Options{ Cache: cache.Options{ Scheme: scheme, DefaultNamespaces: map[string]cache.Config{watchNamespace: {}}, }, Scheme: scheme, MapperProvider: func(c *rest.Config, httpClient *http.Client) (meta.RESTMapper, error) { return apiutil.NewDynamicRESTMapper(c, httpClient) }, NewClient: channel.NewClientFunc(), }) } ================================================ FILE: deploy/crds/chaosblade.io_chaosblades_crd.yaml ================================================ # Copyright 2025 The ChaosBlade Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: name: chaosblades.chaosblade.io spec: group: chaosblade.io names: kind: ChaosBlade listKind: ChaosBladeList plural: chaosblades singular: chaosblade scope: Namespaced versions: - name: v1alpha1 schema: openAPIV3Schema: description: ChaosBlade is the Schema for the chaosblades API properties: apiVersion: description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' type: string kind: description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' type: string metadata: type: object spec: description: ChaosBladeSpec defines the desired state of ChaosBlade properties: experiments: description: 'INSERT ADDITIONAL SPEC FIELDS - desired state of cluster Important: Run "operator-sdk generate k8s" to regenerate code after modifying this file Add custom validation using kubebuilder tags: https://book-v1.book.kubebuilder.io/beyond_basics/generating_crd.html' items: properties: action: description: Action is the experiment scenario of the target, such as delay, load type: string desc: description: Desc is the experiment description type: string matchers: description: Matchers is the experiment rules items: properties: name: description: Name is the name of flag type: string value: description: 'TODO: Temporarily defined as an array for all flags Value is the value of flag' items: type: string type: array required: - name - value type: object type: array scope: description: Scope is the area of the experiments, currently 
support node, pod and container type: string target: description: Target is the experiment target, such as cpu, network type: string required: - action - scope - target type: object type: array required: - experiments type: object status: description: ChaosBladeStatus defines the observed state of ChaosBlade properties: expStatuses: description: 'Important: Run "operator-sdk generate k8s" to regenerate code after modifying this file Add custom validation using kubebuilder tags: https://book-v1.book.kubebuilder.io/beyond_basics/generating_crd.html' items: properties: action: type: string error: type: string resStatuses: description: ResStatuses is the details of the experiment items: properties: error: description: experiment error type: string id: description: experiment uid in chaosblade type: string identifier: description: 'Resource identifier, rules as following: container: Namespace/NodeName/PodName/ContainerName pod: Namespace/NodeName/PodName' type: string kind: description: Kind type: string state: description: experiment state type: string success: description: success type: boolean required: - kind - state - success type: object type: array scope: description: experiment scope for cache type: string state: description: State is used to describe the experiment result type: string success: description: Success is used to judge the experiment result type: boolean target: type: string required: - action - scope - state - success - target type: object type: array phase: description: Phase indicates the state of the experiment Initial -> Running -> Updating -> Destroying -> Destroyed type: string required: - expStatuses type: object type: object served: true storage: true subresources: status: {} ================================================ FILE: deploy/helm/chaosblade-operator/.helmignore ================================================ # Patterns to ignore when building packages. 
# This supports shell glob matching, relative path matching, and # negation (prefixed with !). Only one pattern per line. .DS_Store # Common VCS dirs .git/ .gitignore .bzr/ .bzrignore .hg/ .hgignore .svn/ # Common backup files *.swp *.bak *.tmp *~ # Various IDEs .project .idea/ *.tmproj ================================================ FILE: deploy/helm/chaosblade-operator/Chart.yaml ================================================ # Copyright 2025 The ChaosBlade Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. apiVersion: v1 appVersion: "1.8.0" description: ChaosBlade Operator name: chaosblade-operator version: 1.8.0 home: https://github.com/chaosblade-io ================================================ FILE: deploy/helm/chaosblade-operator/crds/crd.yaml ================================================ # Copyright 2025 The ChaosBlade Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: name: chaosblades.chaosblade.io spec: group: chaosblade.io names: kind: ChaosBlade listKind: ChaosBladeList plural: chaosblades singular: chaosblade shortNames: [blade] scope: Cluster versions: - name: v1alpha1 schema: openAPIV3Schema: description: ChaosBlade is the Schema for the chaosblades API properties: apiVersion: description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' type: string kind: description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' type: string metadata: type: object spec: description: ChaosBladeSpec defines the desired state of ChaosBlade properties: experiments: description: 'INSERT ADDITIONAL SPEC FIELDS - desired state of cluster Important: Run "operator-sdk generate k8s" to regenerate code after modifying this file Add custom validation using kubebuilder tags: https://book-v1.book.kubebuilder.io/beyond_basics/generating_crd.html' items: properties: action: description: Action is the experiment scenario of the target, such as delay, load type: string desc: description: Desc is the experiment description type: string matchers: description: Matchers is the experiment rules items: properties: name: description: Name is the name of flag type: string value: description: 'TODO: Temporarily defined as an array for all flags Value is the value of flag' items: type: string type: array required: - name - value type: object type: array scope: description: Scope is the area of the 
experiments, currently support node, pod and container type: string target: description: Target is the experiment target, such as cpu, network type: string required: - action - scope - target type: object type: array required: - experiments type: object status: description: ChaosBladeStatus defines the observed state of ChaosBlade properties: expStatuses: description: 'Important: Run "operator-sdk generate k8s" to regenerate code after modifying this file Add custom validation using kubebuilder tags: https://book-v1.book.kubebuilder.io/beyond_basics/generating_crd.html' items: properties: action: type: string error: type: string resStatuses: description: ResStatuses is the details of the experiment items: properties: error: description: experiment error type: string id: description: experiment uid in chaosblade type: string identifier: description: 'Resource identifier, rules as following: container: Namespace/NodeName/PodName/ContainerName pod: Namespace/NodeName/PodName' type: string kind: description: Kind type: string state: description: experiment state type: string success: description: success type: boolean required: - kind - state - success type: object type: array scope: description: experiment scope for cache type: string state: description: State is used to describe the experiment result type: string success: description: Success is used to judge the experiment result type: boolean target: type: string required: - action - scope - state - success - target type: object type: array phase: description: Phase indicates the state of the experiment Initial -> Running -> Updating -> Destroying -> Destroyed type: string required: - expStatuses type: object type: object served: true storage: true subresources: status: {} ================================================ FILE: deploy/helm/chaosblade-operator/templates/NOTES.txt ================================================ Thank you for using chaosblade. 
================================================ FILE: deploy/helm/chaosblade-operator/templates/_helpers.tpl ================================================ {{/* vim: set filetype=mustache: */}} ================================================ FILE: deploy/helm/chaosblade-operator/templates/daemonset.yaml ================================================ # Copyright 2025 The ChaosBlade Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. {{- if .Values.daemonset.enable }} apiVersion: apps/v1 kind: DaemonSet metadata: name: chaosblade-tool labels: name: chaosblade-tool app: chaosblade-tool spec: selector: matchLabels: name: chaosblade-tool app: chaosblade-tool updateStrategy: type: RollingUpdate template: metadata: labels: name: chaosblade-tool app: chaosblade-tool spec: affinity: nodeAffinity: requiredDuringSchedulingIgnoredDuringExecution: nodeSelectorTerms: - matchExpressions: - key: type operator: NotIn values: - virtual-kubelet containers: - name: chaosblade-tool image: {{ .Values.blade.repository }}:{{ .Values.blade.version }} imagePullPolicy: {{ .Values.blade.pullPolicy }} env: - name: KUBERNETES_NODENAME valueFrom: fieldRef: fieldPath: spec.nodeName - name: DOCKER_API_VERSION value: "1.44" - name: CGROUP_ROOT value: "/host-sys/fs/cgroup/" securityContext: privileged: true volumeMounts: - mountPath: /var/run/docker.sock name: docker-socket - mountPath: /opt/chaosblade/chaosblade.dat name: chaosblade-db-volume - mountPath: /etc/hosts name: hosts - mountPath: 
/var/log/audit name: audit - mountPath: /var/lib/docker name: docker-lib - mountPath: /etc/docker name: docker-etc - mountPath: /run/containerd name: containerd - mountPath: /var/lib/containerd name: containerd-lib - mountPath: /etc/containerd name: containerd-etc - mountPath: /var/run/netns name: netns - mountPath: /host-sys name: sys dnsPolicy: ClusterFirstWithHostNet hostNetwork: true hostPID: true tolerations: - effect: NoSchedule operator: Exists volumes: - hostPath: path: /var/run/docker.sock name: docker-socket - hostPath: path: /var/run/chaosblade.dat type: FileOrCreate name: chaosblade-db-volume - hostPath: path: /etc/hosts name: hosts - hostPath: path: /var/lib/docker name: docker-lib - hostPath: path: /etc/docker name: docker-etc - hostPath: path: /var/log/audit name: audit - hostPath: path: /run/containerd name: containerd - hostPath: path: /var/lib/containerd name: containerd-lib - hostPath: path: /etc/containerd name: containerd-etc - hostPath: path: /var/run/netns name: netns - hostPath: path: /sys name: sys serviceAccountName: chaosblade {{- end }} ================================================ FILE: deploy/helm/chaosblade-operator/templates/deployment.yaml ================================================ # Copyright 2025 The ChaosBlade Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
apiVersion: apps/v1 kind: Deployment metadata: name: chaosblade-operator namespace: {{ .Release.Namespace }} spec: replicas: 1 selector: matchLabels: name: chaosblade-operator template: metadata: labels: name: chaosblade-operator part-of: chaosblade spec: dnsPolicy: {{ .Values.network.dns.policy }} hostNetwork: {{ .Values.network.host }} serviceAccountName: chaosblade initContainers: - name: chaosblade-tool image: {{ .Values.blade.repository }}:{{ .Values.blade.version }} imagePullPolicy: {{ .Values.blade.pullPolicy }} command: [ "cp", "-R","/opt/chaosblade", "/home" ] volumeMounts: - mountPath: /home name: chaosblade containers: - name: chaosblade-operator image: {{ .Values.operator.repository }}:{{ .Values.operator.version }} command: ["chaosblade-operator"] args: {{- if .Values.blade.repository }} - '--chaosblade-image-repository={{ .Values.blade.repository }}' {{- end }} {{- if .Values.blade.version }} - '--chaosblade-version={{ .Values.blade.version }}' {{- end }} {{- if .Values.blade.pullPolicy }} - '--chaosblade-image-pull-policy={{ .Values.blade.pullPolicy }}' {{- end }} {{- if .Values.env.zapLevel }} - '--zap-level={{ .Values.env.zapLevel }}' {{- end }} {{- if .Values.env.logLevel }} - '--log-level={{ .Values.env.logLevel }}' {{- end }} {{- if .Values.webhook.enable }} - '--webhook-enable' {{- end }} {{- if .Values.daemonset.enable }} - '--daemonset-enable' {{- end }} {{- if .Values.remove.blade.interval }} - '--remove-blade-interval={{ .Values.remove.blade.interval }}' {{- end }} {{- if .Values.blade.downloadUrl }} - '--chaosblade-download-url={{ .Values.blade.downloadUrl }}' {{- end }} - '--chaosblade-namespace={{ .Release.Namespace }}' imagePullPolicy: {{ .Values.operator.pullPolicy }} env: - name: WATCH_NAMESPACE value: "" - name: POD_NAME valueFrom: fieldRef: fieldPath: metadata.name - name: OPERATOR_NAME value: "chaosblade-operator" ports: - containerPort: 9443 protocol: TCP volumeMounts: - mountPath: /tmp/k8s-webhook-server/serving-certs name: cert 
readOnly: true - mountPath: /opt name: chaosblade volumes: - name: cert secret: defaultMode: 420 secretName: chaosblade-webhook-server-cert - name: chaosblade emptyDir: {} ================================================ FILE: deploy/helm/chaosblade-operator/templates/rbac.yaml ================================================ # Copyright 2025 The ChaosBlade Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. apiVersion: v1 kind: ServiceAccount metadata: name: chaosblade labels: name: chaosblade namespace: {{ .Release.Namespace }} --- apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRole metadata: name: chaosblade labels: name: chaosblade rules: - apiGroups: - '' resources: - pods - pods/exec - configmaps - secrets - services - persistentvolumeclaims - persistentvolumes verbs: - "*" - apiGroups: - '' resources: - nodes verbs: - get - list - watch - update - apiGroups: - apps resources: - daemonsets - deployments - statefulsets verbs: - "*" - apiGroups: - chaosblade.io resources: - chaosblades - chaosblades/status verbs: - "*" --- apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRoleBinding metadata: name: chaosblade labels: name: chaosblade roleRef: kind: ClusterRole name: chaosblade apiGroup: rbac.authorization.k8s.io subjects: - kind: ServiceAccount name: chaosblade namespace: {{ .Release.Namespace }} ================================================ FILE: deploy/helm/chaosblade-operator/templates/secret.yaml ================================================ # 
Copyright 2025 The ChaosBlade Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. {{- $ca := genCA "chaosblade-webhook-server-ca" 3650 }} {{- $cn := "chaosblade-webhook-server" }} {{- $dns1 := printf "%s.%s" $cn .Release.Namespace }} {{- $dns2 := printf "%s.%s.svc" $cn .Release.Namespace }} {{- $cert := genSignedCert $cn nil (list $dns1 $dns2) 3650 $ca }} apiVersion: admissionregistration.k8s.io/v1 kind: MutatingWebhookConfiguration metadata: name: chaosblade-operator namespace: {{ .Release.Namespace }} labels: app: chaosblade-operator chart: "{{ .Chart.Name }}-{{ .Chart.Version }}" release: "{{ .Release.Name }}" heritage: "{{ .Release.Service }}" webhooks: - clientConfig: caBundle: {{ $ca.Cert | b64enc | quote }} service: name: chaosblade-webhook-server namespace: {{ .Release.Namespace }} path: /mutating-pods name: "{{ .Chart.Name }}.{{ .Release.Namespace }}.svc" failurePolicy: Ignore rules: - apiGroups: - "" apiVersions: - v1 operations: - CREATE - UPDATE resources: - pods sideEffects: None admissionReviewVersions: ["v1beta1"] --- apiVersion: v1 kind: Secret metadata: name: chaosblade-webhook-server-cert namespace: {{ .Release.Namespace }} labels: app: chaosblade-operator chart: "{{ .Chart.Name }}-{{ .Chart.Version }}" heritage: {{ .Release.Service }} release: {{ .Release.Name }} type: kubernetes.io/tls data: tls.crt: {{ $cert.Cert | b64enc | quote }} tls.key: {{ $cert.Key | b64enc | quote }} ca.crt: {{ $ca.Cert | b64enc | quote }} 
================================================ FILE: deploy/helm/chaosblade-operator/templates/service.yaml ================================================ # Copyright 2025 The ChaosBlade Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. apiVersion: v1 kind: Service metadata: name: chaosblade-webhook-server namespace: {{ .Release.Namespace }} spec: ports: - port: 443 targetPort: 9443 selector: name: chaosblade-operator ================================================ FILE: deploy/helm/chaosblade-operator/values.yaml ================================================ # Copyright 2025 The ChaosBlade Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # Default values for chaosblade. 
# chaosblade-operator operator: repository: ghcr.io/chaosblade-io/chaosblade-operator version: 1.8.0 # image.pullPolicy: must be Always|IfNotPresent|Never pullPolicy: IfNotPresent # qps of kubernetes client qps: 20 reconcileCount: 20 blade: repository: ghcr.io/chaosblade-io/chaosblade-tool version: 1.8.0 pullPolicy: IfNotPresent downloadUrl: "" env: logLevel: info webhook: enable: true daemonset: enable: true remove: blade: interval: 72h network: host: false dns: policy: ClusterFirst ================================================ FILE: deploy/helm/chaosblade-operator-arm64/.helmignore ================================================ # Patterns to ignore when building packages. # This supports shell glob matching, relative path matching, and # negation (prefixed with !). Only one pattern per line. .DS_Store # Common VCS dirs .git/ .gitignore .bzr/ .bzrignore .hg/ .hgignore .svn/ # Common backup files *.swp *.bak *.tmp *~ # Various IDEs .project .idea/ *.tmproj ================================================ FILE: deploy/helm/chaosblade-operator-arm64/Chart.yaml ================================================ # Copyright 2025 The ChaosBlade Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
apiVersion: v1 appVersion: "1.8.0" description: ChaosBlade Operator name: chaosblade-operator-arm64 version: 1.8.0 home: https://github.com/chaosblade-io ================================================ FILE: deploy/helm/chaosblade-operator-arm64/crds/crd.yaml ================================================ # Copyright 2025 The ChaosBlade Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: name: chaosblades.chaosblade.io spec: group: chaosblade.io names: kind: ChaosBlade listKind: ChaosBladeList plural: chaosblades singular: chaosblade shortNames: [blade] scope: Cluster versions: - name: v1alpha1 schema: openAPIV3Schema: description: ChaosBlade is the Schema for the chaosblades API properties: apiVersion: description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' type: string kind: description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' type: string metadata: type: object spec: description: ChaosBladeSpec defines the desired state of ChaosBlade properties: experiments: description: 'INSERT ADDITIONAL SPEC FIELDS - desired state of cluster Important: Run "operator-sdk generate k8s" to regenerate code after modifying this file Add custom validation using kubebuilder tags: https://book-v1.book.kubebuilder.io/beyond_basics/generating_crd.html' items: properties: action: description: Action is the experiment scenario of the target, such as delay, load type: string desc: description: Desc is the experiment description type: string matchers: description: Matchers is the experiment rules items: properties: name: description: Name is the name of flag type: string value: description: 'TODO: Temporarily defined as an array for all flags Value is the value of flag' items: type: string type: array required: - name - value type: object type: array scope: description: Scope is the area of the experiments, currently support node, pod and container type: string target: description: Target is the experiment target, such as cpu, network type: string required: - action - scope - target type: object type: array required: - experiments type: object status: description: ChaosBladeStatus defines the observed state of ChaosBlade properties: expStatuses: description: 'Important: Run "operator-sdk generate k8s" to regenerate code after modifying this file Add custom validation using kubebuilder tags: https://book-v1.book.kubebuilder.io/beyond_basics/generating_crd.html' items: properties: action: type: string error: type: string resStatuses: description: ResStatuses is the details of the experiment items: properties: error: description: experiment error type: string id: description: experiment uid in chaosblade type: string identifier: description: 'Resource identifier, rules as following: container: 
Namespace/NodeName/PodName/ContainerName pod: Namespace/NodeName/PodName' type: string kind: description: Kind type: string state: description: experiment state type: string success: description: success type: boolean required: - kind - state - success type: object type: array scope: description: experiment scope for cache type: string state: description: State is used to describe the experiment result type: string success: description: Success is used to judge the experiment result type: boolean target: type: string required: - action - scope - state - success - target type: object type: array phase: description: Phase indicates the state of the experiment Initial -> Running -> Updating -> Destroying -> Destroyed type: string required: - expStatuses type: object type: object served: true storage: true subresources: status: {} ================================================ FILE: deploy/helm/chaosblade-operator-arm64/templates/NOTES.txt ================================================ Thank you for using chaosblade. ================================================ FILE: deploy/helm/chaosblade-operator-arm64/templates/_helpers.tpl ================================================ {{/* vim: set filetype=mustache: */}} ================================================ FILE: deploy/helm/chaosblade-operator-arm64/templates/daemonset.yaml ================================================ # Copyright 2025 The ChaosBlade Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
{{- if .Values.daemonset.enable }} apiVersion: apps/v1 kind: DaemonSet metadata: name: chaosblade-tool labels: name: chaosblade-tool app: chaosblade-tool spec: selector: matchLabels: name: chaosblade-tool app: chaosblade-tool updateStrategy: type: RollingUpdate template: metadata: labels: name: chaosblade-tool app: chaosblade-tool spec: affinity: nodeAffinity: requiredDuringSchedulingIgnoredDuringExecution: nodeSelectorTerms: - matchExpressions: - key: type operator: NotIn values: - virtual-kubelet containers: - name: chaosblade-tool image: {{ .Values.blade.repository }}:{{ .Values.blade.version }} imagePullPolicy: {{ .Values.blade.pullPolicy }} env: - name: KUBERNETES_NODENAME valueFrom: fieldRef: fieldPath: spec.nodeName - name: DOCKER_API_VERSION value: "1.44" - name: CGROUP_ROOT value: "/host-sys/fs/cgroup/" securityContext: privileged: true volumeMounts: - mountPath: /var/run/docker.sock name: docker-socket - mountPath: /opt/chaosblade/chaosblade.dat name: chaosblade-db-volume - mountPath: /etc/hosts name: hosts - mountPath: /var/log/audit name: audit - mountPath: /var/lib/docker name: docker-lib - mountPath: /etc/docker name: docker-etc - mountPath: /run/containerd name: containerd - mountPath: /var/lib/containerd name: containerd-lib - mountPath: /etc/containerd name: containerd-etc - mountPath: /var/run/netns name: netns - mountPath: /host-sys name: sys dnsPolicy: ClusterFirstWithHostNet hostNetwork: true hostPID: true tolerations: - effect: NoSchedule operator: Exists volumes: - hostPath: path: /var/run/docker.sock name: docker-socket - hostPath: path: /var/run/chaosblade.dat type: FileOrCreate name: chaosblade-db-volume - hostPath: path: /etc/hosts name: hosts - hostPath: path: /var/lib/docker name: docker-lib - hostPath: path: /etc/docker name: docker-etc - hostPath: path: /var/log/audit name: audit - hostPath: path: /run/containerd name: containerd - hostPath: path: /var/lib/containerd name: containerd-lib - hostPath: path: /etc/containerd name:
containerd-etc - hostPath: path: /var/run/netns name: netns - hostPath: path: /sys name: sys serviceAccountName: chaosblade {{- end }} ================================================ FILE: deploy/helm/chaosblade-operator-arm64/templates/deployment.yaml ================================================ # Copyright 2025 The ChaosBlade Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. apiVersion: apps/v1 kind: Deployment metadata: name: chaosblade-operator namespace: {{ .Release.Namespace }} spec: replicas: 1 selector: matchLabels: name: chaosblade-operator template: metadata: labels: name: chaosblade-operator part-of: chaosblade spec: dnsPolicy: {{ .Values.network.dns.policy }} hostNetwork: {{ .Values.network.host }} serviceAccountName: chaosblade initContainers: - name: chaosblade-tool image: {{ .Values.blade.repository }}:{{ .Values.blade.version }} imagePullPolicy: {{ .Values.blade.pullPolicy }} command: [ "cp", "-R","/opt/chaosblade", "/home" ] volumeMounts: - mountPath: /home name: chaosblade containers: - name: chaosblade-operator image: {{ .Values.operator.repository }}:{{ .Values.operator.version }} command: ["chaosblade-operator"] args: {{- if .Values.blade.repository }} - '--chaosblade-image-repository={{ .Values.blade.repository }}' {{- end }} {{- if .Values.blade.version }} - '--chaosblade-version={{ .Values.blade.version }}' {{- end }} {{- if .Values.blade.pullPolicy }} - '--chaosblade-image-pull-policy={{ .Values.blade.pullPolicy }}' {{- end }} {{- if 
.Values.env.zapLevel }} - '--zap-level={{ .Values.env.zapLevel }}' {{- end }} {{- if .Values.env.logLevel }} - '--log-level={{ .Values.env.logLevel }}' {{- end }} {{- if .Values.webhook.enable }} - '--webhook-enable' {{- end }} {{- if .Values.daemonset.enable }} - '--daemonset-enable' {{- end }} {{- if .Values.remove.blade.interval }} - '--remove-blade-interval={{ .Values.remove.blade.interval }}' {{- end }} {{- if .Values.blade.downloadUrl }} - '--chaosblade-download-url={{ .Values.blade.downloadUrl }}' {{- end }} - '--chaosblade-namespace={{ .Release.Namespace }}' imagePullPolicy: {{ .Values.operator.pullPolicy }} env: - name: WATCH_NAMESPACE value: "" - name: POD_NAME valueFrom: fieldRef: fieldPath: metadata.name - name: OPERATOR_NAME value: "chaosblade-operator" ports: - containerPort: 9443 protocol: TCP volumeMounts: - mountPath: /tmp/k8s-webhook-server/serving-certs name: cert readOnly: true - mountPath: /opt name: chaosblade volumes: - name: cert secret: defaultMode: 420 secretName: chaosblade-webhook-server-cert - name: chaosblade emptyDir: {} ================================================ FILE: deploy/helm/chaosblade-operator-arm64/templates/rbac.yaml ================================================ # Copyright 2025 The ChaosBlade Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
apiVersion: v1 kind: ServiceAccount metadata: name: chaosblade labels: name: chaosblade namespace: {{ .Release.Namespace }} --- apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRole metadata: name: chaosblade labels: name: chaosblade rules: - apiGroups: - '' resources: - pods - pods/exec - configmaps - secrets - services - persistentvolumeclaims - persistentvolumes verbs: - "*" - apiGroups: - '' resources: - nodes verbs: - get - list - watch - update - apiGroups: - apps resources: - daemonsets - deployments - statefulsets verbs: - "*" - apiGroups: - chaosblade.io resources: - chaosblades - chaosblades/status verbs: - "*" --- apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRoleBinding metadata: name: chaosblade labels: name: chaosblade roleRef: kind: ClusterRole name: chaosblade apiGroup: rbac.authorization.k8s.io subjects: - kind: ServiceAccount name: chaosblade namespace: {{ .Release.Namespace }} ================================================ FILE: deploy/helm/chaosblade-operator-arm64/templates/secret.yaml ================================================ # Copyright 2025 The ChaosBlade Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
{{- $ca := genCA "chaosblade-webhook-server-ca" 3650 }} {{- $cn := "chaosblade-webhook-server" }} {{- $dns1 := printf "%s.%s" $cn .Release.Namespace }} {{- $dns2 := printf "%s.%s.svc" $cn .Release.Namespace }} {{- $cert := genSignedCert $cn nil (list $dns1 $dns2) 3650 $ca }} apiVersion: admissionregistration.k8s.io/v1 kind: MutatingWebhookConfiguration metadata: name: chaosblade-operator namespace: {{ .Release.Namespace }} labels: app: chaosblade-operator chart: "{{ .Chart.Name }}-{{ .Chart.Version }}" release: "{{ .Release.Name }}" heritage: "{{ .Release.Service }}" webhooks: - clientConfig: caBundle: {{ $ca.Cert | b64enc | quote }} service: name: chaosblade-webhook-server namespace: {{ .Release.Namespace }} path: /mutating-pods name: "{{ .Chart.Name }}.{{ .Release.Namespace }}.svc" failurePolicy: Ignore rules: - apiGroups: - "" apiVersions: - v1 operations: - CREATE - UPDATE resources: - pods sideEffects: None admissionReviewVersions: ["v1beta1"] --- apiVersion: v1 kind: Secret metadata: name: chaosblade-webhook-server-cert namespace: {{ .Release.Namespace }} labels: app: chaosblade-operator chart: "{{ .Chart.Name }}-{{ .Chart.Version }}" heritage: {{ .Release.Service }} release: {{ .Release.Name }} type: kubernetes.io/tls data: tls.crt: {{ $cert.Cert | b64enc | quote }} tls.key: {{ $cert.Key | b64enc | quote }} ca.crt: {{ $ca.Cert | b64enc | quote }} ================================================ FILE: deploy/helm/chaosblade-operator-arm64/templates/service.yaml ================================================ # Copyright 2025 The ChaosBlade Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. apiVersion: v1 kind: Service metadata: name: chaosblade-webhook-server namespace: {{ .Release.Namespace }} spec: ports: - port: 443 targetPort: 9443 selector: name: chaosblade-operator ================================================ FILE: deploy/helm/chaosblade-operator-arm64/values.yaml ================================================ # Copyright 2025 The ChaosBlade Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # Default values for chaosblade. 
# chaosblade-operator operator: repository: ghcr.io/chaosblade-io/chaosblade-operator-arm64 version: 1.8.0 # image.pullPolicy: must be Always|IfNotPresent|Never pullPolicy: IfNotPresent # qps of kubernetes client qps: 20 reconcileCount: 20 blade: repository: ghcr.io/chaosblade-io/chaosblade-tool-arm64 version: 1.8.0 pullPolicy: IfNotPresent downloadUrl: "" env: logLevel: info webhook: enable: true daemonset: enable: true remove: blade: interval: 72h network: host: false dns: policy: ClusterFirst ================================================ FILE: deploy/olm/Makefile ================================================ # Copyright 2025 The ChaosBlade Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
.PHONY: build clean BLADE_VERSION=0.6.0 # Build [OLM](https://github.com/operator-framework/operator-lifecycle-manager) build: operator-sdk olm-catalog gen-csv \ --operator-name chaosblade-operator \ --csv-version $(BLADE_VERSION) \ --update-crds --verbose # Change `olm` keyword to `chaosblade-operator` sed 's/olm/chaosblade-operator/g' deploy/olm-catalog/olm/olm.package.yaml \ > deploy/olm-catalog/chaosblade-operator/chaosblade-operator.package.yaml rm -rf deploy/olm-catalog/olm clean: rm -rf deploy/olm-catalog ================================================ FILE: deploy/olm/deploy/crd.yaml ================================================ # Copyright 2025 The ChaosBlade Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. apiVersion: apiextensions.k8s.io/v1beta1 kind: CustomResourceDefinition metadata: name: chaosblades.chaosblade.io spec: group: chaosblade.io names: kind: ChaosBlade listKind: ChaosBladeList plural: chaosblades singular: chaosblade shortNames: [blade] scope: Cluster subresources: status: {} validation: openAPIV3Schema: properties: apiVersion: description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#resources' type: string kind: description: 'Kind is a string value representing the REST resource this object represents. 
Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds' type: string metadata: type: object spec: properties: experiments: description: 'INSERT ADDITIONAL SPEC FIELDS - desired state of cluster Important: Run "operator-sdk generate k8s" to regenerate code after modifying this file Add custom validation using kubebuilder tags: https://book-v1.book.kubebuilder.io/beyond_basics/generating_crd.html' items: properties: action: description: Action is the experiment scenario of the target, such as delay, load type: string desc: description: Desc is the experiment description type: string matchers: description: Matchers is the experiment rules items: properties: name: description: Name is the name of flag type: string value: description: 'TODO: Temporarily defined as an array for all flags Value is the value of flag' items: type: string type: array required: - name - value type: object type: array scope: description: Scope is the area of the experiments, currently support node, pod and container type: string target: description: Target is the experiment target, such as cpu, network type: string required: - scope - target - action type: object type: array required: - experiments type: object status: properties: expStatuses: description: 'Important: Run "operator-sdk generate k8s" to regenerate code after modifying this file Add custom validation using kubebuilder tags: https://book-v1.book.kubebuilder.io/beyond_basics/generating_crd.html' items: properties: action: type: string error: type: string resStatuses: description: ResStatuses is the details of the experiment items: properties: error: description: experiment error type: string id: description: experiment uid in chaosblade type: string kind: description: Kind type: string name: description: resource name type: string nodeName: description: NodeName type: string state: 
description: experiment state type: string success: description: success type: boolean uid: description: resource uid type: string required: - state - kind - success type: object type: array scope: description: experiment scope for cache type: string state: description: State is used to describe the experiment result type: string success: description: Success is used to judge the experiment result type: boolean target: type: string required: - scope - target - action - success - state type: object type: array phase: description: Phase indicates the state of the experiment Initial -> Running -> Updating -> Destroying -> Destroyed type: string required: - expStatuses type: object version: v1alpha1 versions: - name: v1alpha1 served: true storage: true ================================================ FILE: deploy/olm/deploy/crds/chaosblade_v1alpha1_chaosblade_crd.yaml ================================================ # Copyright 2025 The ChaosBlade Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. apiVersion: apiextensions.k8s.io/v1beta1 kind: CustomResourceDefinition metadata: name: chaosblades.chaosblade.io spec: group: chaosblade.io names: kind: ChaosBlade listKind: ChaosBladeList plural: chaosblades singular: chaosblade scope: Namespaced subresources: status: {} validation: openAPIV3Schema: properties: apiVersion: description: 'APIVersion defines the versioned schema of this representation of an object. 
Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#resources' type: string kind: description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds' type: string metadata: type: object spec: properties: experiments: description: 'INSERT ADDITIONAL SPEC FIELDS - desired state of cluster Important: Run "operator-sdk generate k8s" to regenerate code after modifying this file Add custom validation using kubebuilder tags: https://book-v1.book.kubebuilder.io/beyond_basics/generating_crd.html' items: properties: action: description: Action is the experiment scenario of the target, such as delay, load type: string desc: description: Desc is the experiment description type: string matchers: description: Matchers is the experiment rules items: properties: name: description: Name is the name of flag type: string value: description: 'TODO: Temporarily defined as an array for all flags Value is the value of flag' items: type: string type: array required: - name - value type: object type: array scope: description: Scope is the area of the experiments, currently support node, pod and container type: string target: description: Target is the experiment target, such as cpu, network type: string required: - scope - target - action type: object type: array required: - experiments type: object status: properties: expStatuses: description: 'Important: Run "operator-sdk generate k8s" to regenerate code after modifying this file Add custom validation using kubebuilder tags: https://book-v1.book.kubebuilder.io/beyond_basics/generating_crd.html' items: properties: action: type: string error: type: string resStatuses: description: 
ResStatuses is the details of the experiment items: properties: error: description: experiment error type: string id: description: experiment uid in chaosblade type: string kind: description: Kind type: string name: description: resource name type: string nodeName: description: NodeName type: string state: description: experiment state type: string success: description: success type: boolean uid: description: resource uid type: string required: - state - kind - success type: object type: array scope: description: experiment scope for cache type: string state: description: State is used to describe the experiment result type: string success: description: Success is used to judge the experiment result type: boolean target: type: string required: - scope - target - action - success - state type: object type: array phase: description: Phase indicates the state of the experiment Initial -> Running -> Updating -> Destroying -> Destroyed type: string required: - expStatuses type: object version: v1alpha1 versions: - name: v1alpha1 served: true storage: true ================================================ FILE: deploy/olm/deploy/olm-catalog/chaosblade-operator/0.5.1/chaosblade-operator.v0.5.1.clusterserviceversion.yaml ================================================ # Copyright 2025 The ChaosBlade Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
apiVersion: operators.coreos.com/v1alpha1 kind: ClusterServiceVersion metadata: annotations: capabilities: Basic Install categories: Chaos Engineering containerImage: chaosbladeio/chaosblade-operator:0.5.1 createdAt: 2020-02-11T15:40:00Z certified: "false" support: chaosblade.io repository: https://github.com/chaosblade-io/chaosblade-operator description: A chaos engineering operator for cloud-native on Kubernetes environments. alm-examples: |- [ { "apiVersion": "chaosblade.io/v1alpha1", "kind": "ChaosBlade", "metadata": { "name": "delay-pod-network-by-names" }, "spec": { "experiments": [ { "scope": "pod", "target": "network", "action": "delay", "desc": "delay pod network by names", "matchers": [ { "name": "names", "value": [ "redis-slave-674d68586-jnf7f" ] }, { "name": "namespace", "value": [ "default" ] }, { "name": "local-port", "value": [ "6379" ] }, { "name": "interface", "value": [ "eth0" ] }, { "name": "time", "value": [ "3000" ] }, { "name": "offset", "value": [ "1000" ] } ] } ] } } ] name: chaosblade-operator.v0.5.1 namespace: kube-system spec: apiservicedefinitions: {} customresourcedefinitions: owned: - description: Chaos engineering experiment definition displayName: ChaosBlade kind: ChaosBlade name: chaosblades.chaosblade.io version: v1alpha1 description: > ## Introduction Chaosblade Operator is a chaos experiments injection tool for cloud-native on kubernetes platform. By defining Kubernetes CRD to manage chaos experiments, each experiment has a very clear execution status. The tool has the characteristics of simple deployment, convenient execution, standardized implementation, and rich experiments. The chaos experimental model in chaosblade is well integrated with Kubernetes, which can realize the reuse of experiments such as basic resources, application services, and containers on the Kubernetes platform, which facilitates the expansion of resource experiments under Kubernetes, and can be executed uniformly through chaosblade cli tool. 
## Supported experiments (continuously adding ...) The current experimental scenarios involve resources including Node, Pod, and Container. The specific supported experimental scenarios are as follows: * Node: * CPU: specify CPU usage * Network: specify network card, port, IP, etc. packet delay, packet loss, packet blocking, packet duplication, packet re-ordering, packet corruption, etc. * Process: specify process Hang, kill process, etc. * Disk: specify the directory disk occupation, disk IO read and write load, etc. * Memory: specify memory usage * Pod: * Network: specify network card, port, IP, etc. packet delay, packet loss, packet blocking, packet duplication, packet re-ordering, packet corruption, etc. * Disk: specify the directory disk occupation, disk IO read and write load, etc. * Memory: specify memory usage * Pod: kill pod * Container: * CPU: specify CPU usage * Network: specify network card, port, IP, etc. packet delay, packet loss, packet blocking, packet duplication, packet re-ordering, packet corruption, etc. * Process: specify process Hang, kill process, etc. * Disk: specify the directory disk occupation, disk IO read and write load, etc. * Memory: specify memory usage * Container: remove container ## Install and uninstall Chaosblade operator can be installed through kubectl or helm, the installation method is as follows: Note: For the following `VERSION`, please use the latest version number instead ### Helm v2 * Download the latest `chaosblade-operator-VERSION-v2.tgz` package at [Release](https://github.com/chaosblade-io/chaosblade-operator/releases) * Install using `helm install --namespace kube-system --name chaosblade-operator chaosblade-operator-VERSION-v2.tgz` * Use `kubectl get pod -l part-of=chaosblade -n kube-system` to check the installation status of the Pod. 
If both are running, the installation was successful * Use the following command to uninstall, pay attention to the execution order: ```shell script kubectl delete crd chaosblades.chaosblade.io helm del --purge chaosblade-operator ``` ### Helm v3 * Download the latest `chaosblade-operator-VERSION-v3.tgz` package at [Release](https://github.com/chaosblade-io/chaosblade-operator/releases) * Use `helm install chaosblade-operator chaosblade-operator-VERSION-v3.tgz --namespace kube-system` command to install * Use `kubectl get pod -l part-of=chaosblade -n kube-system` to check the installation status of the Pod. If both are running, the installation was successful * Use the following command to uninstall, pay attention to the execution order: ```shell script kubectl delete crd chaosblades.chaosblade.io helm uninstall chaosblade-operator -n kube-system ``` ### Kubectl * Download the latest `chaosblade-operator-yaml-VERSION.tar.gz` package at [Release](https://github.com/chaosblade-io/chaosblade-operator/releases) * After decompression, execute `kubectl apply -f chaosblade-operator-yaml-VERSION/` installation * Use `kubectl get pod -l part-of=chaosblade -n kube-system` to check the installation status of the Pod. If both are running, the installation was successful * Use the following command to uninstall, pay attention to the execution order: ```shell script kubectl delete crd chaosblades.chaosblade.io kubectl delete -f chaosblade-operator-yaml-VERSION/ ``` ## How to use You can run chaos experiments after installing the chaosblade operator. There are three ways to execute chaos experiments: * By configuring yaml file, use kubectl to execute * Executed using chaosblade cli tool * Use Kubernetes API to execute by writing code The following uses a specific case to illustrate the use of chaosblade-operator: simulate cn-hangzhou.192.168.0.205 node local port 40690 60% network packet loss. 
### By configuring the yaml file, use kubectl to execute ``` apiVersion: chaosblade.io/v1alpha1 kind: ChaosBlade metadata: name: loss-node-network-by-names spec: experiments: - scope: node target: network action: loss desc: "node network loss" matchers: - name: names value: ["cn-hangzhou.192.168.0.205"] - name: percent value: ["60"] - name: interface value: ["eth0"] - name: local-port value: ["40690"] ``` Execute experiment: ``` kubectl apply -f loss-node-network-by-names.yaml ``` Query the experimental status, the returned information is as follows (spec and other contents are omitted): ``` ~ » kubectl get blade loss-node-network-by-names -o json { "apiVersion": "chaosblade.io/v1alpha1", "kind": "ChaosBlade", "metadata": { "creationTimestamp": "2019-11-04T09:56:36Z", "finalizers": [ "finalizer.chaosblade.io" ], "generation": 1, "name": "loss-node-network-by-names", "resourceVersion": "9262302", "selfLink": "/apis/chaosblade.io/v1alpha1/chaosblades/loss-node-network-by-names", "uid": "63a926dd-fee9-11e9-b3be-00163e136d88" }, "status": { "expStatuses": [ { "action": "loss", "resStatuses": [ { "id": "057acaa47ae69363", "kind": "node", "name": "cn-hangzhou.192.168.0.205", "nodeName": "cn-hangzhou.192.168.0.205", "state": "Success", "success": true, "uid": "e179b30d-df77-11e9-b3be-00163e136d88" } ], "scope": "node", "state": "Success", "success": true, "target": "network" } ], "phase": "Running" } } ``` From the above, you can clearly see the running status of the chaos experiment. Run the following command to stop the experiment: ``` kubectl delete -f loss-node-network-by-names.yaml ``` Or delete this blade resource directly: ``` kubectl delete blade loss-node-network-by-names ``` You can also edit the yaml file to update the content of the experiment and the chaosblade operator will complete the update of the experiment. 
See more examples: [Examples](https://github.com/chaosblade-io/chaosblade-operator/tree/master/examples) ### Execute with chaosblade cli tool ``` blade create k8s node-network loss --percent 60 --interface eth0 --local-port 40690 --names cn-hangzhou.192.168.0.205 --kubeconfig config ``` If the execution fails, a detailed error message is returned; if the execution is successful, the experiment UID is returned: ``` {"code":200,"success":true,"result":"e647064f5f20953c"} ``` You can query the status of the experiment with the following command: ``` blade query k8s create e647064f5f20953c --kubeconfig config { "code": 200, "success": true, "result": { "uid": "e647064f5f20953c", "success": true, "error": "", "statuses": [ { "id": "fa471a6285ec45f5", "uid": "e179b30d-df77-11e9-b3be-00163e136d88", "name": "cn-hangzhou.192.168.0.205", "state": "Success", "kind": "node", "success": true, "nodeName": "cn-hangzhou.192.168.0.205" } ] } } ``` Destroy experiment: ``` blade destroy e647064f5f20953c ``` In addition to the above two methods, you can also use the kubernetes client-go api for execution. For details, please refer to: [executor.go](https://github.com/chaosblade-io/chaosblade/blob/master/exec/kubernetes/executor.go) code implementation. [Chinese documentation](https://chaosblade-io.gitbook.io/chaosblade-help-zh-cn/blade-create-k8s) ## Questions & Suggestions If you encounter problems during installation and use, or suggestions and new features, all projects (including other projects) can be submitted to [Github Issues](https://github.com/chaosblade-io/chaosblade/issues) You can also contact us via: * Dingding group: 23177705 * Gitter room: [chaosblade community](https://gitter.im/chaosblade-io/community) * Email: chaosblade.io.01@gmail.com * Twitter: [chaosblade.io](https://twitter.com/ChaosbladeI) ## Contributions We welcome every issue and PR. 
Even a fix as small as a punctuation mark is appreciated. To learn how to participate and contribute, please read the project's contributing document, or contact us through the channels listed above.
gcy9/ybXLFQPHAQAAAAAAAAA4BFpc5uQTtLQEQgrW2vITzdDdQjjnW3qOGDgOAAAAAAAAAMAhUUGXg4+fODOk4dxgch8OsGf9S6+HqZ77+Imd4N1Xz/UaOAsPfen1y/MGjgMAAAAAAAAAcEhU0HXYx07s7Dc3b2QCHyiDYNVzTmTCyL29cC/hHAAAAAAAAAAUFhV0HfSxE2fGRORKMh8IsG/93kDVcx+zUz3nW1sO3Hv98kbk4wAAAAAAAAAAHBEVdB3y0RM7+1KdT+LDAMUxG+pIDVXPTXwZ4RwAAAAAAAAAFBoVdB3wkRNnfEgwWvgPAhTLTiXZlwcIqz5ip3pu+cuvX65EPgYAAAAAAAAAwDF1cQKPh3AOiGY+RDinrFTPBdtvDwAAAAAAAACQH1pcHsOHCeeAmIKFVU5kzMCVnvvH1y8vGTgOAAAAAAAAAMAxEdAd0YcI54CYlp99/fJaiPf/0IkzPpzrN3C1qZ4DAAAAAAAAgETQ4vIICOeA6GYDHoCJ6rlQgSQAAAAAAAAAIH9U0B3SBwnngNg2/f5zIY7hgyfODIjIKQOfmeo5AAAAAAAAAEgIAd0hfODEmQnCOSC6+edev7wR4iCcyISBzzv3XKrnAAAAAAAAACAptLg8oA8096G6WIiDBdIWpHpOjRg4k1TPAQAAAAAAAEBiMucc1/QO3n/izJCIPGb6IPPhWwmuiMiG/lN2/TvKxVeTDcf+m3ze9ct9Id7o/SfO+HDu0RDvtY/l512/XIl8DAAAAAAAAACADqPF5R28v7kH1ZLpg+yMdf2cK63X8wK1EUQxvP/EmSkDB1q26jkL5xwAAAAAAAAA0GEEdPtYP3GmTwOBXrMHeXSb+tl8KLfUzx5X2IfeC4MGzlGwsNzFD+jW+69fDhlIAgAAAAAAAAACIaDbh2tWr1gIJTqlFcrNDzDxj0NwIlbaLAb5u11rtrWNHcxzjwIAAAAAAABAogjobmPtxJkxERk1eXCHtywiswPXL88W7cBhxpCBA1kdCNd2dSzQ++yH9pYAAAAAAAAAkCgCuj080ayeSWFyfM5/judfv7xi4FhQYEYq6IJVlBn4vKvPp+0sAAAAAAAAACSLgG4PTmS24PvO+WBu8gVM8KNDnI0KuiD7zz1uY789ql0BAAAAAAAAIGEEdLs8fuLMZIH3nSOYQ8c9fuLMgIXA+gXXLwcJ6MRGtWCozwoAAAAAAAAAiICArs37ToxXRLLzZg7o4Pwec5NfcX2GSX10nJPMxP5zod7ISRY7oFv/iusztKUFAAAAAAAAgIQR0LVxxWsrt+mDua+8PpPCfnkwqkztLcXG/nPB9toDYF81q/kq5gE90Pbvp/b//CA2RKQ9/F/R/0wWXZ0FPgAAAAAAAIER0Km/OzHuW1v2mziYg/FVc2P/5PoM7SyRKyMBXbCKMhe/xS0T5UDJVLOa3/tySF8Dbf/s9LhkeK//sJrVWv+63Bbk+fHFyqKrU9ELAAAAAACQAwK6ZjjnJ8GK0tpyp2run1A1h3D6DJzrIEH03+20uY2OgA5IXDWrVTSEa/3TygKhU/rPm0GehnerGtotEdoBAAAAAAB0BgFdsVpbrovIyFexPxUCck9N2EbzVYH2VzRQLbj6VddnNiIfA4AOq2a1VhhXuV0Vm3GD+hqV5ufZ1LDOv+YXXZ1qfgAAAAAAgEMqfUD33hPjI2IggDiABd/S8oVM3iOg954Yt1A9tx7qjdzh9nPKA+E7kAgN5cb8wpqCtdA+iF4NGv3rYjWrrev+mbNU1wEAAAAAABxM6QM6J1KEVpFzX319ZszAcaBkjOw/F6wyw8DnZWIbKLDEQ7n9+M96zr+0um6WsA4AAAAAAGB/pQ7o3nNifLIAE2inv+b6TFFacCIxzsbHCTbBSwUdgMOqZrU+DeQmtA1k2fW2hXW+sm5Kwzo6AAAAAAAAALQpbUD3nhPjAzqZZhnhHGKrGDiGkJO6UQP7rwm01x6A46tmNT+OmNRwrpd
Tuif/nXpR22DOaVDH9xwAAAAAACg9KXNA55qTapYn1E5/LeEcIitTBd27m6F9TMH22gNwdNWsVtE2lqOcxkPx52u0mtWWfbBJUHcw+vc2ogtmNhZd3cLCGQAAclHNakYeQQtrVRe4runLP0uv0XY8H9pJo6KvEV2MNpniZwVSx3MXYiplQPfXJ8YrxifWTv9TwjkYYKDlo4SqoDPwWYPttQfg8HR/Od+u8RSn71j8+btKULc3rcwcaZvsaV9MtmzpWAEAgDmtdutPG69Ws5roOMKPu1YWXX2eS3c0+kzQmsjnuQAoqF3PXcO7PgXPXQiqrBV0lle0EM7BEgsBXVkQ0AEGtbWypGKuswjqVDWrjbRN9FjfGxkAABTTqVagpIHdgoj4oG6evYJvr22/6dbiKcZqQAHtUfHKvQwzShfQ/dWJ8THDq1we+TrCORhiob/I1wXal82JDIV4n30Q0AGG6ADe71V7nuuSq1ZQ5/eomyjDBBErrwEAgAHD+rpSzWoLGtQxH/XUWG1EX4MH+K8AMIjnLhRFqQK6d50Y7zNcPTd33/WZKQPHAdxUpg0AnEhf5ENg1SJghFY0TbGqLihfoThSzWq+mi6p8VDbas0RVl4DAACDdsI6Pw7z+6j5cXCZquq0Y0b7WK33AP81AMbsqngd4V5GUZStgm7C6KTI6n3XZ8YMHAewW+zQqkzYuBuITAf0s3v0oEcY/gHqogakY4uuXtjKYlZeAwCAAurX7hET1aw2lXJQ19ZivMJYDSiualZrb1vJvYxCKk1A95cnxgeMtqna1C8SwBxXoh+3MlULAriVPqTPssrOBN9+ZKVI1XRtK69ZrQkAAIquN7WgTsdqrVCOxXhAQVHxihSVJqBzzVZVFo0MXp+htR1MKlmLSwAlpFVzU9piEXa0qukqWk1nbqykxzbCymsAAJCoVlA3Vs1qfq/g+aJ8TFqMA+mg4hWpK0VAt3Ji3OoKmUtD12eWDBwHsCcCunD4LgDC0zaEswzyTfPjt6VqVvMhnbVWwFcNHAMAAEDefLj1aDWrLReoDbkf5z9q4DgAHB/3MpLWVYbL66vnnE7AG3qtDl2fmTBweoDbsnC/hFKWzwmgSVfhLRHOFcKghnQjZT8RAAAAEbXakDMmAwCgQ5IP6B7rGR8TJ4PW0jlxQjgH+0joSOiABPm9zXQVHv3qi6NXV26Plf1EAAAARNQak81yEQAAOL6kA7q/6BnvM1o9d+n+G7Szg33kc+RzQGp0MuE8F7awrhDSAQAARDdazWorutcbAAA4oqQDOicy6UR6jYVzm/64DJwe4I4I6AjogFT4yYNqVvOLY0a5qIVHSAcAABCfb0O+pvs6AwCAI0g2oPvznvEBETln4FB2m3jgxsyGrUMCbmuVUwOg6HRl75Lum4E0ENIBAADE16t7BRPSAQBwBMkGdE5k1ljlnH+tPnBjhj7dKAwnskEFXZjXnzUXFQDosLZwbpBzmxxCOgAAgPgI6QAAOKIkA7p39oxXnMgpgwHdhIHTAxyYhfsmFAOflYAO6DDCuVLwId1I2U8CAABAZIR0AAAcQaoVdBar1BZedGNmycBxAIXiA/dAx7vCXwaQDsK5UpllMggAACA6QjoAAA4puYDuT3vGJ5xIP9VzwPFZuX9CMNDOM1QQCZTFPOFcafRqSNdX9hMBAAAQGeMyAAAOIamA7v/tGe9zIpMGw7kL//zGzJqBUwQcihNZKlFAF/vFAwzQIdWs5ivpT3E+S2XQaAcFAACAsmFcBgDAAXWndKKcyJSu1rFkXZrHBRROqHDsDirapi5XLn6LS9qAAB1QzWqTIjJagnO5eYTWvKmHlsN+P7pFV583cCwAAABl5sdlE4uuznwYAAD7SCag+5Oe8SGjE3KT33BjZsPAcQDYh79P/6RnPOYpotoHOCYfzojI+YTOo1/ks6aLFNb0tbLo6scaV2jLIT9uav3TvwYSaQnqWyoNHPccAQAA4NguVrPa0qKrs987AAC
3kUxA17BZpbb8jTdmKOtHYTWak8KxJ7uDVZY1mpPh/aHeb7d39IwPfeONGR5egCPQzeiL/pu7rnvnLWkQl0t7bA2vWpXJN6vN2oI7X7k8UtDArlf/DkYMHAsAAEDZzdItBgCA20sioHtHz/iI0eqTSQPHABRdyL3Z1mIGdDopTkAHHJIGS7MG21wfxKoe+3xegdxBtQV3/jWp53VEX8Mxj+2QfEulyqKr594eGQAAAPsa9C3oF12d+TEAAPZQ+IDuj3vG+4zu8Tb3L27MMDGEQjOwL5uEDN9dM6CLGfZX2LMSOJKpglV7bWooN2u55Y8GdrOttpEiMiYiEwUJQme1bScAAADimqhmtSlakAMAcKvCB3SuOVEUs+JlL5s6gQUU2r+4MbPx9rj7su14e8943zcF2MtRA7qYhkN91pDe3twjtGPB4zfdmKkU4oMjCN13zuIetHtZ13thtmgTFFrd56vqpnSMYz2o669mtbFFV6fVOAAAuJ3lRVcv5LOF7xag/zrU9rK6YK1Xx8BjBo4FAABTCh3Qvb1nfMBoEDaV2gQ7Sm3VwEB/qG2/pDxZ2HOv0r4nVCL6jLYhRsG1tba0zi/c8a19Cl8hq8Fie1AX+ztzP5MJ7EsIAABwi7ZW3jefk423Jx/VVpexF8UCAGBKV5EvhxOZdCK9rvnvVl7r33Rjht7aSIYT2TBwbwXZVNpX0Bn4rMmtKuz0OQLaFGHfuUu+1WIK4Vw7H9TpXiL360IOi3aq6IweGwAAQEfp+Mx3avAB3TNF5IIuFLOCuTIAAHYpbED3Rz3jFScyaiyca7XcBJLhRJbKEtB9840ZCwHd8B8199ZM6W+IgA4dp60tra0MbudDq/sXXX0i5f02/B56i64+pEGkRYzLAABA6bQtphowFNSNapUfAABQRa6gs7jyZvl/ujGTWms6wEILiiABnVoO+F63Q8UHsA99sLdckXbBh1Y+vDJwLEH4IFJEThtbpe0NVrNayN8QAAAAM9qCuiEjXQ941gUAoE0hA7rlnvExJ3KK6jkgf0baPgbbA8+JrPBd0llU0CEH/h7pN3hifTj1kE6ClI5vqaT7aFoL6RifAQCAUvN7v2nXg7nI54FxGQAAbQoX0C31jPc5kSmD4dylUzdmSrNSHuVx6saMhRaX/t6vhDjpRlp69i/1jCezspCADp1UzWq+Tc95gyfVr0iuLLp6qSvptWrQWkg3YuAYAAAAolt09bHIIV0/3Q0AAHhKESvo/GqbXgPH0W6TzW6ROAutMIIEdCJiJWhnZSGwN4utLVvhHAt1bIZ0vbpnIQAAQOkZCOlocwkAgCpUQHe1Z3zAiZw3WD03Wbkxs2HgFAG5MNL2MUhAV7kx41t6rhv4vINXE6mio4IOnVLNav57YNjYCW2Fc4wD2mhIZ2mhAQEdAACA0pAu1kLcUItvAQAwr1ABndHWlusP3pixuJof6BgjAd2pUFfUSJtL/0qiMpeADh1k7Z4gnNuH7kl3ycjhMBEEAADwdLEWhA5Ws1of1wIAgAIFdG/rGa84kWGDAR2l+UielcDqbeXah86/+t/WM174VpcEdOgErZ4LFtQfAOHcASy6+oSRNsn9un8hAAAAnup4EKvVJYunAAClJ0UK6IxWzy3/yxszSwZOD5Crf3ljxkIFnX8FaVHmROYNfc9M/kHPeKEnlQno0CGWquf83mpjhHMHZmUxExNBAAAATxdrjD3EdQAAoCAB3R80K0gGDRzKblTPoUyWDXzWIAHdv2ruKWmh4sPrFRHa6KLUDFbPjeiKYxyAnqsLBs4VE0EAAABtFl19TUQWIpwTFk4BAErP67Z+Fn6/Z7zP4J4z3oVvuTGzZuA4gCB820cDE+T9v98zPhDi3nMifu+ki3m/zwEN/37P+Ni33JiZNXI8h0LVGzrA0oKYC4uuTvX84VlYaMC4DQAA4Fbz/pkz8Hmh9TgAoPSkCAGda4ZzvQYOpd0mFS0oG9cctJ838LFHQtx/+nmtBHT
e1O/1jC99awEXBhDQ4Th037BRIydxddHVLS4aMk/bgXLuAGAX/Z0b0CrfPn0dtuJ3rW0Rws4iEhaTADgE/+x7JfAJ6+cCAQBgPKD7vea+S+cMHMpuE9/abIEHlMa33phZ+b2e8U0DgflYiIDOB2G/1zO+aqi9rj/v87/XM17h+wclY6l6LkibXQBAmqpZbUjbug3pq1PjzPYuFzsL6qpZzf9jXURW9OUDuxX2TwWwm/9eqGa14M++1azWx3cSAKDsTAd02mLOmtVvK2ibOeC4tM1l6NYXuw3+bs/4wLeVr82l6APTVNH2v6SCDsdk5e/9gu7RAcQyVM1qoSti2t/PT/Cvsf/i7VWz2pTBvQ439Nq1+Gu6wXUMQ6vjRjSUq0RY6Navr+G24G5V/w7mqbI7Ht0jd3fFYxH31ZpddHXmOBAjKBvaNdZAAIxXAByGLjDr0//KXuOc1lho5Ta/Je2dHlgstgezAd1be8YrBva72suEvUMCwnBxetPvZSxEqzSDbS690bf2jK+9+MZMYVrFEdDhqKpZbcRI+5t1WlvCgN4IY+Nb3k+rcloT/DsvHrJuGjL6/NI+dmuvrlptq6xaYhFCZ7SFcmOGOjG0G9TXuWpW29TWdj6sm7dziLb4Kh+9vyttFZDWtuE4DgISiNjYcx5hMF4BcAtdeDTU1nr9sOOdA32v6H293BbMr2hwV9p723IFncUVXAsvvjHD4BVlFqM3/V6CBHQvvjGz9tae8WWDg9fzGtKx0hWps1I9x+Ic4OluTvBL8yFrQSf4+V0qltZ13Nnns62yaorJr8OrZrUx/d0q0gR3r17/0WpWW9dn8Fmu/9OC1hFCCwCIivEK0GG6+KjS9gq9qKw1troZzOvCsaW2Tg+lub9NBnS/0zM+aXDD2E0m6FB2L74xs/E7NgKr/t/pGa+8JEBgrm0uLT6UX/mdnnF5SQFCOirocBQ6YLRQsbtMVQFwR/5eHdaWSVNM8BdWe2XVqk58EbruQ3+rJjSYs/b8elj9WrFwvprV5vxiuLLdx3o9R/SaWqx+BAAwXgGOpACLj3pbz5W+m5kuHpvXZ8uk292aC+h+u2e8z2gQNvXSAHteAdZp20cLX+RjIdqx+ADst3vGp4y2sbny2z3j8lLjIR0BHY5oxMiJo7UlcHC9bRP8l3SCn/aXxeQnvq60ha5TXMuntAVzE4m1OmxpVdWVIqjTCatW0Jri9QSAVDFeAfbRtvioaF0eRBePndMwvhXWJVk5ay6gc80vVGuD4nX9ogdKz9C+bKO/1TM++bIAwbl+L53P+32O6Mpv9YwPGN87wtoG1CgGCwHd6qKr09oaOBr/MDXmJ0zYw7HQWqHrBNeySVtZWuz4kodWUJdk4K6TVpOtdr0AgMJivAK0SXDxUXtYt6zj0mTmakwFdP+tZ3yo1VPYmMlvvzHDCgxARHwg9t/s7MsWZC86bXNpNaCTVrWCgeMAOsJQe0sW5wDH06vVdDurNlNvTZK41rUc02tZusUL1aw2pL8LZdyPrBW4T6TSRsx/Fn2OoGIOaKpwHpCA0o9XUG4azE0azVc6xY/Fr2pV3WQKY9MuA8dwk69ScdoOzdBr+dsLsMcTEJIPrIzcoxOLzb**fr2GzNrTmTO4PdTKV8oBQvVc5vsZQB0jG8/9JhOiKPY+vWBeF4XU5RCNav5iY7HShrOtfRqG7GlIl97H7RWs9qKdgQhnAOeMhDhXLBwB3kp5XgF5eX/zqtZzc9fPJF4ONeuX8emK9WsVuhFJmYCusWecasbFDKRANzKt7ncNHBeegNO5NMiAQjHwuCKcA7ovItMlCTDVzkX/mH4TnSyY4lOBU/jn9nXtDK2ULSiYkkXDQBQWnERvG0ve4UhgFKMV1BuupBsrUTB3G6DbYF8jMUmx2YioPvNnvE+o9Vzc9UbM6zoAXap3pjZ8HvRGblPgwRn1WYV3XLZq9csvFAKFib9aG8J5MNPlBS6Agc3tVanJ7mgUSfz1kpeNXc7
fpHco36fH5uHdytdVX6FqjlgTzHCi1UuBQJJeryC8mrrCnCe8c2OViBfuHvdxB50zu1UqVnbZHuT6jng9pzbqS6xsDqjf6F7fGx4K/9WtM7thIFX834foMx0j5/Yg8vVRVdfK/eVAHI1qBU4FfalS4KvjBxadPWxVD6QVlpdMXAo1p3TlcpjVithdDEAVXPA/mJ8f1M9h9CSG6+EoL+jQ/pWA5Ha4WIXrZqjw8OtevVeb+2BXoh5negB3UL3+IDRIGxqeGuGAQNwG8NbM0sL3ePrRsL1yRDt6PQzz5W4bBwIgfaWQDn0aiUdIV0aRnWBRaXoLcu0KuycgUMpiuG2e9nUtSecA+5Mq4VjVAovcXkQQTLjlU5rC+Iq+s8+ugjYo9dpnmtzR6e0mm5k0dXN/95Eb3Hp29M5kV5jLdTWh7dm2G8KuAO9fy3cs/3z3eNBgn5Dn7m0LyTPQkA3b+AYgDLwIR170qVjsOjtS7UNIuHc4Zm79oRzwIHFmvticQ5iKfx4pRN0n90xP/apZjVfZfRp7Rh1XhffEAAZo+HyCtfmwHqL0t42akD3aPd4xYmMGpxkprUlcAC6D92mkft28tHu8dwHWCNbO3vRXSp7SBbzheTFDujWaW8JBNXPJElSCjvppeEcXRKOztq1nyKcA/anLcBiTfRSQYeYShnStYVy8xrIXdGxj7Vtp7CLVjsvca2O5KKO882KXUFncVPp5Ye2Zlg5DxzAQ802sFbu496A7XIndZ9KAB2k+9jE3n+OMQAQ3qDR5wIczWDRvksJ5zrGxISn7svC9QT2oePuWBOWq7QXhAGFG68clQ/jd4Vyw8X8JOWkeyNfNTBXUmSjlkO6aAHdW7rHx5zIoMHqDDYLBQ7Bicwaun/Pv6W5r2WufDBJq8t4LyRtyMCHYzUvEMdoEdqP4MBOWV+p2qJ/d4Q5nRM1pNP2T+djvDdQFG17GMWa7GVBHKwozHjlKLRazneHeZRQrpg0nLtS9vPQIWZDuigB3W90j/c5kSmDE7+XXr41Q1sr4BBe3mz5OGfoPg7yZfvyrRn/HbZa9rAsxgtJI6ADym1SJ9eRBvOhq056XDRwKKkZjFiZw8Q/sA8j+zNyn8KS5BaJtQVzV2iJWFyEc7kwGdLFqqCbMFiWuRlxc1yg6CzdO6d+o3t8JNB7UXELdFbs/edotwPE1Uury+RctBq66nHx95afYW01GYy+HxORwG3o917scM7v97zCNYIxZscrh+H3KSOYSwPj1FyZC+mCB3Rv7h4faIicb4iIsdfkK5r7aQE4pDZ5DXYAACAASURBVFdszaw1ROYM3dOzb+4ez72tziu2ZlYaIhcMfp8l/ULScm9RewdMFgDxndLVokjHfOw9yXbT45llL4/cnfeThSHeSK8pbXKB29AKodjhnDDhDMPMjVcOyh+37jF3lWCu+HSP0CXGqbkyVTkbo4LO4o/x+qu2ZhgkAMdjqYquN1RbnVdtzfjPvRrivYASiP0wQUAH2DBV1AkS7KnfYKeSKQOT1GURasLTYpceIDqtqFnRdr4W7pFk9/tC4Vkcr9xRNav5DlJr7DGXBgN7hJbJxVALye4kaED3a93jFScybHBPI1bpAsf0Knt70Q3/WqBWl/47pOz7woV8IU1GBkYEdIANvVTCJOeclQdgncgaNXAoZZH7wjmq54Cn02qaMQ3mrhpakDBHO3kYZ2a8chDVrOYXHD1KmJMUFpGFZaJyNmhA50RmDU72Lrx6a2Yp5HkAUuVEJp3IpqH7e/ZXA7S6fPXWzIoTeaTswVmoF5JloVqGgA6wY4IquuRE71jS1tqyKHyXhuV9XpsF+RzDGozmZYTJSZSd36/It+vSNnef1j2orE3yFq46CaVkvsOahvB+LvucgcNBh7CILIperViMqjvUm/9q9/iE0T64rLQDOuQ7tmbWfrV73A9mzhs5p60v2txXQH3H1szUrzYr9k7l/V5AomJvyr3Jil4Yt9nhEHnA+B4V/jd8
rGB71cQO+a2PQQZ9Rceiq8cMyCYNBznLut+If60c5jdJV/sP6Zi3YvQzzvo9VXL6rQ3SNQNJ6itStYwaaNu3eUgXuRXhGdRXz60ZOA4wXrkTC+OV29L9yeapskpLAReRpcTvgT6x6OrRnjuDBHRvalawWFwpc+E1WzMMEIAOcs2JNEt7QJx6U/f45Guae8XlyjUnB1bYlBc4ktiVMlTPwTo/Yd/xSURdqVnRMMzapP5EkQK6RVePvvDPV1DohO2I0b1IJmNNPui5sbbSfFX/xuePE1wtunor2Nu5X/S+trYKu9W6tqNjcp3QYt8dHNWgtoBE/qieM4LxyoFEG6/sR8/bElXjSZrlukY16SvQYy0kCRLQOZsrFTcLtiIXKITXbM1s/EqzYvaKoeM9/yvd4yuv3ZrJtWxZP7sfYD6W5/sAiYpdQUf1HEpp0dXndRWub4s1phMSVhaa9PvKBg0fcACLrr6iCw5m2/blsrRwqj/iqnRLz36+Wm4yr7/t1n1dzWqTek9bCerOV7PabIcnPyxUP62LyJpOmrZsFGjxD4uWkbdLVM+hHeOVwyOcS5dWchd5sdFy279b79JyO736rBClK0PuAd2vdI8PGO2JO/HarRkm44AcvHZrZvZXusfHjLUu8MdUee3WTK4Pyv5//1e6x08bCygB3BkVdCg9nYSY1Ul9K+2qx3ZNeuOAtCLLrwZtdTewck2Dr0rXiQ8L41K/SHRMA7Tc6YT4mN7Ts0bOwaTe150SK6Bb0MUNx6p+BEpgk+o57Ifxyp1piDlPOJesIhQQtVqx+3mTjYMsMtN2rAM6VmtVzVoO74ZjLQ7NPaBzNvunLr9ua4a+rkCOXHNgZamSzA9kln65e3zgdTmH8z6g/OXucYttlADLBrg6gA2Lrr7T4kMnAmI/RBVtbyBz2ia+5nWiKfaeJX5V+kiokEpZmBz2ExsjMcIcDeoqfn8NEbkY+v13GfWBYQeraUJX4C9ryEo1EHAwY4TYOAjGK3vTcG6JrVTSpB1MrO4nuKD34tJRvsd1rPS0DgNaCTpmdHsF0WeG4M+fXXn+j/9S9/iIEznlmpP1ll6s3gFy9rqtmRUncsnYvd/rRJZ+qbkvZq5etzUz4UTmDH7/Ff6FZMV+4KCCDmijrYeGdI+smPr1QQ7HpNfUP3DOGTiXnayg2peR6rk5v4dk7Elq3fz+Qa1oiamTz+Mhvx8u6HUknAMOZi52uIHiKet4ZR8Wwkrkx2JG4e+95y+6+k5A3cnxq7+//T6Yi67u52VPa5twS07ps0NQuQZ0Rks0575za4Y2OUAYkwa/bP3AJkhIp+0ZYk9sAjgYVvYCu+jDWMXAbxlVdB3ir+miq48ZmPQa1rY3IcSe+JjTc26Ctu2pRA7pRjt4/UOtvvbXkYW+wMGt6vMwcGglHa/cQivfi7w3Gfah1XOWKiOXNZgL0inAb6+w6Or+/rqQ93sdUvDxXm4tLt/YnPwOMQF+KF+ZSdf1k2eq1o4LSNGr7hL5w0b2iU84c6X4A/r9lOuE/HduzWy8sXt8hdVOAICi8hMkvr2PVpnGakNSKcjeDIXhH7yrWc0f7mjEYx7J+7rqpFrM6jlT4VyLX72s9/XViIcxcdzJe237FcImQQNwKJu0tkQnlGW8shftIBG7LTXyZWnhzyPaaSE4Y9srSKuKLuRedLlV0H3X1syGk2zSSSaWXh9w2fAnndyb1+cG8JS/bGQv+6TLHjD2PbDpJKt819ZM7qtBfrH7rP8OHLX2PVj0F9JD+zrANl1BGTNk4DsiBxocLUc8hBCVkTEnPlYthnMtOunwSMRD6MS5CfXdcKS9V4ASG9E2hcCxGRivjER639lI74sAdKGUhTDKL6g4HSucazG0vUJL0IVZuba4fHhresqJrFva7+gfRJ7x1y6LufICKIVPOLn3CZe9dtvWnmebTqTy8NZ07g8L/7X77JgTOV/2/eLyeCFJFirumUQA9qF7yCxEOkdsip+fkYitDocD
VEDFmlTbjPjeB6aTMbHu615t7VQEjBGAgzsdsuoApRFzvHIqYMX2jmpWm6QTU/KsjIH8ggoTYbCh7RUkdHvb3FpctrjmH1zM1hm3+IjLvvkJJ299fubeZem4gJS822WjPhA39pEmRgOEc3PdZ/2qjyt5vw+AzmFlPHAg0fbBCN1mpCy0helExHFLRdvZdJyGP7Hask6E2LujQ/x5Wot0rkaoEACSctrKRC/SkvJ4ZTcNBc6HeK9jWg24j3vMduUdp9fYwt6C5hZU6L1e0cVRsRdpjoXqxpF7QDe6Nb0023122dLNtCUi73XZ65+fuR82cDhAcp5w2X0+CDdW7XRhbGs694eF2e6zfmUVE4gAgOT4wKGa1eYi7wOCDvOTqRpmxXheG8pxwitWBdtykSaodSJkMtI+NztVlEddJOMnlXRvorwFW0ENFBjhHHKV8HhlN4v30YIGJn6uayX04tZqVkutmZKF6rk5q9/ZbXugPxb5UIIFdLm2uGxjrnXFhpMX/pXLQux7AJSOD8C3bH3ohbGt6VB7kCxFXK0NAEDeQk1O7MY+dPmKtVdbLs9j2ooq1spkSxv+H4i2ulyP9PbmW4H6RQm6mhvA3i4QziGQpMYru+lvjZUCF19sc1pEnrno6r4N4qRfGEPnmY6InZOsh95j7bB0T7oLkQ+jv5rVgjyDBgnoxram15zIJUt7GPl9sR5vZGeeFLk7xDkAyuJdLqt82skLDd3v6y7Qj9+V7rN+383Bsu8Rl/cLABCP7kUXYw8QC3tVJkvb28QIaPJ66I1ZPVfUTgqxJj2Pe61CfR/N62puALeaIMRGCAmOV3azsMjHB3MPLrq6by8/SyDXWRr4xG7dOFmQ6xpzAVlLkPnkUBV0flJ10olsWprk/YzIPauN7BWhzgGQus+J3O2D721bgc7I6a3p3H94fqH77IgTOVf28CzECwAQHa2c0zQV4VP1arVbp8WaKI5xDjtCq19ihO/Dx/wbyH1/aeU7ZDxazWrzuncMgKffH1e1/SCQt5TGKzcZqJ7zY4BHNJhjrJ+f2IsZCtOKXUPE2KF1kMVZwQK6725OkJsrn/ygy175cSf3GjgUoPDe08he+lmRewx9jgvfvTWd+0P7LzT3naOlBwCgLEJNiCOslNqXxqh0WtcK0yKLNZ49zmTVWgeP4yB869SValabIqgDbnGFkA4BpNpuPeacuQ/nKtryGvmK/R1ZqLnLiAvIWoK0uQwW0EkzpJt1IquWKjG+4EfXLvv+kOcBSJGvnvt7l40Yur9XvzvQvnNOxH+39Za9si3UCwAQHQFdghZdfc1AG5lj04foGPsBFz2ck4gVgMcJ6GKs8vd/X+dE5AmtqKP1JfAUQjrkKpXxSjtd8BFr79xWOMf4PmdahTkY8RDWC7pfaOzgOPeqx6ABnTQnbyesTfZ+3GVf/z6X3Rf6XAAp+etG9tLPidxj6N4O8lDwc91n/XfacNlDs5AvJCl6/3P2zQAOhb0o0hUj7Oj092+s7/PCd1PQSc/VCG99nGsWe0JxWFtfrmlVXah9igDLCOmQtxTGK+1i3S+Ec2HFnnMo6mKy2GPs3K9bd95vsNv3bk0v/Vz32YWIKwNuseXDBZf9wFdkrmblmIAi8dVzH3CZpZWjF743QGvLn2u2trSwiS9QaP6BoJrxEwygPHQF7ZC++toe/AYMbBxfdDEmP9YTmtyaj7C6e9DfE7rXyKHoGGLdwH3Tr1V15/R4/OTxfAJtT4Gj8oH1ChP/xcZ4JZhYAd0Y92hQsQO6Qi4m8wvIqlltNWL1YXoBnTSrLCYsBXTeppNn/0Uje9nXd7nfMnA4QKG0queMHPP6mYCtLSO1UAIAAAWiE1wV3RttKHJ7m9TFqGCKsZI/L/6znI/wvkPHOI/zGo5Z4SetR/1LFyAttAV2offMA2Lxz8lLvnXfUcJ3xMF4JTytvI4Rdi6wiCS4mFX2RV9MFmMBWUuvv0/zPH9RArozW9NrM91nL0Qa+O9p
W0Qed9nrvkrc0j8SedLKcQHWte89Z0SQlUcz3Wcr1hYaADiWPk4fgE7SSa4RfTFmCEDPeYxJrmQCukVXX4pU1V45xnmcNRbQ7Tasr4tt1XWtwI7gAinr1b91Wr8axngluhjVc5sRq/bK7FTEz170sWqsBWQtQ3m2VQ++B12bKWubevoKoD9tZK83cChAYbzPZS8yVD23ML41HepHp/D7jADGbEY+HCYOAHSErxaoZjU/TvCVMleY7Aoq1nd5ShV03nKE9xw46n9RVzSbmlvYR6u6zn83fNq3ANS960Z0khxIjW9hy7YQBjFeMSPGdjFTLBAJy8AetYUeq/oFZJEPIdfrF6WCzhvfmt6od5+d1B8BMz7sspe838nvPi9zj1s6LsCqx132moadY5sI8Sb63UW/daCzViKvKAOAY6lmtYoYbOVfMjH29thMsG3hUoTf5ONOfPgFwBc7dCwhDeprpwJQ91hpVdelFvyivM5Xs9oSf9M2MF6xI2J7yymr5yRhR16I1CEp7DW4HHHOKNeALmYFndS2pmcjrc67rWsi8i6XPWzpmACr3uOyb/D7Nxo5vEu1rencJ0fq3Wf7QgWBAIKigg7AkbStQL/KZFd0MSqQUpjw2C1G4HjcfUVmDVTjd0IrrLtazWrOhxrVrDZhYOU9cFyzVInGxXjFpBgLi+aonosi6u94wfefa4n5GdKsoGtxIpP642DGJ5w88Fcu+2dfl7k/s3RcgDXvc9m3bds4Jv8wHqRthmuGc70h3gsomdgVdEwYADg0bdtlZl9tRJn8IKDrED95fNRqRD/Z6FtFJng/nmqNj6pZzT/zzLN/XUdtFvweHihQZ5d+XehKu8sIGK+YFSOgmy/IuUlNzIBuNZFzGbNjRa7zwNEDuu/bml76me6zc9qH3YQbIvJel30vAR1wex9zcu8nnTzgbJyjqe/fms79AfVnus8OMKgFchN7komV6QAOTKtZZjtQ9YPOitE+KLX2ljv7fFSzWoy3Hjjm+ZyStBfT9eq8yc4edtWstqwTrfMJtlkNZWXR1WNMkHeUD7f1/qnoy2rbeN/qcpa/13AYr5gX/Ptn0dUJ6OKIuSDY7wVqZPq2uHx74LxaNUdtcdlmwlo7Ct+27x2N7FUGDgUw6T0ue/ENGwe2GbB/Nqv9gPzEXr3cS9sdAAfh282JyGNMdpkUo5IkxQq6WI71O6wVZWVqRX9K9917oprVVqpZbYyxTDn5wMtPGi66+qQGjs8UkdPWtpRRPFMHwnjFNg3WQy8oWSjaeUoIC4JxWyYCOl/54pxMOSdi5bXtRJ5oZMMfdXKvgVMEmPOhRvZtRu7XINVz//ddZweck1FL31NlfSFZFto0MWgGcFt+4rua1eZ1QhzGEEx0XIyJ/WP/Di+6url97gPxE/BXROTTvkKJPevKzYfV/l7QsO5+v+eUoRMyqsEEcsJ4pTBoy10ubJVTfLnds1Yq6OQHtqcnnci6a+7xZOL1pMgzVhvZKwycHsCUv25k3/CkyD1G7tXZEOfGiYxZ+n4q8wtpyqtVwCExmQVgTxr++O+pYc6QWVG+w438fuHpxqx16AnMt8B8rJrVfKvSwrdvxPEsurpv4+nviQcN7UNEFV1OGK8USoxxC2OWCFhElozcrqOZgE6Za0fxIZe95G9ddp+BQwHM+Hsngw0bBzP3b7anc+9f/9N3ne0rWbscIJbYk2kEdABuoZUoK7SIQsnE2COqI1U1ur/VWCf+twrOt8C8SlAH0cUEi67uf88uGDghVNHlgPFK4QR/9mRRUTTMM2Bf3ZZOz7/Znp7/L3edXba0oe01v9dWI3v5V93l3mXgcIDoPuvk7o+77GVGKpmC7D3nmg/4lsvRF4y3KhjQlbzAnaxEHgMwcAbwNDrZtURbGpRQYQM6aU5Czlezmg8iznfqf7PAWkGdb3M4qQEmSsrvU+dDWxGZj/zbNsEi2M5hvFJIoauq1hM5b0Asuc0XmQropDkR3trE1IyPOnng
zxtZ5YEux0oDlN7fuuxFT9o4Ccs/uD0dJJRyth8c5n5we9r0CuH/fNfZCgEdDmgpckA36NtP+H07uGAA2tpEMdlVDDGqMZjsMkyDCBaKPcWfh5FqVvMhXZCFjrDJV9FoVWXM37gxArrO0PFK7MAVhxf6uZfFGcDxlKbFpeiE+yUDh3JToxlKnPGVQ0YOCYjmg06+3sjZD7L3nIZL/SHe6wjMh3PAIVl4aKAFFADCuWKKEdAx2WWc7r01V/bz0MZ/p13Utpe0GCwxvzedjntjtZjvrWa1kbJfh+NqG69YnbOAHYxZAKPMBXTSrFaZdCKbrvnvJl4bTu5ZcdlLDZweIKpPu6xi4J7c/MHt6SABnW9vaem7qO1VmHCu058dSbNQqU5AB0B0IRB7uAAJIKTbk6/cWCEgKTcN6WI+U/L3d3yMVwoo0r6gBHSAUSYDunPb0xsNkcmGVq9Zef1tI3v4I07uNXCKgCje1ci+4R9s3I/zIT7/xbvO9jVERq19FzVEFs4VqHKu058f6dI9UWKt4m0hoANKrprVfMut4bKfByAlGtKd5qI+ja+me9S3vDR0TAjM79cYsYsVAd0xMF4BCiP0foMoGJMBnffI9vSUtZ7+XxCRP29k9K9Haa07GTQSkITaM8HiA8Nq5FWOQN5iV9EN0vIJKK9qVvObf1/kTwBIz6Kr+0qT+9k78Bbnq1ktSHcSmDUZ6b7ojVRJVHiMV3AEVNDFM1TWD46DMRvQidHWcn/vsm/+G5fdZ+D0AMFtSDZk4D5cf6S5V2XuDH4H+da/I49sT28U6a+fFpc4JAttLlnNC5QXk9RAwrSl35C1fe8NGCWkK69FV9/QkC4GArqj4X4tNvbNLRcLcxwwzHRA94bt6SUnsmxpgvyGL19pZK83cHqAoHx71yedPMfAfRikveVP3nW2z4mcMhbQjbxhe7pwgyoCOhyShcErVapACWmbN/ZxARLnw4hFV/et4Z4vIstc75t8SBeqUwmM0QrTGFV0BHSHxHglCXRsAXCT6YBOmZsk+4STF/73RvYyA4cCBPM+lz1w3cbpDrVSzFoFzYUf2p5m1Q2SpyvbY+9DR5tLoGSqWc3vDTHBdQfKw+99u+jqFW17Ocel33GumtVYqFReMQLaU2U/6YfBeAUA0mM+oPuh7ek1J3LBUhXLtoj8TSN73Wec3G3gFAFBfNTJ1xi4/9Z/KFx7yxFD3zurP7Q9XdjN26mgwxEEqZS9AyangHLxk5K9XPNCCzJG3IU9RRLgFwcturr/3X+miJwWkYWSn5IpFiqVVpS2ify9HQrjFQBITHcRPo5r/gBNWPoR+ozIPX/SyF7xrXe5/2rgcIDcbbjsAQPhSLAKMicyHOq9DqDQQQGhGo7A3+ujkU/cWMS9OAAEpBODsb9z7mRVA6hWq+u8xkQ+cLqY0/923mLs0cskaUJ0Hy4fUMxqlUpFu2r4f/aX6FT06nmg9WDJ+HugmtWWI1S1DbA/1p0xXnmaIo9XAOBpChHQ/fD29MZP3HXWB3RXDBzOTU+47JUfdu6t/ziTjxk5JCAXH3Zy7zWRewyc3SBVNT9x11lL7S0v/XCgqkHAkHkDv/n9vsWT7scBIG0WF8Js6nehfy1pcJC7alaL+6lhSYzqQDMT9HrPte7B1sR4pe2VemB3inFQaS1FCOgqRvahto7ximK8AiAlRdiDbscPb0/P+jZvllpdfl5E3tHIvt/A6QFy9X6Xfe01G/ddkEG7E6kY+Z7ZdAlU8NDiEoelD3YW2kvR5hIoB0t7uayLyCO+msC33Ft09flQk104Gq20SlGMz2W2gkb3q5vV+3JA9617RMcrsffOzctUwn/fuD0Wh9rFeAUooEVXZwEC9lWICroW1/wxumrjaJo+5LKvf7eT+742c++ycDxAHj7p5LkGgpHVH9meDjLgc3bauUyF+sx5IlTDEflVmLFbzfrV4xUG1EC6fIWIoTaFF/xvPxNcR+O/qyOtqB+i8qN8/L51GmT4
7Tj8d8nQrgq7FNqf9upipSkDx4JwYgTlBMF3wHgFANJVmAo670e2p5esbdi8JSJ/1sh+wMChALn5tGTPN3B2g0x8/Ke7zvqHg8EQ73UH6z+yPc3+VyizIC1tD4D7EEibhbbWfr+W+xddfZLJLhgSusWdFLlyxwd2i67uJ6xHFl29r63CbtnA4R2HpYodBKDhc2gxWuoWDeOVtLDnYvmkWm2PDihUQCdaRadt38y0uvyUk2f/YSN7lYHTA+Tic05eVKL2lkNGvluSWalKi0schaE2lztVdAaOA0A+Ylfqzvlqm0gToilaj/CZ+I3onGQmfNsCO39/ZyLykN9bOtLf6HH0Mw4CTGC8kpYYAd1Ayie0ALh3cFuFC+j+7fb0mp+4thTQbYvI+xrZyGec3G3gFAEd9T6XvYD954K//CKEZDZkJ6DDMVi5D5K5HwE8pZrVYq9Gn9N9W1iF3jm0ZuuAiIFMspNXuj/TxK7961YNHNpBsCcvEJGBkJzxShoI6OKiahK3Vag96Fq0ssQPEvttHNFOneo9VxvZ64fvcj9r4HCAjvmMk3uvxz+d6/9rufafmw31eUMgVMNR+cmsalbbNLDfgl897lu50O4SSEvMllrLfrKLv6eOW4nQmjHF1mxRJvHKMvnbvn9dNasNaOu6MSNt9vdCBR0QV8x7kPFKPgg7yydqQKfV/DCqcBV0np+4diKTlqro/OvvXfaS/89lLzBwioCO+bCTLzdwfwVbTWukxWVSG7FTQYdjslK9NqGTaADSEWvCa9PIXjIpijH5kWJAF+MzFX2vtiNZdPU1bYXpz7nf9/uCwTaYfqESe4QB8TBeSUykVqE8y8YVtUsA7aptK2RA5/277elZa4P4z4vIOxvZwwYOBeiYz4hYCJ2D/JD9+7vO9hmo1Fn9d9vTlL4DT7ESWPfS6hJITqwJZ9pE5SfG5Edvggs4YtwbpR//alg3qW0wHzI230FAVxKRJnFLf//fAeMVdAIBXVyxv+f4HTeskC0uW3wVnYhctXE0TR908sA7GlnlG7tckP2ygLxdk+xuA1VLofafs/CDlVT1nFD1hmPyk1XVrOY3JR81cC5P0eryaKpZzS+AmIh8GH7ik5AVO/RvMsaiHN8qap6rkJtYq5MriS3iCN0mVFLef+4o9HtiXsOSyUjXpB0Tu+UR41oT0N0G45WkLQf+biegichXTVazWsxD4PobVuiA7n/bnl76sbvOWpm027EtIu9x2Wv+qXPv7M3kSQOHBBzLZ528yEDAEmTAbiSgS24QTECHDpg19Ft/vprVlhZdnYU4h+PDufORj+FS5PeHLbF+85NbiGOJX+lfzWrrEfYqTyagi9gCiYBuDzreqOh1mYq4Tx2tscqDSVxbGK+kK3R1oq/476MqMqrQoWw72tUaVuiATk3oH1nstnQ3fcLJs//UZS/9lsy92cghAUfWMHDqfjRcy8fYK0NXf3R7OrnB0o9uT/uJBTakxZH5yalqVos5mN3Nr2gf8tV9XNU7031rYodzEqoaG9jHJqvRg1iKsKgjpUmPKJ+FhS/70/Mz5Cv5jfymIl0xwljuf1sYr4ThF6YMB37PIe63qJYizmn06hwGC6IMKuwedC1+MtuJTDmt0rDyem8jG/6gk3ttnCXgaP7KZfd9If49tRrq8vkKusiflUEwcHuW2kr2akjXZ+BYisBEVQkTDdglxv3LhEgYsfahS6XqJEZAZ2pvecu0zfaDfgI98GFaWSSFHOl+mjGqNKnosYXxShgxxitUQ8cVOxwbi30CsLfCB3Te/749PelE1i0FdJ8VecafNLJXGDg9wJE5bdsa+X4KNlh3In2RPysDYeA2dOW4pQk8P3mxREi3P13pH6sdV7sFA8cAW2KEKaxYDSPWeKrwkx4aMoZuDyrcG4ejYyJaVSEPUb7HqOjYV4xAhesRBgFdyRhYMEpAZ1QSAZ2aMHEUbdZd9pJ3uew+MwcEHNLHnTzXwDkLOckSdRL5fLMVJIDb
s1RFJ4R0+6tmtRFDbbionoMFTHgFoBO96xHeOoVJj1ifge/oQ9KQbq5QB40iiPEdQAWtPYxXAtDtEqiGLp+Y33m+4wMhnUHJBHTnt6fnnciypSo63xrwnY3s9QZOD3AkTzo5aeFeCiXy5wzWyhMoKoNVdEJItzetwjDR2lIx+YvdYrTTooVXODEWPRV60kN/x2JVz7BI7WiminjQsEm/v6igtYfxStqC//7pIkrE4R0IEAAAGcRJREFUE/u51Nqi59KTxCro/AT3mKWAzr8+6uSFb29klBCjkIzcR0EG7OfvOlspw+cEEmBx8rMV0g0YOJbodJJ3Xvfqs2Bu0dWZaMBuMX53CfLDiTX5Ya6ryyGMRfrepgXxEdEWEJ2iY7dYk7YE9PtjvJK2GH//VFDFFTug66eKzp6kAroL29O+PPiSgUO5qSEif+myM5tO7jZySMCBfUbkBQbOVlkmVdcMHANgnrYCMfVbr3xIt1LNaqVelKMTPEuRVmDfDtVzsCLGvnelFHGPj8Ei/g7od3escJHv6CNiYRA6aDLi2I2Azh7GK+HE+A0c5vcjHp3PiN1Ba4oOQLYkFdBJMxCbbIhsNjQcs/D6pJN73uGylxo4PcChfF6yuy3cQyEY+JysgAUObjJCv/6D8JUHV6tZrZRtI/RBbyn2fp67rBvYjBs2xVgAxIRXWLH257LU3vegJiJOzvMdfXRMsOLYtN3duUhncoEuB3fEeCVhGtbE2De3yBX/KYg9Vuwt6Hg1WckFdD+2Pb3hRCattbr8q0b28Aec3GvgFAEHVqYWl04kdotLHkyAA9IHecttGc5Xs1qpWl7qnnMrxsI5YX8e3E6k1nDDrFYNKlbw01+khRr6WxVroo7J+eMJuY9QjAlk5MzAnsEE9HfAeKUUYtwH56iii8rCd98wrS7tSC6g8358e3rK2gDy8yLy9kY2auBQgEL58e3psjy0MzkBHIJWRS0bPmenROQJP0mb+gNuNav5id3HDO0517LJykDcQYz2MqxYDkR/J2JVW0/oxHcRzEb8/uY7+oh0bBFyYo12/InR76ilyOM3ArqDYbyStli/hSxkjEQrJy3swXuFkM6GJAM6aVajjFmronvcZd+86rL7DJwe4EAs3DehxP6cP749TYtL4PDGjLa6bHde96ZLbuDrV136SkERuWjgcPYyRWUG7iDGb+8Eq9KDijXptdM6yPq11kq/U5HenhbExzMZOFjh9zQhRsK5OcZpB8Z4JWFaJRmjyIQKqrisBKSEdAYkG9D9++3pJSeyYCmguy4if9zIfsDA6QEO5LNOXkRAl9bnBFKiK8+KMJjs14HvWgqDX/+wrpO6T0Sc2L2TTVaF4gBiTHix50NYMb8HBi1fa/09Oh/xELgPjkivXeg9w1hMmAhDnQ/4Djg4xivpi1ZFV6CK/6QsuvpSpOrYvVwp6z76ViQb0ClzJdmfdPLstzWylxk4FOCOGpwiAMbp6vu5glynVlC3Uc1qU0Xr+68Vc5Pa5irmpO5BUD2Hg4hVveNXLDPpFYAu5Ij5G2HyWmvAcyXiIbCI4oj0dzjGtSOgKzhjnQ+WdXIaB8N4JX0xK/6XCOmisTQW8vvor/C3EEfSAd1/2J5ecyIXLFXRbfmlSo3stX/v5F4DpwjYl4V7JhQq6IBCmzC0+uwgenXl+xM6CJ6wHNZVs9pINavNa8XceYN7ze3GxC8ORMObWPtWjzLpFUzs8+yv9byVVmEGwjlvnkUUh6O/xTEXyBDQFZQGc7PGOh9QqXEIjFfSF3lBUSukq5TpnFuw6OqzEe/tvfjuD4/5e56gLqzUK+j8xPeUE9m0FNJ9RuQZb2tk32/g9AD7IqAjoAOKQCf5irAf3V4GdSVzK6yb0km4aBO5fjCuoaGfUPZfUY/6FbSxjucIJpj4xSHE3ANrlJWq+dMqjeXIhzEce4W6tieeMhDOCZPzB+MnS3VcsKa/xf2RDmVdJ49RIPr30wrmRg0dOdVzR8N4JX0x
g1Af0l2lzWEUFs/5qAZ1O3vpF63zTxF1p/4B/+P29Ma/7To7YeRB5KZ1l339H4i87F/d5X7LyCEBtypTckRKBhSa31xb99Qw9Xt/SIP62tlXpprV1nXFeuu10ckJDX3I9kGg/+eA/tPqnnIHtawrEYGDmoqwl1O71kpVv2p6kknw3PjJj6uRj2FQQzrfgjfoZIyuip+NGPC0m8v771yrBIu85+uAkWvVQphSADqBWml7WfobakcAcDSMVxLnn/OqWW058vPY+dZvKEF6GP7ZVc+5xefwwdb8SjWrre4xN0F1fYckH9B5/7ExPfsjzZBu0MDh7PCtLv+ikb32uZm856u73OMGDgm4RZmqu2J/zh/pOlv5T41pBkDAMejgdijyw2sn9evrZvVaNau1/tUPkHdXibV/h7SCt92sTfp1mrn9h2Gbn2AyMBkiulJ1VCe+5nV/TXSITnotGKgG7m2b/JrMe0GBBnOThiZ9NgN9Tw8ksODEkpS/jwYKXDHSPtYryt/7ApP+R8N4pTQsLCjq12q6dQ2GZ+lOkjs/NnrM+DG2FhPfrMhum5tIgV/oG63NaykCOmlOvk8Y+JJ7Gt/q8g8b2fd9WeYmn5nJk4YODdhBQAegaBZdfULbQ1pq5ZOHvRYdlX0y8gKr+HBEs4bun9bE16aG7iv6zzVWqx/bhKF2vX7y64q2nJzVya+OfH/pb+CIfl4zC1TVFJN8hbOZ+AR8f8R9/comVECfMsYridMFRXNGnmX7dSuGi1o9Na/XeY1nrs7SbkCXElpojEMqTUD3fzWml/6XrrNWvuRu+pCTF/5+Ixt99V3uZ4wcEnDTF2fywX9w8hzOCICCmdAVxdYmJpGf5dAt45AOrb6dNFZd2qth0nBr8rhtlWqe+6lF2/8yb1p9cMHYZHyvTsac05XqS+3tgw4SZmnleHtrO6u/fet8TxcS1THoFNoiHhPjladJdryiVXQjem6tGGwfX9yhqwuOd91T7naD2yhNQCfNChmLX3Ly7kb24quZvPvBLkepP0zp2cmQyxHQGaigY9NVoEP8hKa29VoipCuFTR3fAccxWaA9LMteLXtkPiDS9pIWJz/6WxUJrf9AJ8B8cLd7UruvgL9vRd4TrswIVdEJfiHVFGeyIxivJE4XFE0VpLqXZ+0O0TmMMWvd/xBGV5nO8080ptecyJRra91n4XVdRP5oO3vDHzeyf2bgNAE3Wbg/QjHwWQnogA7SqoOKrupD2kZomYbj0r3A8lzpDTuKFhT16yRn+6toE2KX2HeqkJapeEIHsJCqgxivlINWnPMcWzI6VrpQ9vNQRqUK6KQ5Ce4DunVrId0XROTtjeyR9zSyFxg4TcAOC/fGD3WdHQpxNZzIUuTPmnKLBiAKQrpSOM2kLzqIvXFKgMmP4Napwiosrhs6ocJCqo5jvFIOYxpwo0Q0nCWEL5nSBXQ/2Zje8K0urQV0/vVpJ/f8ZiP7sXcT0sGIk+KeNHBvBAmuDHzOIEEkUDaEdEmb01XEQEfohveXOJvpY2V6UFQ5F9MyC2DQAaf1txUdpOeUhSaJ0+tMGFtOI7rACSVRuoDO+6nGtNmScB/SLRLSwYi+TB4v0bWIPXFABR2QE0K6JPlwjv2M0HGLrj7Bd0VpjLAyPXdMzhcXv7E4rksspMoPC03KQe+hubKfh7LR+QvGqSVSyoBOmtUqExar6PzrU1TSwRAD90QlxNn4qcb0SuTPyea6QI7aQjoecIqPcA55o6VQCejeWkHGmSVFlXNxXWDvORzTnC54Qb4Yr5SAPvcQxpaMLnCqcI+XQ2kDuovNyfg5yyHdAiEdIntuJh+xcD+E4kQ2Y37OiUD77QFl5UM6fcChhV1xEc4hd/pAzN9ZCei1Pl3285CDBb6rC2tVK3OAo2KsFgjjlVKhG0wJEdKVR2kDOjVh+Y/ct7t8y3b2f/xBI2NlJ6I4KfKkgTMf8u8/dgseAjogAF3Re5qBbuEw4YNgFl19XkQe4YynT6u8
COk6Z5UJ48La1JZawFExVguM8Uo5sGVDeRHSlUOpA7qpxvSGE5myWkXnmnffM5a3szc8up29ysApQ8n8j5n76Mn490GwvdmcyFrkz0pABwSiE7IVNl8ujEeY8EFoi64+RVvcctDfhAtlPw8d4CcOKzqRiOIZobUljuESY7U4GK+UAyFdeRHSpa/sFXRyqTE96UTWLYd0nxeRP2lkD//CVvbDn3Jyt4HThpLoz+RjWfx7INjebE4k9j50BHRAQDrQ9ffdAufdLP8Q8pBOPADB6WQjk14loG39qKQ7OsK5Yju96OpLZT8JOLLT7DkXF+OVciCkK6+2uQuufYJKH9Ap86t8tkXk3S775l/Y7vrJv2ZfOgTUm8nfxD7fPxhub7bYLS5PRX5/oHR0XzrfzukhVqSZ05rsnS/7iUBc7F1ZHrS7PLJlwrlCO61/+8Bh+bHz/fz92MB4pRzaQjoWmZaMVrlXCOPTQ0AnIv+5Mb3kRBYsV9G1Xh928uxf3c6m3kLLSwTyReI+YuBvfyDEpzVQQSf/c9dZ9pwEItAQaEgnGRHfJZ3sjb1wAtjRtnclEqcTzSzaODi/5xThXHERzuGofDgwwFjNFsYr5dC2yJT23CWj136MPfXTQkCnnMiYE9ksQkj3ORH5743s4Z/a6voJqumQt75MHjfwdx+kgu6/NPeljN3ylo3ZgUj8ijQ/ycjEbFR+T8AH/eQCk72wRiex7+f7IX26aIMWUnd2mj2nCqvVQppwDoe1qXsDjzBWs4nxSnloe+4H2Ve9fPQ+Z4FxIgjo1E83pjeK0Oqyxbe8/ICTF75pO5v65e3s9exNh7w8L5OPGDi5IavKYu+9QAUdEJlOzA7QIiY4f76H2AMHlmmlwAAPw+lr25CfFlK3WqetXaHRQhpHtaxjNfYGNo7xSnnos9MQz67l07bA+DQhbbER0LX56cb0fENkoSEiRXl9VkT+tJG98me2u2Z+t5ExsY+O6xP5WHf8+yHY3mwNkaXIn3Xw+7vOBmnpCeD2tHWEbxHzfCZnc7esE71UzaEQ9PvBj7sfYXV62tindE+txRS0tSumS4uuzvXDYa1rxWVF90BCATBeKY+2Z9f7CWXLp62a7gL3ejER0O3iJBtzkm06yaRIr4+77J7f3e56w/+51VUnqEMnfV2Xe/ykgXvh+7pqQf6unWRLBu597mHACF2VNqKtQ3jY6az2yR4mClE4WkEwwEbt6WurrC7zgg1aEBfbzcUwZT8ROJR1bWU7QMVlcTFeKQ//TEVFVTlpSDup9zpBXcEQ0O3ys416oVpdtvNtLz/ismcT1KHTnpG5dxo4qUH2ofvZxs6KwNgDGfahA4zxrUP0YYeg7viY7EEy2jZq57shcW3VdA+WbG86P8FzQb+zaUFcPK3fXBbD4DDax2q0sk0A45Vy8fetv381qON6l8iuoO4RgtpiIKDbw8826vNO5JLbqaYp3mtLRD7ssme/dbvrDZNbd/3Sm7e7XvWEy+41d6JRGF+ayRMG7oVggbMTWYr8WYdrXTXaXAIGtQV1z2cV6qEta8Uckz1Izq4Qn7a4CdNrPVSC1embugJ7QCd6UCyrBCw4ggXGamljvFIuGtRVtPXlHFVV5aFB3ZQGtQ8xd2Fbd9lPwO3UG/WJs82WeoM2j/DOfFD3SSf3/JHLHv7TRvbwczL351/d5Zb/eebe+axMnrR+/LDjyzP3Ad96MbLhUG/vRHxFx2jkz+tXaLP5NmCU7r8xVs1qE1p57//Zz/W6hX8I9N+pk+xZgjLQCqOlaraz0GZCf8/5bkiQTl7PVrNa6zegsM+Nu2zqGHSKVpaF0/rNnaJaDofgw1z/fTbPWK08GK+Ui/4m+GfXPr3W/uXnvHvLfm7KQLvWzOvcRevaj3D97SCg25//g11L4Q/2CyLyPpc98L7t7IH/R0S+LHNLz8vkPYNd7s+fn7mPGThEGPbCTN79Nv07iulsV21kupF/OzT/Hme7arEvyBgBHWCfTl7u
TGRWs52FPWMMdncs6EQPq69RSjrJ6R+CJ6pZbajtu4HJr8S0BXVF/w3wVc6zfG8XzrqGcku0jcYB+SB3SV+EciXHeKVc9Nl1Vl+iY5cR3VLmVNnPT+r2uP5DbYHdEHMY8RDQ7WO6Ud8Yb1bRPWb2II/Aj8Y2XVb5WyeVtzcyeUYmH7xX3MqXZfL+r8zce+/rco8X7kMhVz7EvSeTD37eyXMin+kRfQDNnWtOLger2tvD4HhXbWimwepXoChaK1GlOdgdaVudWIaBbmuyZ14ne6i6AJSuWm5Nfg20PQQzGZKQtmqE9tXpMceSB0HlTLGs6wJi/7fmv1dWuG64g83W30rb3wzPl9gT45XyaX9+lacCuyHdv6z1T8LaROk9f/M3Qe/7obZXH/d+GAR0d+Anx8901fz+AldMH+gRfd6/nDzno5I9511O5I8kk55tkb7M/c0Xi3zkWZk83i3S9RWZ+7vWO3xl5h7/Elpkls6XilvxfyeRP3fIfejmDUyqtFrnASiYVhsJuXVlWkoD3NW21ddLB/j/T82FwJ/H0iTsWoTPn8TfmE6mP61CSQOdIf0/g411DEnq+6N9dbJe20rbK3YbzLJUzqTyN9X6HBslClVC/7akZq01Xijp2KwdY5VjYLyyp+Tvqd2BXYsGNwP6f7b/e6pKufhF7/u1vQojdE6jT//PFP8Gol7zzDkX8/0L40xXzbevOlf28+DdbeAYEN62gRaX6v7LgarKznTVNgxUvjzzcoNKFCAlujKxUrBWEuttq6+XmPQBgMNrC+zaVyfntTKdyhkAAADAOAK6Q/jerppfPTJamAMG0nTp5xr1iRCfzMg9f+HnGvXJyMcAIEdtKxIrbavRYgV3vipuQydz19omdVkoAAA5aVuVfNgV6hvtrYlaq95ZRAEAAAAUAwHdIX1PV23FQGsSoMw2f75R7wvx+b+nuQfl1cjn2q9+Hvh5quiAUrrNpG17e5nDap+0XWtr5UAIBwAAAAAAEBAB3SF9T9dOW5IlQjogqod+vlG/pSdyHr6nq7ZmYFPcCz9PFR0AAAAAAAAAJIOA7ghOE9IBsS1cadRHQhzD6a6ab6d5MfLn3amiu0IVHQAAAAAAAAAkoYvLeHg6SV7RfVoAhDd8uqt2pz05OmVWA7KY/D5UQfbdAwAAAAAAAADkj4DuiHxI50QqTmTV1yDy4sUr+Gss4L0+b+D6ToyFCyUBAAAAAAAAADkioDuGWUI6XrxivoIEdNJ8r0kD17rXH0eozwwAAAAAAAAAyA8B3THNPdXucqHQHwQonv7RrlqQkG6uUV/z/zBwhkZHu2oVA8cBAAAAAAAAADgGAroO8CHdXKM+4kTmqKjixSvoK9i+bEaq6PxrNtRnBgAAAAAAAADkI3POcWo76OGumm9Bdz6ZDwTY9+AvNupLIY7y4a6aD8dGDZyRC7/YqNPuEgAAAAAAAAAKigq6DtNJ89MispnUBwPsChlUWQnFzj/cVRsycBwAAAAAAAAAgCOggi4n39WcPJ/3+2Ql+QEBWx58Y6Aquu+yU0W36ve/fGNzH0wAAAAAAAAAQIFQQZeTNzbqK87JkHOy4DNQXrx45foKVtnmnEw4J5sGrudgyM8NAAAAAAAAAOgcKugC+M6sNiEiF5P/oEBcD/2Sq8+HOILvzEztNRnscwMAAAAAAAAAOoOALpDXZTstL31rvMFSfGAgvPVfdvWBUO/6uqy2ZqSFrd/vsvLLrr5i4FgAAAAAAAAAAAdAi8tA/OS5E6k4kUs+EuXFi1fHX/2vbVarBuFExoxcw14nMvvarNaX+vcoAAAAAAAAAKSCCroIXpPVKlpNZ6H6BkiJryYbeJOrb4T4TK/Jar615LCR87fwJlcfMXAcAAAAAAAAAIA7oIIugje5+pITGXIiF6ii4sWr49VkU6Huaq2i2zRyDYe/I6vNWvquAwAAAAAAAADsjQq6yF6d1Qa0mu5UqU8E0FkP/pqrL4U4p6/Oar5q7VFD1+/0
r7k6QR0AAAAAAAAAGEZAZ4RO8k/R9hLoiPVfc/WBUKfy1bZaXQohHQAAAAAAAADYRkBnzKuy2pgGdb1lPxfAMV14s6tPhjiJr8pqfSKyYixgP/1mQjoAAAAAAAAAMImAzqBXNif7J/RFUAcc3f2/7uorIc7fK7NaRUSuGrtWp3+dkA4AAAAAAAAAzCGgM0yDujEN6mh9CRzeuogM/bqrb4Q4d6/Mar5i77yx60RIBwAAAAAAAADGENAVxCuarS99UDdY9nMBHNLcb7j6WKiT9oqstiQip4xdpNO/QUgHAAAAAAAAAGYQ0BXMy5tt9HzYMFr2cwEcwum3BAqoXm5zPzrvwlsC7ckHAAAAAAAAANgfAV1Bvfyp9pdjVNUBd7QpIpW3BNqP7uVZbUhElgzuITn3loDVhAAAAAAAAACAvRHQJeChrDag7S9H2KsOuK1VH9I9Gmg/uoeymr8fHzV4OYKeBwAAAAAAAADArQjoEjPSrNypUFkH7Glh3tVHQp2akebekVcMXoqdisL5QBWFAAAAAAAAAICnI6BL2HCzDWal7UVgB4jMLQRs8zic1aZE5JzR835hgX3pAAAAAAAAACA4AroS+dfNwK5VYTekL1piooxO/6arz4b63P86q/n3GjV6npd9xe1vuvqagWMBAAAAAAAAgFIgoIMPD3xgN6AvH9r16b8T3iFlhHRP8S0vJ3/T1aesHBAAAAAAAAAApIyADndUbe5r18eZQmoWXX0p5EeqNsNwy1YWXX2DP3QAAAAAAAAAyBcBHQAAAAAAAAAAABBQFycbAAAAAAAAAAAACIeADgAAAAAAAAAAAAiIgO7/b8+OCQAAABAG2T+1LXZBDQAAAAAAACAk6AAAAAAAACAk6AAAAAAAACAk6AAAAAAAACAk6AAAAAAAACAk6AAAAAAAACAk6AAAAAAAACAk6AAAAAAAACAk6AAAAAAAACAk6AAAAAAAACAk6AAAAAAAACAk6AAAAAAAACAk6AAAAAAAACAk6AAAAAAAACAk6AAAAAAAAKCy7W5qEB/G5B1kAAAAAElFTkSuQmCC mediatype: image/png install: spec: clusterPermissions: - rules: - apiGroups: - "" resources: - pods - pods/exec - services - endpoints - persistentvolumeclaims - persistentvolumes - events - configmaps - secrets - namespaces - nodes verbs: - '*' - apiGroups: - extensions resources: - deployments - daemonsets - replicasets - ingresses verbs: - '*' - apiGroups: - apps resources: - deployments - daemonsets - replicasets - statefulsets verbs: - '*' - apiGroups: - chaosblade.io resources: - chaosblades - chaosblades/status verbs: - '*' serviceAccountName: chaosblade deployments: - name: chaosblade-operator spec: replicas: 1 selector: matchLabels: name: chaosblade-operator strategy: {} template: metadata: labels: name: chaosblade-operator spec: containers: - args: - --blade-version=0.5.0 - --image-repo=chaosbladeio/chaosblade-tool - --pull-policy=IfNotPresent - --namespace=kube-system command: - chaosblade-operator env: - 
name: WATCH_NAMESPACE valueFrom: fieldRef: fieldPath: metadata.annotations['olm.targetNamespaces'] - name: POD_NAME valueFrom: fieldRef: fieldPath: metadata.name - name: OPERATOR_NAME value: chaosblade-operator image: chaosbladeio/chaosblade-operator:0.5.1 imagePullPolicy: IfNotPresent name: chaosblade-operator resources: {} serviceAccountName: chaosblade strategy: deployment installModes: - supported: true type: OwnNamespace - supported: true type: SingleNamespace - supported: false type: MultiNamespace - supported: true type: AllNamespaces keywords: - chaosblade - cloud native - kubernetes - open source - chaos engineering maturity: alpha labels: alm-owner-etcd: chaosblade-operator operated-by: chaosblade-operator selector: matchLabels: alm-owner-etcd: chaosblade-operator operated-by: chaosblade-operator links: - name: ChaosBlade url: https://github.com/chaosblade-io - name: Chaosblade CLI url: https://github.com/chaosblade-io/chaosblade - name: Chaosblade for Basic Resource url: https://github.com/chaosblade-io/chaosblade-exec-os - name: Chaosblade for Docker url: https://github.com/chaosblade-io/chaosblade-exec-docker - name: Chaosblade for Java url: https://github.com/chaosblade-io/chaosblade-exec-jvm - name: Chaosblade for C++ url: https://github.com/chaosblade-io/chaosblade-exec-cplus - name: Chaosblade for Kubernetes url: https://github.com/chaosblade-io/chaosblade-operator - name: Documentation(Chinese) url: https://chaosblade-io.gitbook.io/chaosblade-help-zh-cn maintainers: - email: chaosblade.io.01@gmail.com name: ChaosBlade Community minKubeVersion: 1.12.0 provider: name: Alibaba Cloud version: 0.5.1 ================================================ FILE: deploy/olm/deploy/olm-catalog/chaosblade-operator/0.5.1/chaosblade_v1alpha1_chaosblade_crd.yaml ================================================ # Copyright 2025 The ChaosBlade Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance 
with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. apiVersion: apiextensions.k8s.io/v1beta1 kind: CustomResourceDefinition metadata: name: chaosblades.chaosblade.io spec: group: chaosblade.io names: kind: ChaosBlade listKind: ChaosBladeList plural: chaosblades singular: chaosblade scope: Namespaced subresources: status: {} validation: openAPIV3Schema: properties: apiVersion: description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#resources' type: string kind: description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. 
More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds' type: string metadata: type: object spec: properties: experiments: description: 'INSERT ADDITIONAL SPEC FIELDS - desired state of cluster Important: Run "operator-sdk generate k8s" to regenerate code after modifying this file Add custom validation using kubebuilder tags: https://book-v1.book.kubebuilder.io/beyond_basics/generating_crd.html' items: properties: action: description: Action is the experiment scenario of the target, such as delay, load type: string desc: description: Desc is the experiment description type: string matchers: description: Matchers is the experiment rules items: properties: name: description: Name is the name of flag type: string value: description: 'TODO: Temporarily defined as an array for all flags Value is the value of flag' items: type: string type: array required: - name - value type: object type: array scope: description: Scope is the area of the experiments, currently support node, pod and container type: string target: description: Target is the experiment target, such as cpu, network type: string required: - scope - target - action type: object type: array required: - experiments type: object status: properties: expStatuses: description: 'Important: Run "operator-sdk generate k8s" to regenerate code after modifying this file Add custom validation using kubebuilder tags: https://book-v1.book.kubebuilder.io/beyond_basics/generating_crd.html' items: properties: action: type: string error: type: string resStatuses: description: ResStatuses is the details of the experiment items: properties: error: description: experiment error type: string id: description: experiment uid in chaosblade type: string kind: description: Kind type: string name: description: resource name type: string nodeName: description: NodeName type: string state: description: experiment state type: string success: description: success type: boolean uid: description: resource 
uid type: string required: - state - kind - success type: object type: array scope: description: experiment scope for cache type: string state: description: State is used to describe the experiment result type: string success: description: Success is used to judge the experiment result type: boolean target: type: string required: - scope - target - action - success - state type: object type: array phase: description: Phase indicates the state of the experiment Initial -> Running -> Updating -> Destroying -> Destroyed type: string required: - expStatuses type: object version: v1alpha1 versions: - name: v1alpha1 served: true storage: true ================================================ FILE: deploy/olm/deploy/olm-catalog/chaosblade-operator/0.6.0/chaosblade-operator.v0.6.0.clusterserviceversion.yaml ================================================ # Copyright 2025 The ChaosBlade Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. apiVersion: operators.coreos.com/v1alpha1 kind: ClusterServiceVersion metadata: annotations: capabilities: Basic Install categories: Chaos Engineering containerImage: chaosbladeio/chaosblade-operator:0.6.0 createdAt: 2020-02-11T15:40:00Z certified: "false" support: chaosblade.io repository: https://github.com/chaosblade-io/chaosblade-operator description: A chaos engineering operator for cloud-native on Kubernetes environments. 
alm-examples: |- [ { "apiVersion": "chaosblade.io/v1alpha1", "kind": "ChaosBlade", "metadata": { "name": "delay-pod-network-by-names" }, "spec": { "experiments": [ { "scope": "pod", "target": "network", "action": "delay", "desc": "delay pod network by names", "matchers": [ { "name": "names", "value": [ "redis-slave-674d68586-jnf7f" ] }, { "name": "namespace", "value": [ "default" ] }, { "name": "local-port", "value": [ "6379" ] }, { "name": "interface", "value": [ "eth0" ] }, { "name": "time", "value": [ "3000" ] }, { "name": "offset", "value": [ "1000" ] } ] } ] } } ] name: chaosblade-operator.v0.6.0 namespace: kube-system spec: apiservicedefinitions: {} customresourcedefinitions: owned: - description: Chaos engineering experiment definition displayName: ChaosBlade kind: ChaosBlade name: chaosblades.chaosblade.io version: v1alpha1 description: > ## Introduction Chaosblade Operator is a chaos experiments injection tool for cloud-native on kubernetes platform. By defining Kubernetes CRD to manage chaos experiments, each experiment has a very clear execution status. The tool has the characteristics of simple deployment, convenient execution, standardized implementation, and rich experiments. The chaos experimental model in chaosblade is well integrated with Kubernetes, which can realize the reuse of experiments such as basic resources, application services, and containers on the Kubernetes platform, which facilitates the expansion of resource experiments under Kubernetes, and can be executed uniformly through chaosblade cli tool. ## Supported experiments (continuously adding ...) The current experimental scenarios involve resources including Node, Pod, and Container. The specific supported experimental scenarios are as follows: * Node: * CPU: specify CPU usage * Network: specify network card, port, IP, etc. packet delay, packet loss, packet blocking, packet duplication, packet re-ordering, packet corruption, etc. * Process: specify process Hang, kill process, etc. 
* Disk: specify the directory disk occupation, disk IO read and write load, etc. * Memory: specify memory usage * Pod: * Network: specify network card, port, IP, etc. packet delay, packet loss, packet blocking, packet duplication, packet re-ordering, packet corruption, etc. * Disk: specify the directory disk occupation, disk IO read and write load, etc. * Memory: specify memory usage * Pod: kill pod * Container: * CPU: specify CPU usage * Network: specify network card, port, IP, etc. packet delay, packet loss, packet blocking, packet duplication, packet re-ordering, packet corruption, etc. * Process: specify process Hang, kill process, etc. * Disk: specify the directory disk occupation, disk IO read and write load, etc. * Memory: specify memory usage * Container: remove container ## Install and uninstall Chaosblade operator can be installed through kubectl or helm; the installation methods are as follows. Note: for `VERSION` below, please substitute the latest version number ### Helm v2 * Download the latest `chaosblade-operator-VERSION-v2.tgz` package at [Release](https://github.com/chaosblade-io/chaosblade-operator/releases) * Install using `helm install --namespace kube-system --name chaosblade-operator chaosblade-operator-VERSION-v2.tgz` * Use `kubectl get pod -l part-of=chaosblade -n kube-system` to check the installation status of the Pod. If both are running, the installation was successful * Use the following command to uninstall, pay attention to the execution order: ```shell script kubectl delete crd chaosblades.chaosblade.io helm del --purge chaosblade-operator ``` ### Helm v3 * Download the latest `chaosblade-operator-VERSION-v3.tgz` package at [Release](https://github.com/chaosblade-io/chaosblade-operator/releases) * Use `helm install chaosblade-operator chaosblade-operator-VERSION-v3.tgz --namespace kube-system` command to install * Use `kubectl get pod -l part-of=chaosblade -n kube-system` to check the installation status of the Pod.
If both are running, the installation was successful * Use the following command to uninstall, pay attention to the execution order: ```shell script kubectl delete crd chaosblades.chaosblade.io helm uninstall chaosblade-operator -n kube-system ``` ### Kubectl * Download the latest `chaosblade-operator-yaml-VERSION.tar.gz` package at [Release](https://github.com/chaosblade-io/chaosblade-operator/releases) * After decompression, execute `kubectl apply -f chaosblade-operator-yaml-VERSION/` to install * Use `kubectl get pod -l part-of=chaosblade -n kube-system` to check the installation status of the Pod. If both are running, the installation was successful * Use the following command to uninstall, pay attention to the execution order: ```shell script kubectl delete crd chaosblades.chaosblade.io kubectl delete -f chaosblade-operator-yaml-VERSION/ ``` ## How to use You can run chaos experiments after installing the chaosblade operator. There are three ways to execute chaos experiments: * By configuring a yaml file and applying it with kubectl * By using the chaosblade cli tool * By writing code against the Kubernetes API The following uses a specific case to illustrate the use of chaosblade-operator: simulate cn-hangzhou.192.168.0.205 node local port 40690 60% network packet loss.
### By configuring the yaml file, use kubectl to execute ``` apiVersion: chaosblade.io/v1alpha1 kind: ChaosBlade metadata: name: loss-node-network-by-names spec: experiments: - scope: node target: network action: loss desc: "node network loss" matchers: - name: names value: ["cn-hangzhou.192.168.0.205"] - name: percent value: ["60"] - name: interface value: ["eth0"] - name: local-port value: ["40690"] ``` Execute experiment: ``` kubectl apply -f loss-node-network-by-names.yaml ``` Query the experimental status, the returned information is as follows (spec and other contents are omitted): ``` ~ » kubectl get blade loss-node-network-by-names -o json { "apiVersion": "chaosblade.io/v1alpha1", "kind": "ChaosBlade", "metadata": { "creationTimestamp": "2019-11-04T09:56:36Z", "finalizers": [ "finalizer.chaosblade.io" ], "generation": 1, "name": "loss-node-network-by-names", "resourceVersion": "9262302", "selfLink": "/apis/chaosblade.io/v1alpha1/chaosblades/loss-node-network-by-names", "uid": "63a926dd-fee9-11e9-b3be-00163e136d88" }, "status": { "expStatuses": [ { "action": "loss", "resStatuses": [ { "id": "057acaa47ae69363", "kind": "node", "name": "cn-hangzhou.192.168.0.205", "nodeName": "cn-hangzhou.192.168.0.205", "state": "Success", "success": true, "uid": "e179b30d-df77-11e9-b3be-00163e136d88" } ], "scope": "node", "state": "Success", "success": true, "target": "network" } ], "phase": "Running" } } ``` From the above, you can clearly see the running status of the chaos experiment. Run the following command to stop the experiment: ``` kubectl delete -f loss-node-network-by-names.yaml ``` Or delete this blade resource directly: ``` kubectl delete blade loss-node-network-by-names ``` You can also edit the yaml file to update the content of the experiment and the chaosblade operator will complete the update of the experiment. 
See more examples: [Examples](https://github.com/chaosblade-io/chaosblade-operator/tree/master/examples) ### Execute with chaosblade cli tool ``` blade create k8s node-network loss --percent 60 --interface eth0 --local-port 40690 --names cn-hangzhou.192.168.0.205 --kubeconfig config ``` If the execution fails, a detailed error message is returned; if the execution is successful, the experiment UID is returned: ``` {"code":200,"success":true,"result":"e647064f5f20953c"} ``` You can query the status of the experiment with the following command: ``` blade query k8s create e647064f5f20953c --kubeconfig config { "code": 200, "success": true, "result": { "uid": "e647064f5f20953c", "success": true, "error": "", "statuses": [ { "id": "fa471a6285ec45f5", "uid": "e179b30d-df77-11e9-b3be-00163e136d88", "name": "cn-hangzhou.192.168.0.205", "state": "Success", "kind": "node", "success": true, "nodeName": "cn-hangzhou.192.168.0.205" } ] } } ``` Destroy experiment: ``` blade destroy e647064f5f20953c ``` In addition to the above two methods, you can also use the kubernetes client-go api for execution. For details, please refer to: [executor.go](https://github.com/chaosblade-io/chaosblade/blob/master/exec/kubernetes/executor.go) code implementation. [Chinese documentation](https://chaosblade-io.gitbook.io/chaosblade-help-zh-cn/blade-create-k8s) ## Questions & Suggestions If you encounter problems during installation and use, or suggestions and new features, all projects (including other projects) can be submitted to [Github Issues](https://github.com/chaosblade-io/chaosblade/issues) You can also contact us via: * Dingding group: 23177705 * Gitter room: [chaosblade community](https://gitter.im/chaosblade-io/community) * Email: chaosblade.io.01@gmail.com * Twitter: [chaosblade.io](https://twitter.com/ChaosbladeI) ## Contributions We welcome every issue and PR. 
Even a punctuation mark, how to participate in the contribution please read the project contributing document, or contact us through the above method. ## Open source license Chaosblade-operator is licensed under the Apache 2.0 license. displayName: Chaosblade Operator icon: - base64data: iVBORw0KGgoAAAANSUhEUgAABugAAAESCAYAAAAfYlPwAAAACXBIWXMAAC4jAAAuIwF4pT92AAAgAElEQVR4nOzdC3Bk2V3n+f9VS1ULPW3JZmkY22HJhsHA0CE1PZ4gGNjKjhnAD3Kk9gvb0JYKKFUCs5TaBMvEzrKlWpjZmSWgVDMspFRASYMBg8EtoQHMw5TEGg+LgZYw2BjW3ZLx+ynZbuyqkvJsHOU/q7NVKpUeec/533O/n4iMbhzYefNe3cxzz+/8/ydzzgkAAAAAAAAAAACAMLo4zwAAAAAAAAAAAEA4BHQAAAAAAAAAAABAQAR0AAAAAAAAAAAAQEAEdAAAAAAAAAAAAEBABHQAAAAAAAAAAABAQAR0AAAAAAAAAAAAQEAEdAAAAAAAAAAAAEBABHQAAAAAAAAAAABAQAR0AAAAAAAAAAAAQEAEdAAAAAAAAAAAAEBABHQAAAAAAAAAAABAQAR0AAAAAAAAAAAAQEAEdAAAAAAAAAAAAEBABHQAAAAAAAAAAABAQAR0AAAAAAAAAAAAQEAEdAAAAAAAAAAAAEBABHQAAAAAAAAAAABAQAR0AAAAAAAAAAAAQEDdnGx410+eGRKRPhHZ/U/vFCcJCZo7ce3yWKiPdf3kGf9eVwyfxgURGTtx7fKGgWMBAAAAAAAAgKQR0JXMtZNnWiFcRf85ICKDZT8vKJ25kwHDuWu2w7lNH8ydvHZ53sCxAAAAAAAAAEApZM45rnTCNJAbaQvkCONQdssnr12uhDoHxsO5BQ3nqJoDAAAAAAAAgICooEvQF06eqbSFcgRywFNW9d4I4gvN1rFTRs//I//DtctWjw0AAAAAAAAAkkZAl4jPnzwzosGDf/WW/XwAe9hp5fhFgarFPt8M55YM3o/+PFS+6NrlFQPHAgAAAAAAAAClREBXYBoAjOmLUA7Y31ioUOrzzdayswbvy50Kwi+6dnnNwLEAAAAAAAAAQGkR0BXMPzy1p9wE7SuBA7vwxdcuz4c6XU5k3uD9ubPf3Bez3xwAAAAAAAAARJc557gKBfDkyTMDIjJJC0vg0Jbvvna5Euq0PXnyjN/X7ZyxyzR397XLYwaOAwAAAAAAAABKT6igs+9zJ89UtFpuuOznAjiCTW0BG8TnmntBmgvn/hHhHAAAAAAAAACYQkBn1GebwZyvmDtV9nMBHMPYPYH2W/tss8p11tjFmruHcA4AAAAAAAAAzCGgM+azJ88MicgUwRxwbAv3BNx3Tpr7zllqP0s4BwAAAAAAAABGEdAZ8Zmn9pgbLfu5ADogaGvLz5w84+/dQUMXbu4ZhHMAAAAAAAAAYFbmnOPqRLR58kyf7jE3Yaz6Biiy073XLgdpN7nZrHp9zNC5muslnAMAAAAAAAAA06igi2izuc+cDxH6S3sSgM5bDhXOKUv7zi0TzgEAAAAAAACAfQR0EWw0q+b8pP5w6T48kL9gAdWGrdaWqyIyYuA4AAAAAAAAAAB3QIvLwD598syIhnO0swQ679Izr12eCHFeP93cN/IJI9fQ77lXeea1yysGjgUAAAAAAAAAcAdU0AXyKarmgLz5kGoy1Fl2tlpbjjyLcA4AAAAAAAAACqOLS5W/TzX3mlshnANyNfmsa5c3QpziTzUrYU8ZuZwXnnXt8pKB4wAAAAAAAAAAHBAtLnP2yeYeVeeT/pBAfOtfcu3yQKij+OTJM2si0m/
gcy9/ybXLFQPHAQAAAAAAAAA4BFpc5uQTtLQEQgrW2vITzdDdQjjnW3qOGDgOAAAAAAAAAMAhUUGXg4+fODOk4dxgch8OsGf9S6+HqZ77+Imd4N1Xz/UaOAsPfen1y/MGjgMAAAAAAAAAcEhU0HXYx07s7Dc3b2QCHyiDYNVzTmTCyL29cC/hHAAAAAAAAAAUFhV0HfSxE2fGRORKMh8IsG/93kDVcx+zUz3nW1sO3Hv98kbk4wAAAAAAAAAAHBEVdB3y0RM7+1KdT+LDAMUxG+pIDVXPTXwZ4RwAAAAAAAAAFBoVdB3wkRNnfEgwWvgPAhTLTiXZlwcIqz5ip3pu+cuvX65EPgYAAAAAAAAAwDF1cQKPh3AOiGY+RDinrFTPBdtvDwAAAAAAAACQH1pcHsOHCeeAmIKFVU5kzMCVnvvH1y8vGTgOAAAAAAAAAMAxEdAd0YcI54CYlp99/fJaiPf/0IkzPpzrN3C1qZ4DAAAAAAAAgETQ4vIICOeA6GYDHoCJ6rlQgSQAAAAAAAAAIH9U0B3SBwnngNg2/f5zIY7hgyfODIjIKQOfmeo5AAAAAAAAAEgIAd0hfODEmQnCOSC6+edev7wR4iCcyISBzzv3XKrnAAAAAAAAACAptLg8oA8096G6WIiDBdIWpHpOjRg4k1TPAQAAAAAAAEBiMucc1/QO3n/izJCIPGb6IPPhWwmuiMiG/lN2/TvKxVeTDcf+m3ze9ct9Id7o/SfO+HDu0RDvtY/l512/XIl8DAAAAAAAAACADqPF5R28v7kH1ZLpg+yMdf2cK63X8wK1EUQxvP/EmSkDB1q26jkL5xwAAAAAAAAA0GEEdPtYP3GmTwOBXrMHeXSb+tl8KLfUzx5X2IfeC4MGzlGwsNzFD+jW+69fDhlIAgAAAAAAAAACIaDbh2tWr1gIJTqlFcrNDzDxj0NwIlbaLAb5u11rtrWNHcxzjwIAAAAAAABAogjobmPtxJkxERk1eXCHtywiswPXL88W7cBhxpCBA1kdCNd2dSzQ++yH9pYAAAAAAAAAkCgCuj080ayeSWFyfM5/judfv7xi4FhQYEYq6IJVlBn4vKvPp+0sAAAAAAAAACSLgG4PTmS24PvO+WBu8gVM8KNDnI0KuiD7zz1uY789ql0BAAAAAAAAIGEEdLs8fuLMZIH3nSOYQ8c9fuLMgIXA+gXXLwcJ6MRGtWCozwoAAAAAAAAAiICArs37ToxXRLLzZg7o4Pwec5NfcX2GSX10nJPMxP5zod7ISRY7oFv/iusztKUFAAAAAAAAgIQR0LVxxWsrt+mDua+8PpPCfnkwqkztLcXG/nPB9toDYF81q/kq5gE90Pbvp/b//CA2RKQ9/F/R/0wWXZ0FPgAAAAAAAIER0Km/OzHuW1v2mziYg/FVc2P/5PoM7SyRKyMBXbCKMhe/xS0T5UDJVLOa3/tySF8Dbf/s9LhkeK//sJrVWv+63Bbk+fHFyqKrU9ELAAAAAACQAwK6ZjjnJ8GK0tpyp2run1A1h3D6DJzrIEH03+20uY2OgA5IXDWrVTSEa/3TygKhU/rPm0GehnerGtotEdoBAAAAAAB0BgFdsVpbrovIyFexPxUCck9N2EbzVYH2VzRQLbj6VddnNiIfA4AOq2a1VhhXuV0Vm3GD+hqV5ufZ1LDOv+YXXZ1qfgAAAAAAgEMqfUD33hPjI2IggDiABd/S8oVM3iOg954Yt1A9tx7qjdzh9nPKA+E7kAgN5cb8wpqCtdA+iF4NGv3rYjWrrev+mbNU1wEAAAAAABxM6QM6J1KEVpFzX319ZszAcaBkjOw/F6wyw8DnZWIbKLDEQ7n9+M96zr+0um6WsA4AAAAAAGB/pQ7o3nNifLIAE2inv+b6TFFacCIxzsbHCTbBSwUdgMOqZrU+DeQmtA1k2fW2hXW+sm5Kwzo6AAAAAAAAALQpbUD3nhPjAzqZZhnhHGKrGDiGkJO6UQP7rwm01x6A46tmNT+OmNRwrpd
Tuif/nXpR22DOaVDH9xwAAAAAACg9KXNA55qTapYn1E5/LeEcIitTBd27m6F9TMH22gNwdNWsVtE2lqOcxkPx52u0mtWWfbBJUHcw+vc2ogtmNhZd3cLCGQAAclHNakYeQQtrVRe4runLP0uv0XY8H9pJo6KvEV2MNpniZwVSx3MXYiplQPfXJ8YrxifWTv9TwjkYYKDlo4SqoDPwWYPttQfg8HR/Od+u8RSn71j8+btKULc3rcwcaZvsaV9MtmzpWAEAgDmtdutPG69Ws5roOMKPu1YWXX2eS3c0+kzQmsjnuQAoqF3PXcO7PgXPXQiqrBV0lle0EM7BEgsBXVkQ0AEGtbWypGKuswjqVDWrjbRN9FjfGxkAABTTqVagpIHdgoj4oG6evYJvr22/6dbiKcZqQAHtUfHKvQwzShfQ/dWJ8THDq1we+TrCORhiob/I1wXal82JDIV4n30Q0AGG6ADe71V7nuuSq1ZQ5/eomyjDBBErrwEAgAHD+rpSzWoLGtQxH/XUWG1EX4MH+K8AMIjnLhRFqQK6d50Y7zNcPTd33/WZKQPHAdxUpg0AnEhf5ENg1SJghFY0TbGqLihfoThSzWq+mi6p8VDbas0RVl4DAACDdsI6Pw7z+6j5cXCZquq0Y0b7WK33AP81AMbsqngd4V5GUZStgm7C6KTI6n3XZ8YMHAewW+zQqkzYuBuITAf0s3v0oEcY/gHqogakY4uuXtjKYlZeAwCAAurX7hET1aw2lXJQ19ZivMJYDSiualZrb1vJvYxCKk1A95cnxgeMtqna1C8SwBxXoh+3MlULAriVPqTPssrOBN9+ZKVI1XRtK69ZrQkAAIquN7WgTsdqrVCOxXhAQVHxihSVJqBzzVZVFo0MXp+htR1MKlmLSwAlpFVzU9piEXa0qukqWk1nbqykxzbCymsAAJCoVlA3Vs1qfq/g+aJ8TFqMA+mg4hWpK0VAt3Ji3OoKmUtD12eWDBwHsCcCunD4LgDC0zaEswzyTfPjt6VqVvMhnbVWwFcNHAMAAEDefLj1aDWrLReoDbkf5z9q4DgAHB/3MpLWVYbL66vnnE7AG3qtDl2fmTBweoDbsnC/hFKWzwmgSVfhLRHOFcKghnQjZT8RAAAAEbXakDMmAwCgQ5IP6B7rGR8TJ4PW0jlxQjgH+0joSOiABPm9zXQVHv3qi6NXV26Plf1EAAAARNQak81yEQAAOL6kA7q/6BnvM1o9d+n+G7Szg33kc+RzQGp0MuE8F7awrhDSAQAARDdazWorutcbAAA4oqQDOicy6UR6jYVzm/64DJwe4I4I6AjogFT4yYNqVvOLY0a5qIVHSAcAABCfb0O+pvs6AwCAI0g2oPvznvEBETln4FB2m3jgxsyGrUMCbmuVUwOg6HRl75Lum4E0ENIBAADE16t7BRPSAQBwBMkGdE5k1ljlnH+tPnBjhj7dKAwnskEFXZjXnzUXFQDosLZwbpBzmxxCOgAAgPgI6QAAOKIkA7p39oxXnMgpgwHdhIHTAxyYhfsmFAOflYAO6DDCuVLwId1I2U8CAABAZIR0AAAcQaoVdBar1BZedGNmycBxAIXiA/dAx7vCXwaQDsK5UpllMggAACA6QjoAAA4puYDuT3vGJ5xIP9VzwPFZuX9CMNDOM1QQCZTFPOFcafRqSNdX9hMBAAAQGeMyAAAOIamA7v/tGe9zIpMGw7kL//zGzJqBUwQcihNZKlFAF/vFAwzQIdWs5ivpT3E+S2XQaAcFAACAsmFcBgDAAXWndKKcyJSu1rFkXZrHBRROqHDsDirapi5XLn6LS9qAAB1QzWqTIjJagnO5eYTWvKmHlsN+P7pFV583cCwAAABl5sdlE4uuznwYAAD7SCag+5Oe8SGjE3KT33BjZsPAcQDYh79P/6RnPOYpotoHOCYfzojI+YTOo1/ks6aLFNb0tbLo6scaV2jLIT9uav3TvwYSaQnqWyoNHPccAQAA4NguVrPa0qKrs987AAC
3kUxA17BZpbb8jTdmKOtHYTWak8KxJ7uDVZY1mpPh/aHeb7d39IwPfeONGR5egCPQzeiL/pu7rnvnLWkQl0t7bA2vWpXJN6vN2oI7X7k8UtDArlf/DkYMHAsAAEDZzdItBgCA20sioHtHz/iI0eqTSQPHABRdyL3Z1mIGdDopTkAHHJIGS7MG21wfxKoe+3xegdxBtQV3/jWp53VEX8Mxj+2QfEulyqKr594eGQAAAPsa9C3oF12d+TEAAPZQ+IDuj3vG+4zu8Tb3L27MMDGEQjOwL5uEDN9dM6CLGfZX2LMSOJKpglV7bWooN2u55Y8GdrOttpEiMiYiEwUJQme1bScAAADimqhmtSlakAMAcKvCB3SuOVEUs+JlL5s6gQUU2r+4MbPx9rj7su14e8943zcF2MtRA7qYhkN91pDe3twjtGPB4zfdmKkU4oMjCN13zuIetHtZ13thtmgTFFrd56vqpnSMYz2o669mtbFFV6fVOAAAuJ3lRVcv5LOF7xag/zrU9rK6YK1Xx8BjBo4FAABTCh3Qvb1nfMBoEDaV2gQ7Sm3VwEB/qG2/pDxZ2HOv0r4nVCL6jLYhRsG1tba0zi/c8a19Cl8hq8Fie1AX+ztzP5MJ7EsIAABwi7ZW3jefk423Jx/VVpexF8UCAGBKV5EvhxOZdCK9rvnvVl7r33Rjht7aSIYT2TBwbwXZVNpX0Bn4rMmtKuz0OQLaFGHfuUu+1WIK4Vw7H9TpXiL360IOi3aq6IweGwAAQEfp+Mx3avAB3TNF5IIuFLOCuTIAAHYpbED3Rz3jFScyaiyca7XcBJLhRJbKEtB9840ZCwHd8B8199ZM6W+IgA4dp60tra0MbudDq/sXXX0i5f02/B56i64+pEGkRYzLAABA6bQtphowFNSNapUfAABQRa6gs7jyZvl/ujGTWms6wEILiiABnVoO+F63Q8UHsA99sLdckXbBh1Y+vDJwLEH4IFJEThtbpe0NVrNayN8QAAAAM9qCuiEjXQ941gUAoE0hA7rlnvExJ3KK6jkgf0baPgbbA8+JrPBd0llU0CEH/h7pN3hifTj1kE6ClI5vqaT7aFoL6RifAQCAUvN7v2nXg7nI54FxGQAAbQoX0C31jPc5kSmD4dylUzdmSrNSHuVx6saMhRaX/t6vhDjpRlp69i/1jCezspCADp1UzWq+Tc95gyfVr0iuLLp6qSvptWrQWkg3YuAYAAAAolt09bHIIV0/3Q0AAHhKESvo/GqbXgPH0W6TzW6ROAutMIIEdCJiJWhnZSGwN4utLVvhHAt1bIZ0vbpnIQAAQOkZCOlocwkAgCpUQHe1Z3zAiZw3WD03Wbkxs2HgFAG5MNL2MUhAV7kx41t6rhv4vINXE6mio4IOnVLNav57YNjYCW2Fc4wD2mhIZ2mhAQEdAACA0pAu1kLcUItvAQAwr1ABndHWlusP3pixuJof6BgjAd2pUFfUSJtL/0qiMpeADh1k7Z4gnNuH7kl3ycjhMBEEAADwdLEWhA5Ws1of1wIAgAIFdG/rGa84kWGDAR2l+UielcDqbeXah86/+t/WM174VpcEdOgErZ4LFtQfAOHcASy6+oSRNsn9un8hAAAAnup4EKvVJYunAAClJ0UK6IxWzy3/yxszSwZOD5Crf3ljxkIFnX8FaVHmROYNfc9M/kHPeKEnlQno0CGWquf83mpjhHMHZmUxExNBAAAATxdrjD3EdQAAoCAB3R80K0gGDRzKblTPoUyWDXzWIAHdv2ruKWmh4sPrFRHa6KLUDFbPjeiKYxyAnqsLBs4VE0EAAABtFl19TUQWIpwTFk4BAErP67Z+Fn6/Z7zP4J4z3oVvuTGzZuA4gCB820cDE+T9v98zPhDi3nMifu+ki3m/zwEN/37P+Ni33JiZNXI8h0LVGzrA0oKYC4uuTvX84VlYaMC4DQAA4Fbz/pkz8Hmh9TgAoPSkCAGda4ZzvQYOpd0mFS0oG9cctJ838LFHQtx/+nmtBHT
e1O/1jC99awEXBhDQ4Th037BRIydxddHVLS4aMk/bgXLuAGAX/Z0b0CrfPn0dtuJ3rW0Rws4iEhaTADgE/+x7JfAJ6+cCAQBgPKD7vea+S+cMHMpuE9/abIEHlMa33phZ+b2e8U0DgflYiIDOB2G/1zO+aqi9rj/v87/XM17h+wclY6l6LkibXQBAmqpZbUjbug3pq1PjzPYuFzsL6qpZzf9jXURW9OUDuxX2TwWwm/9eqGa14M++1azWx3cSAKDsTAd02mLOmtVvK2ibOeC4tM1l6NYXuw3+bs/4wLeVr82l6APTVNH2v6SCDsdk5e/9gu7RAcQyVM1qoSti2t/PT/Cvsf/i7VWz2pTBvQ439Nq1+Gu6wXUMQ6vjRjSUq0RY6Navr+G24G5V/w7mqbI7Ht0jd3fFYxH31ZpddHXmOBAjKBvaNdZAAIxXAByGLjDr0//KXuOc1lho5Ta/Je2dHlgstgezAd1be8YrBva72suEvUMCwnBxetPvZSxEqzSDbS690bf2jK+9+MZMYVrFEdDhqKpZbcRI+5t1WlvCgN4IY+Nb3k+rcloT/DsvHrJuGjL6/NI+dmuvrlptq6xaYhFCZ7SFcmOGOjG0G9TXuWpW29TWdj6sm7dziLb4Kh+9vyttFZDWtuE4DgISiNjYcx5hMF4BcAtdeDTU1nr9sOOdA32v6H293BbMr2hwV9p723IFncUVXAsvvjHD4BVlFqM3/V6CBHQvvjGz9tae8WWDg9fzGtKx0hWps1I9x+Ic4OluTvBL8yFrQSf4+V0qltZ13Nnns62yaorJr8OrZrUx/d0q0gR3r17/0WpWW9dn8Fmu/9OC1hFCCwCIivEK0GG6+KjS9gq9qKw1troZzOvCsaW2Tg+lub9NBnS/0zM+aXDD2E0m6FB2L74xs/E7NgKr/t/pGa+8JEBgrm0uLT6UX/mdnnF5SQFCOirocBQ6YLRQsbtMVQFwR/5eHdaWSVNM8BdWe2XVqk58EbruQ3+rJjSYs/b8elj9WrFwvprV5vxiuLLdx3o9R/SaWqx+BAAwXgGOpACLj3pbz5W+m5kuHpvXZ8uk292aC+h+u2e8z2gQNvXSAHteAdZp20cLX+RjIdqx+ADst3vGp4y2sbny2z3j8lLjIR0BHY5oxMiJo7UlcHC9bRP8l3SCn/aXxeQnvq60ha5TXMuntAVzE4m1OmxpVdWVIqjTCatW0Jri9QSAVDFeAfbRtvioaF0eRBePndMwvhXWJVk5ay6gc80vVGuD4nX9ogdKz9C+bKO/1TM++bIAwbl+L53P+32O6Mpv9YwPGN87wtoG1CgGCwHd6qKr09oaOBr/MDXmJ0zYw7HQWqHrBNeySVtZWuz4kodWUJdk4K6TVpOtdr0AgMJivAK0SXDxUXtYt6zj0mTmakwFdP+tZ3yo1VPYmMlvvzHDCgxARHwg9t/s7MsWZC86bXNpNaCTVrWCgeMAOsJQe0sW5wDH06vVdDurNlNvTZK41rUc02tZusUL1aw2pL8LZdyPrBW4T6TSRsx/Fn2OoGIOaKpwHpCA0o9XUG4azE0azVc6xY/Fr2pV3WQKY9MuA8dwk69ScdoOzdBr+dsLsMcTEJIPrIzcoxOLzb**fr2GzNrTmTO4PdTKV8oBQvVc5vsZQB0jG8/9JhOiKPY+vWBeF4XU5RCNav5iY7HShrOtfRqG7GlIl97H7RWs9qKdgQhnAOeMhDhXLBwB3kp5XgF5eX/zqtZzc9fPJF4ONeuX8emK9WsVuhFJmYCusWecasbFDKRANzKt7ncNHBeegNO5NMiAQjHwuCKcA7ovItMlCTDVzkX/mH4TnSyY4lOBU/jn9nXtDK2ULSiYkkXDQBQWnERvG0ve4UhgFKMV1BuupBsrUTB3G6DbYF8jMUmx2YioPvNnvE+o9Vzc9UbM6zoAXap3pjZ8HvRGblPgwRn1WYV3XLZq9csvFAKFib9aG8J5MNPlBS6Agc3tVanJ7mgUSfz1kpeNXc7
fpHco36fH5uHdytdVX6FqjlgTzHCi1UuBQJJeryC8mrrCnCe8c2OViBfuHvdxB50zu1UqVnbZHuT6jng9pzbqS6xsDqjf6F7fGx4K/9WtM7thIFX834foMx0j5/Yg8vVRVdfK/eVAHI1qBU4FfalS4KvjBxadPWxVD6QVlpdMXAo1p3TlcpjVithdDEAVXPA/mJ8f1M9h9CSG6+EoL+jQ/pWA5Ha4WIXrZqjw8OtevVeb+2BXoh5negB3UL3+IDRIGxqeGuGAQNwG8NbM0sL3ePrRsL1yRDt6PQzz5W4bBwIgfaWQDn0aiUdIV0aRnWBRaXoLcu0KuycgUMpiuG2e9nUtSecA+5Mq4VjVAovcXkQQTLjlU5rC+Iq+s8+ugjYo9dpnmtzR6e0mm5k0dXN/95Eb3Hp29M5kV5jLdTWh7dm2G8KuAO9fy3cs/3z3eNBgn5Dn7m0LyTPQkA3b+AYgDLwIR170qVjsOjtS7UNIuHc4Zm79oRzwIHFmvticQ5iKfx4pRN0n90xP/apZjVfZfRp7Rh1XhffEAAZo+HyCtfmwHqL0t42akD3aPd4xYmMGpxkprUlcAC6D92mkft28tHu8dwHWCNbO3vRXSp7SBbzheTFDujWaW8JBNXPJElSCjvppeEcXRKOztq1nyKcA/anLcBiTfRSQYeYShnStYVy8xrIXdGxj7Vtp7CLVjsvca2O5KKO882KXUFncVPp5Ye2Zlg5DxzAQ802sFbu496A7XIndZ9KAB2k+9jE3n+OMQAQ3qDR5wIczWDRvksJ5zrGxISn7svC9QT2oePuWBOWq7QXhAGFG68clQ/jd4Vyw8X8JOWkeyNfNTBXUmSjlkO6aAHdW7rHx5zIoMHqDDYLBQ7Bicwaun/Pv6W5r2WufDBJq8t4LyRtyMCHYzUvEMdoEdqP4MBOWV+p2qJ/d4Q5nRM1pNP2T+djvDdQFG17GMWa7GVBHKwozHjlKLRazneHeZRQrpg0nLtS9vPQIWZDuigB3W90j/c5kSmDE7+XXr41Q1sr4BBe3mz5OGfoPg7yZfvyrRn/HbZa9rAsxgtJI6ADym1SJ9eRBvOhq056XDRwKKkZjFiZw8Q/sA8j+zNyn8KS5BaJtQVzV2iJWFyEc7kwGdLFqqCbMFiWuRlxc1yg6CzdO6d+o3t8JNB7UXELdFbs/edotwPE1Uury+RctBq66nHx95afYW01GYy+HxORwG3o917scM7v97zCNYIxZscrh+H3KSOYSwPj1FyZC+mCB3Rv7h4faIicb4iIsdfkK5r7aQE4pDZ5DXYAACAASURBVFdszaw1ROYM3dOzb+4ez72tziu2ZlYaIhcMfp8l/ULScm9RewdMFgDxndLVokjHfOw9yXbT45llL4/cnfeThSHeSK8pbXKB29AKodjhnDDhDMPMjVcOyh+37jF3lWCu+HSP0CXGqbkyVTkbo4LO4o/x+qu2ZhgkAMdjqYquN1RbnVdtzfjPvRrivYASiP0wQUAH2DBV1AkS7KnfYKeSKQOT1GURasLTYpceIDqtqFnRdr4W7pFk9/tC4Vkcr9xRNav5DlJr7DGXBgN7hJbJxVALye4kaED3a93jFScybHBPI1bpAsf0Knt70Q3/WqBWl/47pOz7woV8IU1GBkYEdIANvVTCJOeclQdgncgaNXAoZZH7wjmq54Cn02qaMQ3mrhpakDBHO3kYZ2a8chDVrOYXHD1KmJMUFpGFZaJyNmhA50RmDU72Lrx6a2Yp5HkAUuVEJp3IpqH7e/ZXA7S6fPXWzIoTeaTswVmoF5JloVqGgA6wY4IquuRE71jS1tqyKHyXhuV9XpsF+RzDGozmZYTJSZSd36/It+vSNnef1j2orE3yFq46CaVkvsOahvB+LvucgcNBh7CILIperViMqjvUm/9q9/iE0T64rLQDOuQ7tmbWfrV73A9mzhs5p60v2txXQH3H1szUrzYr9k7l/V5AomJvyr3Jil4Yt9nhEHnA+B4V/jd8
rGB71cQO+a2PQQZ9Rceiq8cMyCYNBznLut+If60c5jdJV/sP6Zi3YvQzzvo9VXL6rQ3SNQNJ6itStYwaaNu3eUgXuRXhGdRXz60ZOA4wXrkTC+OV29L9yeapskpLAReRpcTvgT6x6OrRnjuDBHRvalawWFwpc+E1WzMMEIAOcs2JNEt7QJx6U/f45Guae8XlyjUnB1bYlBc4ktiVMlTPwTo/Yd/xSURdqVnRMMzapP5EkQK6RVePvvDPV1DohO2I0b1IJmNNPui5sbbSfFX/xuePE1wtunor2Nu5X/S+trYKu9W6tqNjcp3QYt8dHNWgtoBE/qieM4LxyoFEG6/sR8/bElXjSZrlukY16SvQYy0kCRLQOZsrFTcLtiIXKITXbM1s/EqzYvaKoeM9/yvd4yuv3ZrJtWxZP7sfYD6W5/sAiYpdQUf1HEpp0dXndRWub4s1phMSVhaa9PvKBg0fcACLrr6iCw5m2/blsrRwqj/iqnRLz36+Wm4yr7/t1n1dzWqTek9bCerOV7PabIcnPyxUP62LyJpOmrZsFGjxD4uWkbdLVM+hHeOVwyOcS5dWchd5sdFy279b79JyO736rBClK0PuAd2vdI8PGO2JO/HarRkm44AcvHZrZvZXusfHjLUu8MdUee3WTK4Pyv5//1e6x08bCygB3BkVdCg9nYSY1Ul9K+2qx3ZNeuOAtCLLrwZtdTewck2Dr0rXiQ8L41K/SHRMA7Tc6YT4mN7Ts0bOwaTe150SK6Bb0MUNx6p+BEpgk+o57Ifxyp1piDlPOJesIhQQtVqx+3mTjYMsMtN2rAM6VmtVzVoO74ZjLQ7NPaBzNvunLr9ua4a+rkCOXHNgZamSzA9kln65e3zgdTmH8z6g/OXucYttlADLBrg6gA2Lrr7T4kMnAmI/RBVtbyBz2ia+5nWiKfaeJX5V+kiokEpZmBz2ExsjMcIcDeoqfn8NEbkY+v13GfWBYQeraUJX4C9ryEo1EHAwY4TYOAjGK3vTcG6JrVTSpB1MrO4nuKD34tJRvsd1rPS0DgNaCTpmdHsF0WeG4M+fXXn+j/9S9/iIEznlmpP1ll6s3gFy9rqtmRUncsnYvd/rRJZ+qbkvZq5etzUz4UTmDH7/Ff6FZMV+4KCCDmijrYeGdI+smPr1QQ7HpNfUP3DOGTiXnayg2peR6rk5v4dk7Elq3fz+Qa1oiamTz+Mhvx8u6HUknAMOZi52uIHiKet4ZR8Wwkrkx2JG4e+95y+6+k5A3cnxq7+//T6Yi67u52VPa5twS07ps0NQuQZ0Rks0575za4Y2OUAYkwa/bP3AJkhIp+0ZYk9sAjgYVvYCu+jDWMXAbxlVdB3ir+miq48ZmPQa1rY3IcSe+JjTc26Ctu2pRA7pRjt4/UOtvvbXkYW+wMGt6vMwcGglHa/cQivfi7w3Gfah1XOWKiOXNZgL0inAb6+w6Or+/rqQ93sdUvDxXm4tLt/YnPwOMQF+KF+ZSdf1k2eq1o4LSNGr7hL5w0b2iU84c6X4A/r9lOuE/HduzWy8sXt8hdVOAICi8hMkvr2PVpnGakNSKcjeDIXhH7yrWc0f7mjEYx7J+7rqpFrM6jlT4VyLX72s9/XViIcxcdzJe237FcImQQNwKJu0tkQnlGW8shftIBG7LTXyZWnhzyPaaSE4Y9srSKuKLuRedLlV0H3X1syGk2zSSSaWXh9w2fAnndyb1+cG8JS/bGQv+6TLHjD2PbDpJKt819ZM7qtBfrH7rP8OHLX2PVj0F9JD+zrANl1BGTNk4DsiBxocLUc8hBCVkTEnPlYthnMtOunwSMRD6MS5CfXdcKS9V4ASG9E2hcCxGRivjER639lI74sAdKGUhTDKL6g4HSucazG0vUJL0IVZuba4fHhresqJrFva7+gfRJ7x1y6LufICKIVPOLn3CZe9dtvWnmebTqTy8NZ07g8L/7X77JgTOV/2/eLyeCFJFirumUQA9qF7yCxEOkdsip+fkYitDocD
VEDFmlTbjPjeB6aTMbHu615t7VQEjBGAgzsdsuoApRFzvHIqYMX2jmpWm6QTU/KsjIH8ggoTYbCh7RUkdHvb3FpctrjmH1zM1hm3+IjLvvkJJ299fubeZem4gJS822WjPhA39pEmRgOEc3PdZ/2qjyt5vw+AzmFlPHAg0fbBCN1mpCy0helExHFLRdvZdJyGP7Hask6E2LujQ/x5Wot0rkaoEACSctrKRC/SkvJ4ZTcNBc6HeK9jWg24j3vMduUdp9fYwt6C5hZU6L1e0cVRsRdpjoXqxpF7QDe6Nb0023122dLNtCUi73XZ65+fuR82cDhAcp5w2X0+CDdW7XRhbGs694eF2e6zfmUVE4gAgOT4wKGa1eYi7wOCDvOTqRpmxXheG8pxwitWBdtykSaodSJkMtI+NztVlEddJOMnlXRvorwFW0ENFBjhHHKV8HhlN4v30YIGJn6uayX04tZqVkutmZKF6rk5q9/ZbXugPxb5UIIFdLm2uGxjrnXFhpMX/pXLQux7AJSOD8C3bH3ohbGt6VB7kCxFXK0NAEDeQk1O7MY+dPmKtVdbLs9j2ooq1spkSxv+H4i2ulyP9PbmW4H6RQm6mhvA3i4QziGQpMYru+lvjZUCF19sc1pEnrno6r4N4qRfGEPnmY6InZOsh95j7bB0T7oLkQ+jv5rVgjyDBgnoxram15zIJUt7GPl9sR5vZGeeFLk7xDkAyuJdLqt82skLDd3v6y7Qj9+V7rN+383Bsu8Rl/cLABCP7kUXYw8QC3tVJkvb28QIaPJ66I1ZPVfUTgqxJj2Pe61CfR/N62puALeaIMRGCAmOV3azsMjHB3MPLrq6by8/SyDXWRr4xG7dOFmQ6xpzAVlLkPnkUBV0flJ10olsWprk/YzIPauN7BWhzgGQus+J3O2D721bgc7I6a3p3H94fqH77IgTOVf28CzECwAQHa2c0zQV4VP1arVbp8WaKI5xDjtCq19ihO/Dx/wbyH1/aeU7ZDxazWrzuncMgKffH1e1/SCQt5TGKzcZqJ7zY4BHNJhjrJ+f2IsZCtOKXUPE2KF1kMVZwQK6725OkJsrn/ygy175cSf3GjgUoPDe08he+lmRewx9jgvfvTWd+0P7LzT3naOlBwCgLEJNiCOslNqXxqh0WtcK0yKLNZ49zmTVWgeP4yB869SValabIqgDbnGFkA4BpNpuPeacuQ/nKtryGvmK/R1ZqLnLiAvIWoK0uQwW0EkzpJt1IquWKjG+4EfXLvv+kOcBSJGvnvt7l40Yur9XvzvQvnNOxH+39Za9si3UCwAQHQFdghZdfc1AG5lj04foGPsBFz2ck4gVgMcJ6GKs8vd/X+dE5AmtqKP1JfAUQjrkKpXxSjtd8BFr79xWOMf4PmdahTkY8RDWC7pfaOzgOPeqx6ABnTQnbyesTfZ+3GVf/z6X3Rf6XAAp+etG9tLPidxj6N4O8lDwc91n/XfacNlDs5AvJCl6/3P2zQAOhb0o0hUj7Oj092+s7/PCd1PQSc/VCG99nGsWe0JxWFtfrmlVXah9igDLCOmQtxTGK+1i3S+Ec2HFnnMo6mKy2GPs3K9bd95vsNv3bk0v/Vz32YWIKwNuseXDBZf9wFdkrmblmIAi8dVzH3CZpZWjF743QGvLn2u2trSwiS9QaP6BoJrxEwygPHQF7ZC++toe/AYMbBxfdDEmP9YTmtyaj7C6e9DfE7rXyKHoGGLdwH3Tr1V15/R4/OTxfAJtT4Gj8oH1ChP/xcZ4JZhYAd0Y92hQsQO6Qi4m8wvIqlltNWL1YXoBnTSrLCYsBXTeppNn/0Uje9nXd7nfMnA4QKG0queMHPP6mYCtLSO1UAIAAAWiE1wV3RttKHJ7m9TFqGCKsZI/L/6znI/wvkPHOI/zGo5Z4SetR/1LFyAttAV2offMA2Lxz8lLvnXfUcJ3xMF4JTytvI4Rdi6wiCS4mFX2RV9MFmMBWUuvv0/zPH9RArozW9NrM91nL0Qa+O9p
W0Qed9nrvkrc0j8SedLKcQHWte89Z0SQlUcz3Wcr1hYaADiWPk4fgE7SSa4RfTFmCEDPeYxJrmQCukVXX4pU1V45xnmcNRbQ7Tasr4tt1XWtwI7gAinr1b91Wr8axngluhjVc5sRq/bK7FTEz170sWqsBWQtQ3m2VQ++B12bKWubevoKoD9tZK83cChAYbzPZS8yVD23ML41HepHp/D7jADGbEY+HCYOAHSErxaoZjU/TvCVMleY7Aoq1nd5ShV03nKE9xw46n9RVzSbmlvYR6u6zn83fNq3ANS960Z0khxIjW9hy7YQBjFeMSPGdjFTLBAJy8AetYUeq/oFZJEPIdfrF6WCzhvfmt6od5+d1B8BMz7sspe838nvPi9zj1s6LsCqx132moadY5sI8Sb63UW/daCzViKvKAOAY6lmtYoYbOVfMjH29thMsG3hUoTf5ONOfPgFwBc7dCwhDeprpwJQ91hpVdelFvyivM5Xs9oSf9M2MF6xI2J7yymr5yRhR16I1CEp7DW4HHHOKNeALmYFndS2pmcjrc67rWsi8i6XPWzpmACr3uOyb/D7Nxo5vEu1rencJ0fq3Wf7QgWBAIKigg7AkbStQL/KZFd0MSqQUpjw2C1G4HjcfUVmDVTjd0IrrLtazWrOhxrVrDZhYOU9cFyzVInGxXjFpBgLi+aonosi6u94wfefa4n5GdKsoGtxIpP642DGJ5w88Fcu+2dfl7k/s3RcgDXvc9m3bds4Jv8wHqRthmuGc70h3gsomdgVdEwYADg0bdtlZl9tRJn8IKDrED95fNRqRD/Z6FtFJng/nmqNj6pZzT/zzLN/XUdtFvweHihQZ5d+XehKu8sIGK+YFSOgmy/IuUlNzIBuNZFzGbNjRa7zwNEDuu/bml76me6zc9qH3YQbIvJel30vAR1wex9zcu8nnTzgbJyjqe/fms79AfVnus8OMKgFchN7komV6QAOTKtZZjtQ9YPOitE+KLX2ljv7fFSzWoy3Hjjm+ZyStBfT9eq8yc4edtWstqwTrfMJtlkNZWXR1WNMkHeUD7f1/qnoy2rbeN/qcpa/13AYr5gX/Ptn0dUJ6OKIuSDY7wVqZPq2uHx74LxaNUdtcdlmwlo7Ct+27x2N7FUGDgUw6T0ue/ENGwe2GbB/Nqv9gPzEXr3cS9sdAAfh282JyGNMdpkUo5IkxQq6WI71O6wVZWVqRX9K9917oprVVqpZbYyxTDn5wMtPGi66+qQGjs8UkdPWtpRRPFMHwnjFNg3WQy8oWSjaeUoIC4JxWyYCOl/54pxMOSdi5bXtRJ5oZMMfdXKvgVMEmPOhRvZtRu7XINVz//ddZweck1FL31NlfSFZFto0MWgGcFt+4rua1eZ1QhzGEEx0XIyJ/WP/Di+6url97gPxE/BXROTTvkKJPevKzYfV/l7QsO5+v+eUoRMyqsEEcsJ4pTBoy10ubJVTfLnds1Yq6OQHtqcnnci6a+7xZOL1pMgzVhvZKwycHsCUv25k3/CkyD1G7tXZEOfGiYxZ+n4q8wtpyqtVwCExmQVgTxr++O+pYc6QWVG+w438fuHpxqx16AnMt8B8rJrVfKvSwrdvxPEsurpv4+nviQcN7UNEFV1OGK8USoxxC2OWCFhElozcrqOZgE6Za0fxIZe95G9ddp+BQwHM+Hsngw0bBzP3b7anc+9f/9N3ne0rWbscIJbYk2kEdABuoZUoK7SIQsnE2COqI1U1ur/VWCf+twrOt8C8SlAH0cUEi67uf88uGDghVNHlgPFK4QR/9mRRUTTMM2Bf3ZZOz7/Znp7/L3edXba0oe01v9dWI3v5V93l3mXgcIDoPuvk7o+77GVGKpmC7D3nmg/4lsvRF4y3KhjQlbzAnaxEHgMwcAbwNDrZtURbGpRQYQM6aU5Czlezmg8iznfqf7PAWkGdb3M4qQEmSsrvU+dDWxGZj/zbNsEi2M5hvFJIoauq1hM5b0Asuc0XmQropDkR3trE1IyPOnng
zxtZ5YEux0oDlN7fuuxFT9o4Ccs/uD0dJJRyth8c5n5we9r0CuH/fNfZCgEdDmgpckA36NtP+H07uGAA2tpEMdlVDDGqMZjsMkyDCBaKPcWfh5FqVvMhXZCFjrDJV9FoVWXM37gxArrO0PFK7MAVhxf6uZfFGcDxlKbFpeiE+yUDh3JToxlKnPGVQ0YOCYjmg06+3sjZD7L3nIZL/SHe6wjMh3PAIVl4aKAFFADCuWKKEdAx2WWc7r01V/bz0MZ/p13Utpe0GCwxvzedjntjtZjvrWa1kbJfh+NqG69YnbOAHYxZAKPMBXTSrFaZdCKbrvnvJl4bTu5ZcdlLDZweIKpPu6xi4J7c/MHt6SABnW9vaem7qO1VmHCu058dSbNQqU5AB0B0IRB7uAAJIKTbk6/cWCEgKTcN6WI+U/L3d3yMVwoo0r6gBHSAUSYDunPb0xsNkcmGVq9Zef1tI3v4I07uNXCKgCje1ci+4R9s3I/zIT7/xbvO9jVERq19FzVEFs4VqHKu058f6dI9UWKt4m0hoANKrprVfMut4bKfByAlGtKd5qI+ja+me9S3vDR0TAjM79cYsYsVAd0xMF4BCiP0foMoGJMBnffI9vSUtZ7+XxCRP29k9K9Haa07GTQSkITaM8HiA8Nq5FWOQN5iV9EN0vIJKK9qVvObf1/kTwBIz6Kr+0qT+9k78Bbnq1ktSHcSmDUZ6b7ojVRJVHiMV3AEVNDFM1TWD46DMRvQidHWcn/vsm/+G5fdZ+D0AMFtSDZk4D5cf6S5V2XuDH4H+da/I49sT28U6a+fFpc4JAttLlnNC5QXk9RAwrSl35C1fe8NGCWkK69FV9/QkC4GArqj4X4tNvbNLRcLcxwwzHRA94bt6SUnsmxpgvyGL19pZK83cHqAoHx71yedPMfAfRikveVP3nW2z4mcMhbQjbxhe7pwgyoCOhyShcErVapACWmbN/ZxARLnw4hFV/et4Z4vIstc75t8SBeqUwmM0QrTGFV0BHSHxHglCXRsAXCT6YBOmZsk+4STF/73RvYyA4cCBPM+lz1w3cbpDrVSzFoFzYUf2p5m1Q2SpyvbY+9DR5tLoGSqWc3vDTHBdQfKw+99u+jqFW17Ocel33GumtVYqFReMQLaU2U/6YfBeAUA0mM+oPuh7ek1J3LBUhXLtoj8TSN73Wec3G3gFAFBfNTJ1xi4/9Z/KFx7yxFD3zurP7Q9XdjN26mgwxEEqZS9AyangHLxk5K9XPNCCzJG3IU9RRLgFwcturr/3X+miJwWkYWSn5IpFiqVVpS2ify9HQrjFQBITHcRPo5r/gBNWPoR+ozIPX/SyF7xrXe5/2rgcIDcbbjsAQPhSLAKMicyHOq9DqDQQQGhGo7A3+ujkU/cWMS9OAAEpBODsb9z7mRVA6hWq+u8xkQ+cLqY0/923mLs0cskaUJ0Hy4fUMxqlUpFu2r4f/aX6FT06nmg9WDJ+HugmtWWI1S1DbA/1p0xXnmaIo9XAOBpChHQ/fD29MZP3HXWB3RXDBzOTU+47JUfdu6t/ziTjxk5JCAXH3Zy7zWRewyc3SBVNT9x11lL7S0v/XCgqkHAkHkDv/n9vsWT7scBIG0WF8Js6nehfy1pcJC7alaL+6lhSYzqQDMT9HrPte7B1sR4pe2VemB3inFQaS1FCOgqRvahto7ximK8AiAlRdiDbscPb0/P+jZvllpdfl5E3tHIvt/A6QFy9X6Xfe01G/ddkEG7E6kY+Z7ZdAlU8NDiEoelD3YW2kvR5hIoB0t7uayLyCO+msC33Ft09flQk104Gq20SlGMz2W2gkb3q5vV+3JA9617RMcrsffOzctUwn/fuD0Wh9rFeAUooEVXZwEC9lWICroW1/wxumrjaJo+5LKvf7eT+742c++ycDxAHj7p5LkGgpHVH9meDjLgc3bauUyF+sx5IlTDEflVmLFbzfrV4xUG1EC6fIWIoTaFF/xvPxNcR+O/qyOtqB+i8qN8/L51GmT4
7Tj8d8nQrgq7FNqf9upipSkDx4JwYgTlBMF3wHgFANJVmAo670e2p5esbdi8JSJ/1sh+wMChALn5tGTPN3B2g0x8/Ke7zvqHg8EQ73UH6z+yPc3+VyizIC1tD4D7EEibhbbWfr+W+xddfZLJLhgSusWdFLlyxwd2i67uJ6xHFl29r63CbtnA4R2HpYodBKDhc2gxWuoWDeOVtLDnYvmkWm2PDihUQCdaRadt38y0uvyUk2f/YSN7lYHTA+Tic05eVKL2lkNGvluSWalKi0schaE2lztVdAaOA0A+Ylfqzvlqm0gToilaj/CZ+I3onGQmfNsCO39/ZyLykN9bOtLf6HH0Mw4CTGC8kpYYAd1Ayie0ALh3cFuFC+j+7fb0mp+4thTQbYvI+xrZyGec3G3gFAEd9T6XvYD954K//CKEZDZkJ6DDMVi5D5K5HwE8pZrVYq9Gn9N9W1iF3jm0ZuuAiIFMspNXuj/TxK7961YNHNpBsCcvEJGBkJzxShoI6OKiahK3Vag96Fq0ssQPEvttHNFOneo9VxvZ64fvcj9r4HCAjvmMk3uvxz+d6/9rufafmw31eUMgVMNR+cmsalbbNLDfgl897lu50O4SSEvMllrLfrKLv6eOW4nQmjHF1mxRJvHKMvnbvn9dNasNaOu6MSNt9vdCBR0QV8x7kPFKPgg7yydqQKfV/DCqcBV0np+4diKTlqro/OvvXfaS/89lLzBwioCO+bCTLzdwfwVbTWukxWVSG7FTQYdjslK9NqGTaADSEWvCa9PIXjIpijH5kWJAF+MzFX2vtiNZdPU1bYXpz7nf9/uCwTaYfqESe4QB8TBeSUykVqE8y8YVtUsA7aptK2RA5/277elZa4P4z4vIOxvZwwYOBeiYz4hYCJ2D/JD9+7vO9hmo1Fn9d9vTlL4DT7ESWPfS6hJITqwJZ9pE5SfG5Edvggs4YtwbpR//alg3qW0wHzI230FAVxKRJnFLf//fAeMVdAIBXVyxv+f4HTeskC0uW3wVnYhctXE0TR908sA7GlnlG7tckP2ygLxdk+xuA1VLofafs/CDlVT1nFD1hmPyk1XVrOY3JR81cC5P0eryaKpZzS+AmIh8GH7ik5AVO/RvMsaiHN8qap6rkJtYq5MriS3iCN0mVFLef+4o9HtiXsOSyUjXpB0Tu+UR41oT0N0G45WkLQf+biegichXTVazWsxD4PobVuiA7n/bnl76sbvOWpm027EtIu9x2Wv+qXPv7M3kSQOHBBzLZ528yEDAEmTAbiSgS24QTECHDpg19Ft/vprVlhZdnYU4h+PDufORj+FS5PeHLbF+85NbiGOJX+lfzWrrEfYqTyagi9gCiYBuDzreqOh1mYq4Tx2tscqDSVxbGK+kK3R1oq/476MqMqrQoWw72tUaVuiATk3oH1nstnQ3fcLJs//UZS/9lsy92cghAUfWMHDqfjRcy8fYK0NXf3R7OrnB0o9uT/uJBTakxZH5yalqVos5mN3Nr2gf8tV9XNU7031rYodzEqoaG9jHJqvRg1iKsKgjpUmPKJ+FhS/70/Mz5Cv5jfymIl0xwljuf1sYr4ThF6YMB37PIe63qJYizmn06hwGC6IMKuwedC1+MtuJTDmt0rDyem8jG/6gk3ttnCXgaP7KZfd9If49tRrq8vkKusiflUEwcHuW2kr2akjXZ+BYisBEVQkTDdglxv3LhEgYsfahS6XqJEZAZ2pvecu0zfaDfgI98GFaWSSFHOl+mjGqNKnosYXxShgxxitUQ8cVOxwbi30CsLfCB3Te/749PelE1i0FdJ8VecafNLJXGDg9wJE5bdsa+X4KNlh3In2RPysDYeA2dOW4pQk8P3mxREi3P13pH6sdV7sFA8cAW2KEKaxYDSPWeKrwkx4aMoZuDyrcG4ejYyJaVSEPUb7HqOjYV4xAhesRBgFdyRhYMEpAZ1QSAZ2aMHEUbdZd9pJ3uew+MwcEHNLHnTzXwDkLOckSdRL5fLMVJIDb
s1RFJ4R0+6tmtRFDbbionoMFTHgFoBO96xHeOoVJj1ifge/oQ9KQbq5QB40iiPEdQAWtPYxXAtDtEqiGLp+Y33m+4wMhnUHJBHTnt6fnnciypSo63xrwnY3s9QZOD3AkTzo5aeFeCiXy5wzWyhMoKoNVdEJItzetwjDR2lIx+YvdYrTTooVXODEWPRV60kN/x2JVz7BI7WiminjQsEm/v6igtYfxStqC//7pIkrE4R0IEAAAGcRJREFUE/u51Nqi59KTxCro/AT3mKWAzr8+6uSFb29klBCjkIzcR0EG7OfvOlspw+cEEmBx8rMV0g0YOJbodJJ3Xvfqs2Bu0dWZaMBuMX53CfLDiTX5Ya6ryyGMRfrepgXxEdEWEJ2iY7dYk7YE9PtjvJK2GH//VFDFFTug66eKzp6kAroL29O+PPiSgUO5qSEif+myM5tO7jZySMCBfUbkBQbOVlkmVdcMHANgnrYCMfVbr3xIt1LNaqVelKMTPEuRVmDfDtVzsCLGvnelFHGPj8Ei/g7od3escJHv6CNiYRA6aDLi2I2Azh7GK+HE+A0c5vcjHp3PiN1Ba4oOQLYkFdBJMxCbbIhsNjQcs/D6pJN73uGylxo4PcChfF6yuy3cQyEY+JysgAUObjJCv/6D8JUHV6tZrZRtI/RBbyn2fp67rBvYjBs2xVgAxIRXWLH257LU3vegJiJOzvMdfXRMsOLYtN3duUhncoEuB3fEeCVhGtbE2De3yBX/KYg9Vuwt6Hg1WckFdD+2Pb3hRCattbr8q0b28Aec3GvgFAEHVqYWl04kdotLHkyAA9IHecttGc5Xs1qpWl7qnnMrxsI5YX8e3E6k1nDDrFYNKlbw01+khRr6WxVroo7J+eMJuY9QjAlk5MzAnsEE9HfAeKUUYtwH56iii8rCd98wrS7tSC6g8358e3rK2gDy8yLy9kY2auBQgEL58e3psjy0MzkBHIJWRS0bPmenROQJP0mb+gNuNav5id3HDO0517LJykDcQYz2MqxYDkR/J2JVW0/oxHcRzEb8/uY7+oh0bBFyYo12/InR76ilyOM3ArqDYbyStli/hSxkjEQrJy3swXuFkM6GJAM6aVajjFmronvcZd+86rL7DJwe4EAs3DehxP6cP749TYtL4PDGjLa6bHde96ZLbuDrV136SkERuWjgcPYyRWUG7iDGb+8Eq9KDijXptdM6yPq11kq/U5HenhbExzMZOFjh9zQhRsK5OcZpB8Z4JWFaJRmjyIQKqrisBKSEdAYkG9D9++3pJSeyYCmguy4if9zIfsDA6QEO5LNOXkRAl9bnBFKiK8+KMJjs14HvWgqDX/+wrpO6T0Sc2L2TTVaF4gBiTHix50NYMb8HBi1fa/09Oh/xELgPjkivXeg9w1hMmAhDnQ/4Djg4xivpi1ZFV6CK/6QsuvpSpOrYvVwp6z76ViQb0ClzJdmfdPLstzWylxk4FOCOGpwiAMbp6vu5glynVlC3Uc1qU0Xr+68Vc5Pa5irmpO5BUD2Hg4hVveNXLDPpFYAu5Ij5G2HyWmvAcyXiIbCI4oj0dzjGtSOgKzhjnQ+WdXIaB8N4JX0xK/6XCOmisTQW8vvor/C3EEfSAd1/2J5ecyIXLFXRbfmlSo3stX/v5F4DpwjYl4V7JhQq6IBCmzC0+uwgenXl+xM6CJ6wHNZVs9pINavNa8XceYN7ze3GxC8ORMObWPtWjzLpFUzs8+yv9byVVmEGwjlvnkUUh6O/xTEXyBDQFZQGc7PGOh9QqXEIjFfSF3lBUSukq5TpnFuw6OqzEe/tvfjuD4/5e56gLqzUK+j8xPeUE9m0FNJ9RuQZb2tk32/g9AD7IqAjoAOKQCf5irAf3V4GdSVzK6yb0km4aBO5fjCuoaGfUPZfUY/6FbSxjucIJpj4xSHE3ANrlJWq+dMqjeXIhzEce4W6tieeMhDOCZPzB+MnS3VcsKa/xf2RDmVdJ49RIPr30wrmRg0dOdVzR8N4JX0x
g1Af0l2lzWEUFs/5qAZ1O3vpF63zTxF1p/4B/+P29Ma/7To7YeRB5KZ1l339H4i87F/d5X7LyCEBtypTckRKBhSa31xb99Qw9Xt/SIP62tlXpprV1nXFeuu10ckJDX3I9kGg/+eA/tPqnnIHtawrEYGDmoqwl1O71kpVv2p6kknw3PjJj6uRj2FQQzrfgjfoZIyuip+NGPC0m8v771yrBIu85+uAkWvVQphSADqBWml7WfobakcAcDSMVxLnn/OqWW058vPY+dZvKEF6GP7ZVc+5xefwwdb8SjWrre4xN0F1fYckH9B5/7ExPfsjzZBu0MDh7PCtLv+ikb32uZm856u73OMGDgm4RZmqu2J/zh/pOlv5T41pBkDAMejgdijyw2sn9evrZvVaNau1/tUPkHdXibV/h7SCt92sTfp1mrn9h2Gbn2AyMBkiulJ1VCe+5nV/TXSITnotGKgG7m2b/JrMe0GBBnOThiZ9NgN9Tw8ksODEkpS/jwYKXDHSPtYryt/7ApP+R8N4pTQsLCjq12q6dQ2GZ+lOkjs/NnrM+DG2FhPfrMhum5tIgV/oG63NaykCOmlOvk8Y+JJ7Gt/q8g8b2fd9WeYmn5nJk4YODdhBQAegaBZdfULbQ1pq5ZOHvRYdlX0y8gKr+HBEs4bun9bE16aG7iv6zzVWqx/bhKF2vX7y64q2nJzVya+OfH/pb+CIfl4zC1TVFJN8hbOZ+AR8f8R9/comVECfMsYridMFRXNGnmX7dSuGi1o9Na/XeY1nrs7SbkCXElpojEMqTUD3fzWml/6XrrNWvuRu+pCTF/5+Ixt99V3uZ4wcEnDTF2fywX9w8hzOCICCmdAVxdYmJpGf5dAt45AOrb6dNFZd2qth0nBr8rhtlWqe+6lF2/8yb1p9cMHYZHyvTsac05XqS+3tgw4SZmnleHtrO6u/fet8TxcS1THoFNoiHhPjladJdryiVXQjem6tGGwfX9yhqwuOd91T7naD2yhNQCfNChmLX3Ly7kb24quZvPvBLkepP0zp2cmQyxHQGaigY9NVoEP8hKa29VoipCuFTR3fAccxWaA9LMteLXtkPiDS9pIWJz/6WxUJrf9AJ8B8cLd7UruvgL9vRd4TrswIVdEJfiHVFGeyIxivJE4XFE0VpLqXZ+0O0TmMMWvd/xBGV5nO8080ptecyJRra91n4XVdRP5oO3vDHzeyf2bgNAE3Wbg/QjHwWQnogA7SqoOKrupD2kZomYbj0r3A8lzpDTuKFhT16yRn+6toE2KX2HeqkJapeEIHsJCqgxivlINWnPMcWzI6VrpQ9vNQRqUK6KQ5Ce4DunVrId0XROTtjeyR9zSyFxg4TcAOC/fGD3WdHQpxNZzIUuTPmnKLBiAKQrpSOM2kLzqIvXFKgMmP4Napwiosrhs6ocJCqo5jvFIOYxpwo0Q0nCWEL5nSBXQ/2Zje8K0urQV0/vVpJ/f8ZiP7sXcT0sGIk+KeNHBvBAmuDHzOIEEkUDaEdEmb01XEQEfohveXOJvpY2V6UFQ5F9MyC2DQAaf1txUdpOeUhSaJ0+tMGFtOI7rACSVRuoDO+6nGtNmScB/SLRLSwYi+TB4v0bWIPXFABR2QE0K6JPlwjv2M0HGLrj7Bd0VpjLAyPXdMzhcXv7E4rksspMoPC03KQe+hubKfh7LR+QvGqSVSyoBOmtUqExar6PzrU1TSwRAD90QlxNn4qcb0SuTPyea6QI7aQjoecIqPcA55o6VQCejeWkHGmSVFlXNxXWDvORzTnC54Qb4Yr5SAPvcQxpaMLnCqcI+XQ2kDuovNyfg5yyHdAiEdIntuJh+xcD+E4kQ2Y37OiUD77QFl5UM6fcChhV1xEc4hd/pAzN9ZCei1Pl3285CDBb6rC2tVK3OAo2KsFgjjlVKhG0wJEdKVR2kDOjVh+Y/ct7t8y3b2f/xBI2NlJ6I4KfKkgTMf8u8/dgseAjogAF3Re5qBbuEw4YNgFl19XkQe4YynT6u8
COk6Z5UJ48La1JZawFExVguM8Uo5sGVDeRHSlUOpA7qpxvSGE5myWkXnmnffM5a3szc8up29ysApQ8n8j5n76Mn490GwvdmcyFrkz0pABwSiE7IVNl8ujEeY8EFoi64+RVvcctDfhAtlPw8d4CcOKzqRiOIZobUljuESY7U4GK+UAyFdeRHSpa/sFXRyqTE96UTWLYd0nxeRP2lkD//CVvbDn3Jyt4HThpLoz+RjWfx7INjebE4k9j50BHRAQDrQ9ffdAufdLP8Q8pBOPADB6WQjk14loG39qKQ7OsK5Yju96OpLZT8JOLLT7DkXF+OVciCkK6+2uQuufYJKH9Ap86t8tkXk3S775l/Y7vrJv2ZfOgTUm8nfxD7fPxhub7bYLS5PRX5/oHR0XzrfzukhVqSZ05rsnS/7iUBc7F1ZHrS7PLJlwrlCO61/+8Bh+bHz/fz92MB4pRzaQjoWmZaMVrlXCOPTQ0AnIv+5Mb3kRBYsV9G1Xh928uxf3c6m3kLLSwTyReI+YuBvfyDEpzVQQSf/c9dZ9pwEItAQaEgnGRHfJZ3sjb1wAtjRtnclEqcTzSzaODi/5xThXHERzuGofDgwwFjNFsYr5dC2yJT23CWj136MPfXTQkCnnMiYE9ksQkj3ORH5743s4Z/a6voJqumQt75MHjfwdx+kgu6/NPeljN3ylo3ZgUj8ijQ/ycjEbFR+T8AH/eQCk72wRiex7+f7IX26aIMWUnd2mj2nCqvVQppwDoe1qXsDjzBWs4nxSnloe+4H2Ve9fPQ+Z4FxIgjo1E83pjeK0Oqyxbe8/ICTF75pO5v65e3s9exNh7w8L5OPGDi5IavKYu+9QAUdEJlOzA7QIiY4f76H2AMHlmmlwAAPw+lr25CfFlK3WqetXaHRQhpHtaxjNfYGNo7xSnnos9MQz67l07bA+DQhbbER0LX56cb0fENkoSEiRXl9VkT+tJG98me2u2Z+t5ExsY+O6xP5WHf8+yHY3mwNkaXIn3Xw+7vOBmnpCeD2tHWEbxHzfCZnc7esE71UzaEQ9PvBj7sfYXV62tindE+txRS0tSumS4uuzvXDYa1rxWVF90BCATBeKY+2Z9f7CWXLp62a7gL3ejER0O3iJBtzkm06yaRIr4+77J7f3e56w/+51VUnqEMnfV2Xe/ykgXvh+7pqQf6unWRLBu597mHACF2VNqKtQ3jY6az2yR4mClE4WkEwwEbt6WurrC7zgg1aEBfbzcUwZT8ROJR1bWU7QMVlcTFeKQ//TEVFVTlpSDup9zpBXcEQ0O3ys416oVpdtvNtLz/ismcT1KHTnpG5dxo4qUH2ofvZxs6KwNgDGfahA4zxrUP0YYeg7viY7EEy2jZq57shcW3VdA+WbG86P8FzQb+zaUFcPK3fXBbD4DDax2q0sk0A45Vy8fetv381qON6l8iuoO4RgtpiIKDbw8826vNO5JLbqaYp3mtLRD7ssme/dbvrDZNbd/3Sm7e7XvWEy+41d6JRGF+ayRMG7oVggbMTWYr8WYdrXTXaXAIGtQV1z2cV6qEta8Uckz1Izq4Qn7a4CdNrPVSC1embugJ7QCd6UCyrBCw4ggXGamljvFIuGtRVtPXlHFVV5aFB3ZQGtQ8xd2Fbd9lPwO3UG/WJs82WeoM2j/DOfFD3SSf3/JHLHv7TRvbwczL351/d5Zb/eebe+axMnrR+/LDjyzP3Ad96MbLhUG/vRHxFx2jkz+tXaLP5NmCU7r8xVs1qE1p57//Zz/W6hX8I9N+pk+xZgjLQCqOlaraz0GZCf8/5bkiQTl7PVrNa6zegsM+Nu2zqGHSKVpaF0/rNnaJaDofgw1z/fTbPWK08GK+Ui/4m+GfXPr3W/uXnvHvLfm7KQLvWzOvcRevaj3D97SCg25//g11L4Q/2CyLyPpc98L7t7IH/R0S+LHNLz8vkPYNd7s+fn7mPGThEGPbCTN79Nv07iulsV21kupF/OzT/Hme7arEvyBgBHWCfTl7u
TGRWs52FPWMMdncs6EQPq69RSjrJ6R+CJ6pZbajtu4HJr8S0BXVF/w3wVc6zfG8XzrqGcku0jcYB+SB3SV+EciXHeKVc9Nl1Vl+iY5cR3VLmVNnPT+r2uP5DbYHdEHMY8RDQ7WO6Ud8Yb1bRPWb2II/Aj8Y2XVb5WyeVtzcyeUYmH7xX3MqXZfL+r8zce+/rco8X7kMhVz7EvSeTD37eyXMin+kRfQDNnWtOLger2tvD4HhXbWimwepXoChaK1GlOdgdaVudWIaBbmuyZ14ne6i6AJSuWm5Nfg20PQQzGZKQtmqE9tXpMceSB0HlTLGs6wJi/7fmv1dWuG64g83W30rb3wzPl9gT45XyaX9+lacCuyHdv6z1T8LaROk9f/M3Qe/7obZXH/d+GAR0d+Anx8901fz+AldMH+gRfd6/nDzno5I9511O5I8kk55tkb7M/c0Xi3zkWZk83i3S9RWZ+7vWO3xl5h7/Elpkls6XilvxfyeRP3fIfejmDUyqtFrnASiYVhsJuXVlWkoD3NW21ddLB/j/T82FwJ/H0iTsWoTPn8TfmE6mP61CSQOdIf0/g411DEnq+6N9dbJe20rbK3YbzLJUzqTyN9X6HBslClVC/7akZq01Xijp2KwdY5VjYLyyp+Tvqd2BXYsGNwP6f7b/e6pKufhF7/u1vQojdE6jT//PFP8Gol7zzDkX8/0L40xXzbevOlf28+DdbeAYEN62gRaX6v7LgarKznTVNgxUvjzzcoNKFCAlujKxUrBWEuttq6+XmPQBgMNrC+zaVyfntTKdyhkAAADAOAK6Q/jerppfPTJamAMG0nTp5xr1iRCfzMg9f+HnGvXJyMcAIEdtKxIrbavRYgV3vipuQydz19omdVkoAAA5aVuVfNgV6hvtrYlaq95ZRAEAAAAUAwHdIX1PV23FQGsSoMw2f75R7wvx+b+nuQfl1cjn2q9+Hvh5quiAUrrNpG17e5nDap+0XWtr5UAIBwAAAAAAEBAB3SF9T9dOW5IlQjogqod+vlG/pSdyHr6nq7ZmYFPcCz9PFR0AAAAAAAAAJIOA7ghOE9IBsS1cadRHQhzD6a6ab6d5MfLn3amiu0IVHQAAAAAAAAAkoYvLeHg6SV7RfVoAhDd8uqt2pz05OmVWA7KY/D5UQfbdAwAAAAAAAADkj4DuiHxI50QqTmTV1yDy4sUr+Gss4L0+b+D6ToyFCyUBAAAAAAAAADkioDuGWUI6XrxivoIEdNJ8r0kD17rXH0eozwwAAAAAAAAAyA8B3THNPdXucqHQHwQonv7RrlqQkG6uUV/z/zBwhkZHu2oVA8cBAAAAAAAAADgGAroO8CHdXKM+4kTmqKjixSvoK9i+bEaq6PxrNtRnBgAAAAAAAADkI3POcWo76OGumm9Bdz6ZDwTY9+AvNupLIY7y4a6aD8dGDZyRC7/YqNPuEgAAAAAAAAAKigq6DtNJ89MispnUBwPsChlUWQnFzj/cVRsycBwAAAAAAAAAgCOggi4n39WcPJ/3+2Ql+QEBWx58Y6Aquu+yU0W36ve/fGNzH0wAAAAAAAAAQIFQQZeTNzbqK87JkHOy4DNQXrx45foKVtnmnEw4J5sGrudgyM8NAAAAAAAAAOgcKugC+M6sNiEiF5P/oEBcD/2Sq8+HOILvzEztNRnscwMAAAAAAAAAOoOALpDXZTstL31rvMFSfGAgvPVfdvWBUO/6uqy2ZqSFrd/vsvLLrr5i4FgAAAAAAAAAAAdAi8tA/OS5E6k4kUs+EuXFi1fHX/2vbVarBuFExoxcw14nMvvarNaX+vcoAAAAAAAAAKSCCroIXpPVKlpNZ6H6BkiJryYbeJOrb4T4TK/Jar615LCR87fwJlcfMXAcAAAAAAAAAIA7oIIugje5+pITGXIiF6ii4sWr49VkU6Huaq2i2zRyDYe/I6vNWvquAwAAAAAAAADsjQq6yF6d1Qa0mu5UqU8E0FkP/pqrL4U4p6/Oar5q7VFD1+/0
r7k6QR0AAAAAAAAAGEZAZ4RO8k/R9hLoiPVfc/WBUKfy1bZaXQohHQAAAAAAAADYRkBnzKuy2pgGdb1lPxfAMV14s6tPhjiJr8pqfSKyYixgP/1mQjoAAAAAAAAAMImAzqBXNif7J/RFUAcc3f2/7uorIc7fK7NaRUSuGrtWp3+dkA4AAAAAAAAAzCGgM0yDujEN6mh9CRzeuogM/bqrb4Q4d6/Mar5i77yx60RIBwAAAAAAAADGENAVxCuarS99UDdY9nMBHNLcb7j6WKiT9oqstiQip4xdpNO/QUgHAAAAAAAAAGYQ0BXMy5tt9HzYMFr2cwEcwum3BAqoXm5zPzrvwlsC7ckHAAAAAAAAANgfAV1Bvfyp9pdjVNUBd7QpIpW3BNqP7uVZbUhElgzuITn3loDVhAAAAAAAAACAvRHQJeChrDag7S9H2KsOuK1VH9I9Gmg/uoeymr8fHzV4OYKeBwAAAAAAAADArQjoEjPSrNypUFkH7Glh3tVHQp2akebekVcMXoqdisL5QBWFAAAAAAAAAICnI6BL2HCzDWal7UVgB4jMLQRs8zic1aZE5JzR835hgX3pAAAAAAAAACA4AroS+dfNwK5VYTekL1piooxO/6arz4b63P86q/n3GjV6npd9xe1vuvqagWMBAAAAAAAAgFIgoIMPD3xgN6AvH9r16b8T3iFlhHRP8S0vJ3/T1aesHBAAAAAAAAAApIyADndUbe5r18eZQmoWXX0p5EeqNsNwy1YWXX2DP3QAAAAAAAAAyBcBHQAAAAAAAAAAABBQFycbAAAAAAAAAAAACIeADgAAAAAAAAAAAAiIgO7/b8+OCQAAABAG2T+1LXZBDQAAAAAAACAk6AAAAAAAACAk6AAAAAAAACAk6AAAAAAAACAk6AAAAAAAACAk6AAAAAAAACAk6AAAAAAAACAk6AAAAAAAACAk6AAAAAAAACAk6AAAAAAAACAk6AAAAAAAACAk6AAAAAAAACAk6AAAAAAAACAk6AAAAAAAACAk6AAAAAAAAKCy7W5qEB/G5B1kAAAAAElFTkSuQmCC mediatype: image/png install: spec: clusterPermissions: - rules: - apiGroups: - "" resources: - pods - pods/exec - services - endpoints - persistentvolumeclaims - persistentvolumes - events - configmaps - secrets - namespaces - nodes verbs: - '*' - apiGroups: - extensions resources: - deployments - daemonsets - replicasets - ingresses verbs: - '*' - apiGroups: - apps resources: - deployments - daemonsets - replicasets - statefulsets verbs: - '*' - apiGroups: - chaosblade.io resources: - chaosblades - chaosblades/status verbs: - '*' serviceAccountName: chaosblade deployments: - name: chaosblade-operator spec: replicas: 1 selector: matchLabels: name: chaosblade-operator strategy: {} template: metadata: labels: name: chaosblade-operator spec: containers: - args: - --blade-version=0.6.0 - --image-repo=chaosbladeio/chaosblade-tool - --pull-policy=IfNotPresent - --namespace=kube-system command: - chaosblade-operator env: - 
name: WATCH_NAMESPACE valueFrom: fieldRef: fieldPath: metadata.annotations['olm.targetNamespaces'] - name: POD_NAME valueFrom: fieldRef: fieldPath: metadata.name - name: OPERATOR_NAME value: chaosblade-operator image: chaosbladeio/chaosblade-operator:0.6.0 imagePullPolicy: IfNotPresent name: chaosblade-operator resources: {} serviceAccountName: chaosblade strategy: deployment installModes: - supported: true type: OwnNamespace - supported: true type: SingleNamespace - supported: false type: MultiNamespace - supported: true type: AllNamespaces keywords: - chaosblade - cloud native - kubernetes - open source - chaos engineering maturity: alpha labels: alm-owner-etcd: chaosblade-operator operated-by: chaosblade-operator selector: matchLabels: alm-owner-etcd: chaosblade-operator operated-by: chaosblade-operator links: - name: ChaosBlade url: https://github.com/chaosblade-io - name: Chaosblade CLI url: https://github.com/chaosblade-io/chaosblade - name: Chaosblade for Basic Resource url: https://github.com/chaosblade-io/chaosblade-exec-os - name: Chaosblade for Docker url: https://github.com/chaosblade-io/chaosblade-exec-docker - name: Chaosblade for Java url: https://github.com/chaosblade-io/chaosblade-exec-jvm - name: Chaosblade for C++ url: https://github.com/chaosblade-io/chaosblade-exec-cplus - name: Chaosblade for Kubernetes url: https://github.com/chaosblade-io/chaosblade-operator - name: Documentation(Chinese) url: https://chaosblade-io.gitbook.io/chaosblade-help-zh-cn maintainers: - email: chaosblade.io.01@gmail.com name: ChaosBlade Community minKubeVersion: 1.12.0 provider: name: Alibaba Cloud version: 0.6.0 ================================================ FILE: deploy/olm/deploy/olm-catalog/chaosblade-operator/0.6.0/chaosblade_v1alpha1_chaosblade_crd.yaml ================================================ # Copyright 2025 The ChaosBlade Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance 
with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. apiVersion: apiextensions.k8s.io/v1beta1 kind: CustomResourceDefinition metadata: name: chaosblades.chaosblade.io spec: group: chaosblade.io names: kind: ChaosBlade listKind: ChaosBladeList plural: chaosblades singular: chaosblade scope: Namespaced subresources: status: {} validation: openAPIV3Schema: properties: apiVersion: description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#resources' type: string kind: description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. 
More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds' type: string metadata: type: object spec: properties: experiments: description: 'INSERT ADDITIONAL SPEC FIELDS - desired state of cluster Important: Run "operator-sdk generate k8s" to regenerate code after modifying this file Add custom validation using kubebuilder tags: https://book-v1.book.kubebuilder.io/beyond_basics/generating_crd.html' items: properties: action: description: Action is the experiment scenario of the target, such as delay, load type: string desc: description: Desc is the experiment description type: string matchers: description: Matchers is the experiment rules items: properties: name: description: Name is the name of flag type: string value: description: 'TODO: Temporarily defined as an array for all flags Value is the value of flag' items: type: string type: array required: - name - value type: object type: array scope: description: Scope is the area of the experiments, currently support node, pod and container type: string target: description: Target is the experiment target, such as cpu, network type: string required: - scope - target - action type: object type: array required: - experiments type: object status: properties: expStatuses: description: 'Important: Run "operator-sdk generate k8s" to regenerate code after modifying this file Add custom validation using kubebuilder tags: https://book-v1.book.kubebuilder.io/beyond_basics/generating_crd.html' items: properties: action: type: string error: type: string resStatuses: description: ResStatuses is the details of the experiment items: properties: error: description: experiment error type: string id: description: experiment uid in chaosblade type: string kind: description: Kind type: string name: description: resource name type: string nodeName: description: NodeName type: string state: description: experiment state type: string success: description: success type: boolean uid: description: resource 
uid type: string required: - state - kind - success type: object type: array scope: description: experiment scope for cache type: string state: description: State is used to describe the experiment result type: string success: description: Success is used to judge the experiment result type: boolean target: type: string required: - scope - target - action - success - state type: object type: array phase: description: Phase indicates the state of the experiment Initial -> Running -> Updating -> Destroying -> Destroyed type: string required: - expStatuses type: object version: v1alpha1 versions: - name: v1alpha1 served: true storage: true ================================================ FILE: deploy/olm/deploy/olm-catalog/chaosblade-operator/chaosblade-operator.package.yaml ================================================ # Copyright 2025 The ChaosBlade Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. channels: - currentCSV: chaosblade-operator.v0.6.0 name: alpha defaultChannel: alpha packageName: chaosblade-operator ================================================ FILE: deploy/olm/deploy/operator.yaml ================================================ # Copyright 2025 The ChaosBlade Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. apiVersion: apps/v1 kind: Deployment metadata: name: chaosblade-operator namespace: kube-system spec: replicas: 1 selector: matchLabels: name: chaosblade-operator template: metadata: labels: name: chaosblade-operator spec: serviceAccountName: chaosblade containers: - name: chaosblade-operator # Replace this with the built image name image: chaosbladeio/chaosblade-operator:0.6.0 command: ["chaosblade-operator"] args: - --blade-version=0.6.0 - --image-repo=chaosbladeio/chaosblade-tool - --pull-policy=IfNotPresent - --namespace=kube-system imagePullPolicy: IfNotPresent env: - name: WATCH_NAMESPACE value: "" - name: POD_NAME valueFrom: fieldRef: fieldPath: metadata.name - name: OPERATOR_NAME value: "chaosblade-operator" ================================================ FILE: deploy/olm/deploy/role.yaml ================================================ # Copyright 2025 The ChaosBlade Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
apiVersion: rbac.authorization.k8s.io/v1beta1 kind: ClusterRole metadata: name: chaosblade labels: name: chaosblade rules: - apiGroups: - '' resources: - pods - pods/exec - services - endpoints - persistentvolumeclaims - persistentvolumes - events - configmaps - secrets - namespaces - nodes verbs: - "*" - apiGroups: - extensions resources: - deployments - daemonsets - replicasets - ingresses verbs: - "*" - apiGroups: - apps resources: - deployments - daemonsets - replicasets - statefulsets verbs: - "*" - apiGroups: - chaosblade.io resources: - chaosblades - chaosblades/status verbs: - "*" ================================================ FILE: deploy/olm/deploy/role_binding.yaml ================================================ # Copyright 2025 The ChaosBlade Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. apiVersion: rbac.authorization.k8s.io/v1beta1 kind: ClusterRoleBinding metadata: name: chaosblade labels: name: chaosblade roleRef: kind: ClusterRole name: chaosblade apiGroup: rbac.authorization.k8s.io subjects: - kind: ServiceAccount name: chaosblade namespace: kube-system ================================================ FILE: deploy/olm/deploy/service_account.yaml ================================================ # Copyright 2025 The ChaosBlade Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. apiVersion: v1 kind: ServiceAccount metadata: name: chaosblade labels: name: chaosblade namespace: kube-system ================================================ FILE: deploy/oss/crd.yaml ================================================ # Copyright 2025 The ChaosBlade Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: name: chaosblades.chaosblade.io spec: group: chaosblade.io names: kind: ChaosBlade listKind: ChaosBladeList plural: chaosblades singular: chaosblade shortNames: [blade] scope: Cluster subresources: status: {} validation: openAPIV3Schema: properties: apiVersion: description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#resources' type: string kind: description: 'Kind is a string value representing the REST resource this object represents. 
Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds' type: string metadata: type: object spec: properties: experiments: description: 'INSERT ADDITIONAL SPEC FIELDS - desired state of cluster Important: Run "operator-sdk generate k8s" to regenerate code after modifying this file Add custom validation using kubebuilder tags: https://book-v1.book.kubebuilder.io/beyond_basics/generating_crd.html' items: properties: action: description: Action is the experiment scenario of the target, such as delay, load type: string desc: description: Desc is the experiment description type: string matchers: description: Matchers is the experiment rules items: properties: name: description: Name is the name of flag type: string value: description: 'TODO: Temporarily defined as an array for all flags Value is the value of flag' items: type: string type: array required: - name - value type: object type: array scope: description: Scope is the area of the experiments, currently support node, pod and container type: string target: description: Target is the experiment target, such as cpu, network type: string required: - scope - target - action type: object type: array required: - experiments type: object status: properties: expStatuses: description: 'Important: Run "operator-sdk generate k8s" to regenerate code after modifying this file Add custom validation using kubebuilder tags: https://book-v1.book.kubebuilder.io/beyond_basics/generating_crd.html' items: properties: action: type: string error: type: string resStatuses: description: ResStatuses is the details of the experiment items: properties: error: description: experiment error type: string id: description: experiment uid in chaosblade type: string kind: description: Kind type: string name: description: resource name type: string nodeName: description: NodeName type: string state: 
description: experiment state type: string success: description: success type: boolean uid: description: resource uid type: string required: - state - kind - success type: object type: array scope: description: experiment scope for cache type: string state: description: State is used to describe the experiment result type: string success: description: Success is used to judge the experiment result type: boolean target: type: string required: - scope - target - action - success - state type: object type: array phase: description: Phase indicates the state of the experiment Initial -> Running -> Updating -> Destroying -> Destroyed type: string required: - expStatuses type: object version: v1alpha1 versions: - name: v1alpha1 served: true storage: true ================================================ FILE: deploy/oss/operator.yaml ================================================ # Copyright 2025 The ChaosBlade Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
apiVersion: apps/v1 kind: Deployment metadata: name: chaosblade-operator namespace: kube-system spec: replicas: 1 selector: matchLabels: name: chaosblade-operator template: metadata: labels: name: chaosblade-operator spec: serviceAccountName: chaosblade containers: - name: chaosblade-operator # Replace this with the built image name image: chaosbladeio/chaosblade-operator:0.6.0 command: ["chaosblade-operator"] args: - --chaosblade-version=0.6.0 - --chaosblade-image-repository=chaosbladeio/chaosblade-tool - --chaosblade-image-pull-policy=IfNotPresent - --chaosblade-namespace=kube-system - --webhook-enable imagePullPolicy: IfNotPresent env: - name: WATCH_NAMESPACE value: "" - name: POD_NAME valueFrom: fieldRef: fieldPath: metadata.name - name: OPERATOR_NAME value: "chaosblade-operator" ports: - containerPort: 9443 protocol: TCP volumeMounts: - mountPath: /tmp/k8s-webhook-server/serving-certs name: cert readOnly: true volumes: - name: cert secret: defaultMode: 420 secretName: chaosblade-webhook-server-cert ================================================ FILE: deploy/oss/rbac.yaml ================================================ # Copyright 2025 The ChaosBlade Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
apiVersion: v1 kind: ServiceAccount metadata: name: chaosblade labels: name: chaosblade namespace: kube-system --- apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRole metadata: name: chaosblade labels: name: chaosblade rules: - apiGroups: - '' resources: - pods - pods/exec - services - endpoints - persistentvolumeclaims - persistentvolumes - events - configmaps - secrets - namespaces - nodes verbs: - "*" - apiGroups: - extensions resources: - deployments - daemonsets - replicasets - ingresses verbs: - "*" - apiGroups: - apps resources: - deployments - daemonsets - replicasets - statefulsets verbs: - "*" - apiGroups: - chaosblade.io resources: - chaosblades - chaosblades/status verbs: - "*" - apiGroups: - admissionregistration.k8s.io resources: - mutatingwebhookconfigurations - validatingwebhookconfigurations verbs: ["get","create","update","list","watch","patch"] - apiGroups: - certificates.k8s.io resources: - certificatesigningrequests - certificatesigningrequests/approval verbs: ["get","create","update","list","watch","patch"] --- apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRoleBinding metadata: name: chaosblade labels: name: chaosblade roleRef: kind: ClusterRole name: chaosblade apiGroup: rbac.authorization.k8s.io subjects: - kind: ServiceAccount name: chaosblade namespace: kube-system ================================================ FILE: deploy/oss/service.yaml ================================================ # Copyright 2025 The ChaosBlade Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
# See the License for the specific language governing permissions and # limitations under the License. apiVersion: v1 kind: Service metadata: name: chaosblade-webhook-server namespace: kube-system spec: ports: - port: 443 targetPort: 9443 selector: name: chaosblade-operator ================================================ FILE: deploy/oss/webhook-cert-job.yaml ================================================ # Copyright 2025 The ChaosBlade Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. apiVersion: batch/v1 kind: Job metadata: name: webhook-certs-job namespace: kube-system labels: app.kubernetes.io/component: webhook-certs-job spec: template: metadata: name: webhook-certs-job spec: restartPolicy: Never serviceAccountName: chaosblade containers: - name: webhook-job-certs image: bitnami/kubectl:latest imagePullPolicy: IfNotPresent command: - "sh" - "-c" - | set -e set -x K8S_SERVICE=chaosblade-webhook-server K8S_SECRET=chaosblade-webhook-server-cert K8S_NAMESPACE=kube-system # test if secret already exists certs=$(kubectl get secret ${K8S_SECRET} --ignore-not-found -n ${K8S_NAMESPACE} -o name) if [ "${certs}" = "secret/${K8S_SECRET}" ];then echo "Secret already exists" exit 0 fi csrName=${K8S_SERVICE}.${K8S_NAMESPACE} tmpdir=$(mktemp -d) echo "Creating certs in tmpdir ${tmpdir} " cat <> ${tmpdir}/csr.conf [req] req_extensions = v3_req distinguished_name = req_distinguished_name [req_distinguished_name] [ v3_req ] basicConstraints = CA:FALSE keyUsage = nonRepudiation, 
digitalSignature, keyEncipherment extendedKeyUsage = serverAuth subjectAltName = @alt_names [alt_names] DNS.1 = ${K8S_SERVICE} DNS.2 = ${K8S_SERVICE}.${K8S_NAMESPACE} DNS.3 = ${K8S_SERVICE}.${K8S_NAMESPACE}.svc EOF openssl genrsa -out ${tmpdir}/server-key.pem 2048 openssl req -new -key ${tmpdir}/server-key.pem -subj "/CN=${K8S_SERVICE}.${K8S_NAMESPACE}.svc" -out ${tmpdir}/server.csr -config ${tmpdir}/csr.conf # clean-up any previously created CSR for our service. Ignore errors if not present. kubectl delete csr ${csrName} 2>/dev/null || true # create server cert/key CSR and send to k8s API cat <&2 exit 1 fi echo ${serverCert} | openssl base64 -d -A -out ${tmpdir}/server-cert.pem # create the secret with CA cert and server cert/key kubectl create secret generic ${K8S_SECRET} \ --from-file=tls.key=${tmpdir}/server-key.pem \ --from-file=tls.crt=${tmpdir}/server-cert.pem \ --dry-run -o yaml | kubectl -n ${K8S_NAMESPACE} apply -f - ================================================ FILE: examples/create_services_in_batch.yaml ================================================ # Copyright 2025 The ChaosBlade Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
# Batch create services with given name prefix and port count # Equivalent to: # blade create k8s service-self create --name-prefix my-service --namespace default \ # --service-count 2000 --ports-per-service 20 --kubeconfig ~/.kube/config apiVersion: chaosblade.io/v1alpha1 kind: ChaosBlade metadata: name: create-services-in-batch spec: experiments: - scope: service target: self action: create desc: "create 2000 services with prefix my-service" matchers: - name: name-prefix value: - "my-service" - name: namespace value: - "default" - name: service-count value: - "2000" - name: ports-per-service value: - "20" ================================================ FILE: examples/delay_pod_network_by_names.yaml ================================================ # Copyright 2025 The ChaosBlade Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
apiVersion: chaosblade.io/v1alpha1 kind: ChaosBlade metadata: name: delay-pod-network-by-names spec: experiments: - scope: pod target: network action: delay desc: "delay pod network by names" matchers: - name: names value: - "redis-slave-674d68586-jnf7f" - name: namespace value: - "default" - name: local-port value: ["6379"] - name: interface value: ["eth0"] - name: time value: ["3000"] - name: offset value: ["1000"] ================================================ FILE: examples/delete_pod_by_labels.yaml ================================================ # Copyright 2025 The ChaosBlade Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. apiVersion: chaosblade.io/v1alpha1 kind: ChaosBlade metadata: name: delete-two-pod-by-labels spec: experiments: - scope: pod target: pod action: delete desc: "delete pod by labels" matchers: - name: labels value: - "app=guestbook" - name: namespace value: - "default" - name: evict-count value: - "2" ================================================ FILE: examples/delete_pod_by_names.yaml ================================================ # Copyright 2025 The ChaosBlade Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. apiVersion: chaosblade.io/v1alpha1 kind: ChaosBlade metadata: name: delete-pod-by-names spec: experiments: - scope: pod target: pod action: delete desc: "delete pod by names" matchers: - name: names value: - "redis-slave-674d68586-86r2t" - "frontend-d89756ff7-hmm62" - name: namespace value: - "default" ================================================ FILE: examples/fail_pod_by_labels.yaml ================================================ # Copyright 2025 The ChaosBlade Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
apiVersion: chaosblade.io/v1alpha1 kind: ChaosBlade metadata: name: fail-pod-by-labels spec: experiments: - scope: pod target: pod action: fail desc: "inject fail image to select pod" matchers: - name: labels value: - "app=guestbook" - name: namespace value: - "default" ================================================ FILE: examples/increase_container_cpu_load_by_id.yaml ================================================ # Copyright 2025 The ChaosBlade Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. apiVersion: chaosblade.io/v1alpha1 kind: ChaosBlade metadata: name: increase-container-cpu-load-by-id spec: experiments: - scope: container target: cpu action: fullload desc: "increase container cpu load by id" matchers: - name: container-ids value: - "2ff814b246f86" - name: cpu-percent value: ["100"] # pod names - name: names value: ["frontend-d89756ff7-pbnnc"] # or use pod labels ================================================ FILE: examples/kill_container_process_by_id.yaml ================================================ # Copyright 2025 The ChaosBlade Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. apiVersion: chaosblade.io/v1alpha1 kind: ChaosBlade metadata: name: kill-container-process-by-id spec: experiments: - scope: container target: process action: kill desc: "kill container process by id" matchers: - name: container-ids value: - "f1de335b4eeaf" - name: process value: ["top"] - name: names value: ["frontend-d89756ff7-tl4xl"] ================================================ FILE: examples/modify_service_traffic_policy.yaml ================================================ # Copyright 2025 The ChaosBlade Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
# Modify service externalTrafficPolicy and internalTrafficPolicy # Equivalent to: # blade create k8s service-self modify --name my-service --namespace default \ # --externalTrafficPolicy Local --internalTrafficPolicy Cluster --kubeconfig ~/.kube/config apiVersion: chaosblade.io/v1alpha1 kind: ChaosBlade metadata: name: modify-service-traffic-policy spec: experiments: - scope: service target: self action: modify desc: "modify service traffic policy" matchers: - name: name value: - "my-service" - name: namespace value: - "default" - name: externalTrafficPolicy value: - "Local" - name: internalTrafficPolicy value: - "Cluster" ================================================ FILE: examples/node-cpu-load.yml ================================================ # Copyright 2025 The ChaosBlade Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. apiVersion: chaosblade.io/v1alpha1 kind: ChaosBlade metadata: name: node-cpu-load spec: experiments: - scope: node target: cpu action: fullload desc: "increase node cpu load by names" matchers: - name: names value: - "node-example-01" - name: cpu-percent value: - "80" ================================================ FILE: examples/node-disk-load-burn-read.yml ================================================ # Copyright 2025 The ChaosBlade Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. apiVersion: chaosblade.io/v1alpha1 kind: ChaosBlade metadata: name: node-disk-load-burn-read spec: experiments: - scope: node target: disk action : "burn" desc: "increase disk burn by names" matchers: - name: names value: - "node-example-01" - name: path value: - "/home" - name: size value: - "20" - name: timeout value: - "100" - name: read value: - "true" ================================================ FILE: examples/node-disk-load-burn-write.yml ================================================ # Copyright 2025 The ChaosBlade Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
apiVersion: chaosblade.io/v1alpha1 kind: ChaosBlade metadata: name: node-disk-load-burn-write spec: experiments: - scope: node target: disk action : "burn" desc: "increase disk burn by names" matchers: - name: names value: - "node-example-01" - name: path value: - "/home" - name: size value: - "20" - name: timeout value: - "100" - name: write value: - "true" ================================================ FILE: examples/node-disk-load-fill.yml ================================================ # Copyright 2025 The ChaosBlade Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. apiVersion: chaosblade.io/v1alpha1 kind: ChaosBlade metadata: name: node-disk-load-fill spec: experiments: - scope: node target: disk action : "fill" desc: "increase disk fill by names" matchers: - name: names value: - "node-example-01" - name: path value: - "/" - name: size value: - "2048" ================================================ FILE: examples/node-mem-load.yml ================================================ # Copyright 2025 The ChaosBlade Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
# See the License for the specific language governing permissions and # limitations under the License. apiVersion: chaosblade.io/v1alpha1 kind: ChaosBlade metadata: name: node-mem-load spec: experiments: - scope: node target: mem action : "load" desc: "increase node mem load by names" matchers: - name: names value: - "node-example-01" - name: mode value: - "ram" - name: mem-percent value: - "80" ================================================ FILE: examples/node-network-delay-by-names.yml ================================================ # Copyright 2025 The ChaosBlade Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. apiVersion: chaosblade.io/v1alpha1 kind: ChaosBlade metadata: name: node-network-delay-by-names spec: experiments: - scope: node target: network action: delay desc: "delay pod network by names" matchers: - name: names value: ["node-example-01"] - name: interface value: ["eth0"] - name: time value: ["3000"] - name: offset value: ["1000"] ================================================ FILE: examples/node-network-loss-by-names.yml ================================================ # Copyright 2025 The ChaosBlade Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. apiVersion: chaosblade.io/v1alpha1 kind: ChaosBlade metadata: name: node-network-loss-by-names spec: experiments: - scope: node target: network action: loss desc: "node network loss" matchers: - name: names value: ["node-example-01"] - name: percent value: ["1"] - name: interface value: ["eth0"] ================================================ FILE: examples/pod-bad-resource-size-cpu-mem.yaml ================================================ # Copyright 2025 The ChaosBlade Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
# Modify deployment CPU and memory resource limits to simulate bad resource sizing # Equivalent to: # blade create k8s pod-pod badresourcesize --namespace default \ # --workload-type deployment --workload-name nginx-app --cpu 1m --mem 128m --kubeconfig ~/.kube/config apiVersion: chaosblade.io/v1alpha1 kind: ChaosBlade metadata: name: pod-bad-resource-size-cpu-mem spec: experiments: - scope: pod target: pod action: badresourcesize desc: "modify deployment CPU and memory resource limits" matchers: - name: namespace value: - "default" - name: workload-type value: - "deployment" - name: workload-name value: - "nginx-app" - name: cpu value: - "1m" - name: mem value: - "128m" ================================================ FILE: examples/pod-bad-resource-size-cpu.yaml ================================================ # Copyright 2025 The ChaosBlade Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
# Modify deployment CPU resource limits to simulate bad resource sizing # Equivalent to: # blade create k8s pod-pod badresourcesize --namespace default \ # --workload-type deployment --workload-name nginx-app --cpu 1m --kubeconfig ~/.kube/config apiVersion: chaosblade.io/v1alpha1 kind: ChaosBlade metadata: name: pod-bad-resource-size-cpu spec: experiments: - scope: pod target: pod action: badresourcesize desc: "modify deployment CPU resource limits" matchers: - name: namespace value: - "default" - name: workload-type value: - "deployment" - name: workload-name value: - "nginx-app" - name: cpu value: - "1m" ================================================ FILE: examples/pod-bad-resource-size-mem.yaml ================================================ # Copyright 2025 The ChaosBlade Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
# Modify deployment memory resource limits to simulate bad resource sizing # Equivalent to: # blade create k8s pod-pod badresourcesize --namespace default \ # --workload-type deployment --workload-name nginx-app --mem 128m --kubeconfig ~/.kube/config apiVersion: chaosblade.io/v1alpha1 kind: ChaosBlade metadata: name: pod-bad-resource-size-mem spec: experiments: - scope: pod target: pod action: badresourcesize desc: "modify deployment memory resource limits" matchers: - name: namespace value: - "default" - name: workload-type value: - "deployment" - name: workload-name value: - "nginx-app" - name: mem value: - "128m" ================================================ FILE: examples/pod-configmap-delete.yaml ================================================ # Copyright 2025 The ChaosBlade Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. apiVersion: chaosblade.io/v1alpha1 kind: ChaosBlade metadata: name: pod-configmap-delete spec: experiments: - scope: pod target: pod action: configmapdelete desc: "Delete ConfigMap to simulate Pod startup failure" matchers: - name: labels value: - "app=my-app" - name: namespace value: - "default" # Optional: specify which ConfigMap to delete. # If not specified, the first non-optional ConfigMap from the Pod spec will be selected. 
# - name: configmap-name # value: # - "my-configmap" ================================================ FILE: examples/pod-containercreating-by-pvc-error.yaml ================================================ # Copyright 2025 The ChaosBlade Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. apiVersion: chaosblade.io/v1alpha1 kind: ChaosBlade metadata: name: pod-containercreating-by-pvc-error spec: experiments: - scope: pod target: pod action: containercreating desc: "Make pod stuck in ContainerCreating state by PVC mount failure" matchers: - name: namespace value: - "nginx" # Optional: customize volume mount path in the container # - name: volume-mount-path # value: # - "/mnt/data" ================================================ FILE: examples/pod-containercreating-disk.yaml ================================================ # Copyright 2025 The ChaosBlade Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # Make pod stuck in ContainerCreating state by cloud disk PVC creation failure. 
# This simulates the scenario where a StorageClass triggers cloud disk provisioning # but the provisioner fails (zone mismatch, disk type not supported, quota exceeded). # # Equivalent to: # blade create k8s pod-pod containercreating-disk --namespace default \ # --storage-class alicloud-disk-ssd --kubeconfig ~/.kube/config # # Prerequisites: # - The specified StorageClass must exist in the cluster # - The operator should have RBAC permissions to create/delete pods and PVCs apiVersion: chaosblade.io/v1alpha1 kind: ChaosBlade metadata: name: pod-containercreating-disk spec: experiments: - scope: pod target: pod action: containercreating-disk desc: "Make pod stuck in ContainerCreating state by cloud disk PVC creation failure" matchers: - name: namespace value: - "default" - name: storage-class value: - "alicloud-disk-ssd" # Optional: PVC storage capacity, default: 20Gi # - name: pv-capacity # value: # - "50Gi" # Optional: volume mount path in the container, default: /mnt/data # - name: volume-mount-path # value: # - "/mnt/data" ================================================ FILE: examples/pod-cpu-load-by-names.yml ================================================ # Copyright 2025 The ChaosBlade Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
apiVersion: chaosblade.io/v1alpha1 kind: ChaosBlade metadata: name: cpu-load spec: experiments: - scope: pod target: cpu action: fullload desc: "increase pod cpu load by names" matchers: - name: names value: - "pod-example-01" - name: cpu-percent value: - "80" ================================================ FILE: examples/pod-delete_by_names.yaml ================================================ # Copyright 2025 The ChaosBlade Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. apiVersion: chaosblade.io/v1alpha1 kind: ChaosBlade metadata: name: delete-pod-by-names spec: experiments: - scope: pod target: pod action: delete desc: "delete pod by names" matchers: - name: names value: - "mypod" - name: namespace value: - "default" ================================================ FILE: examples/pod-failedmount-configmap.yaml ================================================ # Copyright 2025 The ChaosBlade Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
# Mount a non-existent ConfigMap volume to a deployment to simulate volume mount failure. # This will cause Pods to fail to start because the referenced ConfigMap does not exist. # # Equivalent to: # blade create k8s pod-pod failedmount --namespace default \ # --workload-type deployment --workload-name nginx-app \ # --volume-type configmap --kubeconfig ~/.kube/config # # Prerequisites: # - A deployment named "nginx-app" should exist in the target namespace # - The operator should have RBAC permissions to get/update deployments apiVersion: chaosblade.io/v1alpha1 kind: ChaosBlade metadata: name: pod-failedmount-configmap spec: experiments: - scope: pod target: pod action: failedmount desc: "mount a non-existent configmap volume to deployment" matchers: - name: namespace value: - "default" - name: workload-type value: - "deployment" - name: workload-name value: - "nginx-app" - name: volume-type value: - "configmap" ================================================ FILE: examples/pod-failedmount-pvc.yaml ================================================ # Copyright 2025 The ChaosBlade Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # Mount a non-existent PVC volume to a statefulset to simulate volume mount failure. # This will cause Pods to fail to start because the referenced PVC does not exist. 
# # Equivalent to: # blade create k8s pod-pod failedmount --namespace default \ # --workload-type statefulset --workload-name redis-app \ # --volume-type pvc --kubeconfig ~/.kube/config # # Prerequisites: # - A statefulset named "redis-app" should exist in the target namespace # - The operator should have RBAC permissions to get/update statefulsets apiVersion: chaosblade.io/v1alpha1 kind: ChaosBlade metadata: name: pod-failedmount-pvc spec: experiments: - scope: pod target: pod action: failedmount desc: "mount a non-existent pvc volume to statefulset" matchers: - name: namespace value: - "default" - name: workload-type value: - "statefulset" - name: workload-name value: - "redis-app" - name: volume-type value: - "pvc" ================================================ FILE: examples/pod-failedmount-secret.yaml ================================================ # Copyright 2025 The ChaosBlade Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # Mount a non-existent Secret volume to a deployment's init containers # to simulate volume mount failure at init stage. # This will cause Pods to be stuck because the init container cannot start # due to the missing Secret volume. 
# # Equivalent to: # blade create k8s pod-pod failedmount --namespace default \ # --workload-type deployment --workload-name nginx-app \ # --volume-type secret --with-initcontainer true --kubeconfig ~/.kube/config # # Prerequisites: # - A deployment named "nginx-app" with init containers should exist in the target namespace # - The operator should have RBAC permissions to get/update deployments apiVersion: chaosblade.io/v1alpha1 kind: ChaosBlade metadata: name: pod-failedmount-secret spec: experiments: - scope: pod target: pod action: failedmount desc: "mount a non-existent secret volume to deployment init containers" matchers: - name: namespace value: - "default" - name: workload-type value: - "deployment" - name: workload-name value: - "nginx-app" - name: volume-type value: - "secret" - name: with-initcontainer value: - "true" ================================================ FILE: examples/pod-imagepullsecretserror-by-auth-corruption.yaml ================================================ # Copyright 2025 The ChaosBlade Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
apiVersion: chaosblade.io/v1alpha1 kind: ChaosBlade metadata: name: pod-imagepullsecretserror-by-auth-corruption spec: experiments: - scope: pod target: pod action: imagepullsecretserror desc: "Simulate image pull authentication failure by corrupting imagePullSecrets" matchers: - name: names value: - "my-app-pod" - name: namespace value: - "default" # Optional: only corrupt a specific imagePullSecret (by default all are corrupted) # - name: secret-name # value: # - "my-registry-secret" ================================================ FILE: examples/pod-scheduling-failure.yaml ================================================ # Copyright 2025 The ChaosBlade Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # Inject scheduling failure to a deployment by modifying its affinity configuration. # This will cause new Pods to remain in Pending state because no node matches the # injected unreachable affinity rules. 
# # Prerequisites: # - A deployment named "nginx-sf-test" should exist in the target namespace # - The operator should have RBAC permissions to get/update deployments apiVersion: chaosblade.io/v1alpha1 kind: ChaosBlade metadata: name: pod-scheduling-failure spec: experiments: - scope: pod target: pod action: schedulingfailure desc: "Inject scheduling failure by modifying deployment affinity" matchers: - name: namespace value: - "nginx" - name: workload-type value: - "deployment" - name: workload-name value: - "nginx-sf-test" # Optional: affinity type, default is node-affinity # Supported values: node-affinity, node-selector, pod-affinity, pod-anti-affinity # - name: affinity-type # value: # - "node-affinity" ================================================ FILE: examples/pod-taint-node.yaml ================================================ # Copyright 2025 The ChaosBlade Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # Inject scheduling failure by adding unreachable taint to nodes. # This will cause new Pods without matching tolerations to remain in Pending state. 
# # Prerequisites: # - The operator should have RBAC permissions to get/update nodes apiVersion: chaosblade.io/v1alpha1 kind: ChaosBlade metadata: name: pod-taint-node spec: experiments: - scope: pod target: pod action: taintnode desc: "Inject scheduling failure by adding unreachable taint to nodes" matchers: - name: nodes value: - "node1" # Optional: taint effect, default is NoSchedule # Supported values: NoSchedule, NoExecute, PreferNoSchedule # WARNING: NoExecute will evict running pods without matching tolerations # - name: taint-effect # value: # - "NoSchedule" # Optional: custom taint key and value # - name: taint-key # value: # - "chaosblade.io/unreachable" # - name: taint-value # value: # - "true" ================================================ FILE: examples/pod-terminating-by-finalizer.yaml ================================================ # Copyright 2025 The ChaosBlade Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
apiVersion: chaosblade.io/v1alpha1 kind: ChaosBlade metadata: name: pod-terminating-by-finalizer spec: experiments: - scope: pod target: pod action: terminating desc: "Make pod stuck in Terminating state by adding a finalizer that blocks deletion" matchers: - name: names value: - "nginx-140862721796080896-0" - name: namespace value: - "nginx" ================================================ FILE: examples/remove_container_by_id.yaml ================================================ # Copyright 2025 The ChaosBlade Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. apiVersion: chaosblade.io/v1alpha1 kind: ChaosBlade metadata: name: remove-container-by-id spec: experiments: - scope: container target: container action: remove desc: "remove container by id" matchers: - name: container-ids value: ["072aa6bbf2e2e2"] # pod name - name: names value: ["frontend-d89756ff7-szblb"] - name: namespace value: ["default"] ================================================ FILE: examples/tamper_container_dns_by_id.yaml ================================================ # Copyright 2025 The ChaosBlade Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. apiVersion: chaosblade.io/v1alpha1 kind: ChaosBlade metadata: name: tamper-container-dns-by-id spec: experiments: - scope: container target: network action: dns desc: "tamper container dns by id" matchers: - name: container-ids value: - "4b25f66580c4" - name: domain value: ["www.baidu.com"] - name: ip value: ["10.0.0.1"] # pod names - name: names value: ["frontend-d89756ff7-trsxf"] # or use pod labels ================================================ FILE: examples/test-configmap-delete.yaml ================================================ # Copyright 2025 The ChaosBlade Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
apiVersion: chaosblade.io/v1alpha1 kind: ChaosBlade metadata: name: test-configmap-delete spec: experiments: - scope: pod target: pod action: configmapdelete desc: "Delete ConfigMap to simulate Pod startup failure" matchers: - name: names value: - "my-pod-name" - name: namespace value: - "default" # Optional: specify the ConfigMap name to delete; if not specified, the first non-optional ConfigMap the Pod depends on is selected automatically # - name: configmap-name # value: # - "my-config" ================================================ FILE: exec/container/application.go ================================================ /* * Copyright 2025 The ChaosBlade Authors * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. 
*/ package container import ( "fmt" "path" "github.com/chaosblade-io/chaosblade-spec-go/spec" "github.com/chaosblade-io/chaosblade-spec-go/util" "github.com/sirupsen/logrus" "github.com/chaosblade-io/chaosblade-operator/pkg/runtime/chaosblade" "github.com/chaosblade-io/chaosblade-operator/version" ) var JvmSpecPathForYaml = "" // getJvmModels returns java experiment specs func getJvmModels() []spec.ExpModelCommandSpec { jvmSpecFile := path.Join(chaosblade.OperatorChaosBladeYaml, fmt.Sprintf("chaosblade-jvm-spec-%s.yaml", version.Version)) if JvmSpecPathForYaml != "" { jvmSpecFile = fmt.Sprintf("%s/chaosblade-jvm-spec-%s.yaml", JvmSpecPathForYaml, version.Version) } modelCommandSpecs := make([]spec.ExpModelCommandSpec, 0) models, err := util.ParseSpecsToModel(jvmSpecFile, nil) if err != nil { logrus.Warningf("parse java spec failed, so skip it, %s", err) return modelCommandSpecs } for idx := range models.Models { modelCommandSpecs = append(modelCommandSpecs, &models.Models[idx]) } return modelCommandSpecs } ================================================ FILE: exec/container/container.go ================================================ /* * Copyright 2025 The ChaosBlade Authors * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. 
*/ package container import ( "fmt" "strings" criexec "github.com/chaosblade-io/chaosblade-exec-cri/exec" "github.com/chaosblade-io/chaosblade-exec-os/exec/cpu" "github.com/chaosblade-io/chaosblade-exec-os/exec/disk" "github.com/chaosblade-io/chaosblade-exec-os/exec/file" "github.com/chaosblade-io/chaosblade-exec-os/exec/mem" "github.com/chaosblade-io/chaosblade-exec-os/exec/network" "github.com/chaosblade-io/chaosblade-exec-os/exec/network/tc" "github.com/chaosblade-io/chaosblade-exec-os/exec/process" "github.com/chaosblade-io/chaosblade-exec-os/exec/script" "github.com/chaosblade-io/chaosblade-spec-go/spec" "github.com/chaosblade-io/chaosblade-operator/channel" "github.com/chaosblade-io/chaosblade-operator/exec/model" ) type ResourceModelSpec struct { model.BaseResourceExpModelSpec } // NewResourceModelSpec returns the container model spec func NewResourceModelSpec(client *channel.Client) model.ResourceExpModelSpec { resourceModelSpec := &ResourceModelSpec{ model.NewBaseResourceExpModelSpec("container", client), } containerSelfModelSpec := criexec.NewContainerCommandSpec() javaExpModelSpecs := getJvmModels() subExpModelCommandSpecs := make([]spec.ExpModelCommandSpec, 0) subExpModelCommandSpecs = append(subExpModelCommandSpecs, containerSelfModelSpec) subExpModelCommandSpecs = append(subExpModelCommandSpecs, javaExpModelSpecs...) spec.AddExecutorToModelSpec(&model.ExecCommandInPodExecutor{Client: client}, subExpModelCommandSpecs...) // nsexec osSubExpModelSpecs := model.NewOSSubResourceModelSpec().ExpModels() spec.AddExecutorToModelSpec(&model.CommonExecutor{Client: client}, osSubExpModelSpecs...) subExpModelCommandSpecs = append(subExpModelCommandSpecs, osSubExpModelSpecs...) spec.AddFlagsToModelSpec(getResourceFlags, subExpModelCommandSpecs...) resourceModelSpec.RegisterExpModels(subExpModelCommandSpecs...) 
addActionExamples(resourceModelSpec) return resourceModelSpec } func addActionExamples(modelSpec *ResourceModelSpec) { for _, expModelSpec := range modelSpec.ExpModelSpecs { for _, action := range expModelSpec.Actions() { v := interface{}(action) switch v.(type) { case *process.KillProcessActionCommandSpec: action.SetLongDesc("The process scenario in container is the same as the basic resource process scenario") action.SetExample( ` # Kill the nginx process in the container blade create k8s container-process kill --process nginx --names nginx-app --container-ids f1de335b4eeaf --kubeconfig ~/.kube/config --namespace default # Use blade CLI # Specifies the signal and local port to kill the process in the container blade create k8s container-process kill --local-port 8080 --signal 15 --names nginx-app --container-ids f1de335b4eeaf --kubeconfig ~/.kube/config --namespace default`, ) case *process.StopProcessActionCommandSpec: action.SetLongDesc("The process scenario in container is the same as the basic resource process scenario") action.SetExample( ` # Pause the process that contains the "nginx" keyword in the container blade create k8s container-process stop --process nginx --names nginx-app --container-ids f1de335b4eeaf --kubeconfig ~/.kube/config --namespace default # Pause the Java process in the container blade create k8s container-process stop --process-cmd java --names nginx-app --container-ids f1de335b4eeaf --kubeconfig ~/.kube/config --namespace default`, ) case *cpu.FullLoadActionCommand: action.SetLongDesc("The CPU load experiment scenario in container is the same as the CPU scenario of basic resources") action.SetExample( ` # Create a CPU full load experiment in the container blade create k8s container-cpu load --names nginx-app --container-ids f1de335b4eeaf --kubeconfig ~/.kube/config --namespace default #Specifies two random kernel's full load in the container blade create k8s container-cpu load --cpu-percent 60 --cpu-count 2 --names nginx-app 
--container-ids f1de335b4eeaf --kubeconfig ~/.kube/config --namespace default # Specifies that the kernel is full load with index 0, 3, and that the kernel's index starts at 0 blade create k8s container-cpu load --cpu-list 0,3 --names nginx-app --container-ids f1de335b4eeaf --kubeconfig ~/.kube/config --namespace default # Specify the kernel full load of indexes 1-3 blade create k8s container-cpu load --cpu-list 1-3 --names nginx-app --container-ids f1de335b4eeaf --kubeconfig ~/.kube/config --namespace default # Specified percentage load in the container blade create k8s container-cpu load --cpu-percent 60 --names nginx-app --container-ids f1de335b4eeaf --kubeconfig ~/.kube/config --namespace default`, ) case *disk.FillActionSpec: action.SetLongDesc("The disk fill scenario experiment in the container") action.SetExample( ` # Fill the /home directory with 40G of disk space in the container blade create k8s container-disk fill --path /home --size 40000 --names nginx-app --container-ids f1de335b4eeaf --kubeconfig ~/.kube/config --namespace default # Fill the /home directory with 80% of the disk space in the container and retains the file handle that populates the disk blade create k8s container-disk fill --path /home --percent 80 --retain-handle --names nginx-app --container-ids f1de335b4eeaf --kubeconfig ~/.kube/config --namespace default # Perform a fixed-size experimental scenario in the container blade c k8s container-disk fill --path /home --reserve 1024 --names nginx-app --container-ids f1de335b4eeaf --kubeconfig ~/.kube/config --namespace default `, ) case *disk.BurnActionSpec: action.SetLongDesc("Disk read and write IO load experiment in the container") action.SetExample( `# The data of rkB/s, wkB/s and % Util were mainly observed. 
Perform disk read IO high-load scenarios blade create k8s container-disk burn --read --path /home --names nginx-app --container-ids f1de335b4eeaf --kubeconfig ~/.kube/config --namespace default # Perform disk write IO high-load scenarios blade create k8s container-disk burn --write --path /home --names nginx-app --container-ids f1de335b4eeaf --kubeconfig ~/.kube/config --namespace default # Read and write IO load scenarios are performed at the same time. Path is not specified. The default is / blade create k8s container-disk burn --read --write --names nginx-app --container-ids f1de335b4eeaf --kubeconfig ~/.kube/config --namespace default`, ) case *mem.MemLoadActionCommand: action.SetLongDesc("The memory fill experiment scenario in container") action.SetExample( `# The execution memory footprint is 50% blade create k8s container-mem load --mode ram --mem-percent 50 --names nginx-app --container-ids f1de335b4eeaf --kubeconfig ~/.kube/config --namespace default # The execution memory footprint is 50%, cache model blade create k8s container-mem load --mode cache --mem-percent 50 --names nginx-app --container-ids f1de335b4eeaf --kubeconfig ~/.kube/config --namespace default # The execution memory footprint is 50%, usage contains buffer/cache blade create k8s container-mem load --mode ram --mem-percent 50 --include-buffer-cache --names nginx-app --container-ids f1de335b4eeaf --kubeconfig ~/.kube/config --namespace default # The execution memory footprint is 50% for 200 seconds blade create k8s container-mem load --mode ram --mem-percent 50 --timeout 200 --names nginx-app --container-ids f1de335b4eeaf --kubeconfig ~/.kube/config --namespace default # 200M memory is reserved blade create k8s container-mem load --mode ram --reserve 200 --rate 100 --names nginx-app --container-ids f1de335b4eeaf --kubeconfig ~/.kube/config --namespace default`, ) case *file.FileAppendActionSpec: action.SetLongDesc("The file append experiment scenario in container") action.SetExample( `# 
Appends the content "HELLO WORLD" to the /home/logs/nginx.log file blade create k8s container-file append --filepath=/home/logs/nginx.log --content="HELLO WORLD" --names nginx-app --container-ids f1de335b4eeaf --kubeconfig ~/.kube/config --namespace default # Appends the content "HELLO WORLD" to the /home/logs/nginx.log file, interval 10 seconds blade create k8s container-file append --filepath=/home/logs/nginx.log --content="HELLO WORLD" --interval 10 --names nginx-app --container-ids f1de335b4eeaf --kubeconfig ~/.kube/config --namespace default # Appends the content "HELLO WORLD" to the /home/logs/nginx.log file, enable base64 encoding blade create k8s container-file append --filepath=/home/logs/nginx.log --content=SEVMTE8gV09STEQ= --names nginx-app --container-ids f1de335b4eeaf --kubeconfig ~/.kube/config --namespace default # mock interface timeout exception blade create k8s container-file append --filepath=/home/logs/nginx.log --content="@{DATE:+%Y-%m-%d %H:%M:%S} ERROR invoke getUser timeout [@{RANDOM:100-200}]ms abc mock exception" --names nginx-app --container-ids f1de335b4eeaf --kubeconfig ~/.kube/config --namespace default `, ) case *file.FileAddActionSpec: action.SetLongDesc("The file add experiment scenario in container") action.SetExample( `# Create a file named nginx.log in the /home directory blade create k8s container-file add --filepath /home/nginx.log --names nginx-app --container-ids f1de335b4eeaf --kubeconfig ~/.kube/config --namespace default # Create a file named nginx.log in the /home directory with the contents of HELLO WORLD blade create k8s container-file add --filepath /home/nginx.log --content "HELLO WORLD" --names nginx-app --container-ids f1de335b4eeaf --kubeconfig ~/.kube/config --namespace default # Create a file named nginx.log in the /temp directory and automatically create directories that don't exist blade create k8s container-file add --filepath /temp/nginx.log --auto-create-dir --names nginx-app --container-ids f1de335b4eeaf 
--kubeconfig ~/.kube/config --namespace default # Create a directory named /nginx in the /temp directory and automatically create directories that don't exist blade create k8s container-file add --directory --filepath /temp/nginx --auto-create-dir --names nginx-app --container-ids f1de335b4eeaf --kubeconfig ~/.kube/config --namespace default `, ) case *file.FileChmodActionSpec: action.SetLongDesc("The file permission modification scenario in container") action.SetExample(`# Modify /home/logs/nginx.log file permissions to 777 blade create k8s container-file chmod --filepath /home/logs/nginx.log --mark=777 --names nginx-app --container-ids f1de335b4eeaf --kubeconfig ~/.kube/config --namespace default `) case *file.FileDeleteActionSpec: action.SetLongDesc("The file delete scenario in container") action.SetExample( `# Delete the file /home/logs/nginx.log blade create k8s container-file delete --filepath /home/logs/nginx.log --names nginx-app --container-ids f1de335b4eeaf --kubeconfig ~/.kube/config --namespace default # Force delete the file /home/logs/nginx.log unrecoverable blade create k8s container-file delete --filepath /home/logs/nginx.log --force --names nginx-app --container-ids f1de335b4eeaf --kubeconfig ~/.kube/config --namespace default `, ) case *file.FileMoveActionSpec: action.SetLongDesc("The file move scenario in container") action.SetExample(`# Move the file /home/logs/nginx.log to /tmp blade create k8s container-file move --filepath /home/logs/nginx.log --target /tmp --names nginx-app --container-ids f1de335b4eeaf --kubeconfig ~/.kube/config --namespace default # Force Move the file /home/logs/nginx.log to /temp blade create k8s container-file move --filepath /home/logs/nginx.log --target /tmp --force --names nginx-app --container-ids f1de335b4eeaf --kubeconfig ~/.kube/config --namespace default # Move the file /home/logs/nginx.log to /temp/ and automatically create directories that don't exist blade create k8s container-file move --filepath 
/home/logs/nginx.log --target /temp --auto-create-dir --names nginx-app --container-ids f1de335b4eeaf --kubeconfig ~/.kube/config --namespace default `) case *tc.DelayActionSpec: action.SetExample( `# Access to native 8080 and 8081 ports is delayed by 3 seconds, and the delay time fluctuates by 1 second blade create k8s container-network delay --time 3000 --offset 1000 --interface eth0 --local-port 8080,8081 --names nginx-app --container-ids f1de335b4eeaf --kubeconfig ~/.kube/config --namespace default # Local access to external 14.215.177.39 machine (ping www.baidu.com obtained IP) port 80 delay of 3 seconds blade create k8s container-network delay --time 3000 --interface eth0 --remote-port 80 --destination-ip 14.215.177.39 --names nginx-app --container-ids f1de335b4eeaf --kubeconfig ~/.kube/config --namespace default # Do a 5 second delay for the entire network card eth0, excluding ports 22 and 8000 to 8080 blade create k8s container-network delay --time 5000 --interface eth0 --exclude-port 22,8000-8080 --names nginx-app --container-ids f1de335b4eeaf --kubeconfig ~/.kube/config --namespace default`, ) case *network.DropActionSpec: action.SetExample( `# Experimental scenario of network shielding blade create k8s container-network drop --source-port 80 --network-traffic in --names nginx-app --container-ids f1de335b4eeaf --kubeconfig ~/.kube/config --namespace default`, ) case *network.DnsActionSpec: action.SetExample( `# The domain name www.baidu.com is not accessible blade create k8s container-network dns --domain www.baidu.com --ip 10.0.0.0 --names nginx-app --container-ids f1de335b4eeaf --kubeconfig ~/.kube/config --namespace default`, ) case *tc.LossActionSpec: action.SetExample(`# Access to native 8080 and 8081 ports lost 70% of packets blade create k8s container-network loss --percent 70 --interface eth0 --local-port 8080,8081 --names nginx-app --container-ids f1de335b4eeaf --kubeconfig ~/.kube/config --namespace default # The machine accesses external 
14.215.177.39 machine (ping www.baidu.com) 80 port packet loss rate 100% blade create k8s container-network loss --percent 100 --interface eth0 --remote-port 80 --destination-ip 14.215.177.39 --names nginx-app --container-ids f1de335b4eeaf --kubeconfig ~/.kube/config --namespace default # Do 60% packet loss for the entire network card Eth0, excluding ports 22 and 8000 to 8080 blade create k8s container-network loss --percent 60 --interface eth0 --exclude-port 22,8000-8080 --names nginx-app --container-ids f1de335b4eeaf --kubeconfig ~/.kube/config --namespace default # Realize the whole network card is not accessible, not accessible time 20 seconds. After executing the following command, the current network is disconnected and restored in 20 seconds. Remember!! Don't forget -timeout parameter blade create k8s container-network loss --percent 100 --interface eth0 --timeout 20 --names nginx-app --container-ids f1de335b4eeaf --kubeconfig ~/.kube/config --namespace default`) case *tc.DuplicateActionSpec: action.SetExample(`# Specify the network card eth0 and repeat the packet by 10% blade create k8s container-network duplicate --percent=10 --interface=eth0 --names nginx-app --container-ids f1de335b4eeaf --kubeconfig ~/.kube/config --namespace default`) case *tc.CorruptActionSpec: action.SetExample(`# Access to the specified IP request packet is corrupted, 80% of the time blade create k8s container-network corrupt --percent 80 --destination-ip 180.101.49.12 --interface eth0 --names nginx-app --container-ids f1de335b4eeaf --kubeconfig ~/.kube/config --namespace default`) case *tc.ReorderActionSpec: action.SetExample(`# Access the specified IP request packet disorder blade create k8s container-network reorder --correlation 80 --percent 50 --gap 2 --time 500 --interface eth0 --destination-ip 180.101.49.12 --names nginx-app --container-ids f1de335b4eeaf --kubeconfig ~/.kube/config --namespace default`) case *network.OccupyActionSpec: action.SetExample(`#Specify port 8080 
occupancy blade create k8s container-network occupy --port 8080 --force --names nginx-app --container-ids f1de335b4eeaf --kubeconfig ~/.kube/config --namespace default # The machine accesses external 14.215.177.39 machine (ping www.baidu.com) 80 port packet loss rate 100% blade create k8s container-network loss --percent 100 --interface eth0 --remote-port 80 --destination-ip 14.215.177.39 --names nginx-app --container-ids f1de335b4eeaf --kubeconfig ~/.kube/config --namespace default`) case *script.ScriptDelayActionCommand: action.SetExample(` # Add commands to the script "start0() { sleep 10.000000 ...}" blade create k8s container-script delay --time 10000 --file test.sh --function-name start0 --names nginx-app --container-ids f1de335b4eeaf --kubeconfig ~/.kube/config --namespace default`) case *script.ScriptExitActionCommand: action.SetExample(` # Add commands to the script "start0() { echo this-is-error-message; exit 1; ... }" blade create k8s container-script exit --exit-code 1 --exit-message this-is-error-message --file test.sh --function-name start0 --names nginx-app --container-ids f1de335b4eeaf --kubeconfig ~/.kube/config --namespace default`) case *criexec.RemoveActionCommand: action.SetExample(` # Remove container in pod blade create k8s container-container remove --names cart-redis-77 --container-names cart-redis --namespace default --kubeconfig ~/.kube/config`) default: action.SetExample(strings.Replace( action.Example(), fmt.Sprintf("blade create %s %s", expModelSpec.Name(), action.Name()), fmt.Sprintf("blade create k8s container-%s %s --names nginx-app --container-ids f1de335b4eeaf --kubeconfig ~/.kube/config --namespace default", expModelSpec.Name(), action.Name()), -1, )) action.SetExample(strings.Replace( action.Example(), fmt.Sprintf("blade c %s %s", expModelSpec.Name(), action.Name()), fmt.Sprintf("blade c k8s container-%s %s --names nginx-app --container-ids f1de335b4eeaf --kubeconfig ~/.kube/config --namespace default", expModelSpec.Name(), 
action.Name()), -1, )) action.SetExample(strings.Replace( action.Example(), fmt.Sprintf("blade create docker %s %s", expModelSpec.Name(), action.Name()), fmt.Sprintf("blade create k8s container-%s %s --names nginx-app --container-ids f1de335b4eeaf --kubeconfig ~/.kube/config --namespace default", expModelSpec.Name(), action.Name()), -1, )) } } } } func getResourceFlags() []spec.ExpFlagSpec { coverageFlags := model.GetResourceCoverageFlags() commonFlags := model.GetResourceCommonFlags() containerFlags := model.GetContainerFlags() chaosbladeFlags := model.GetChaosBladeFlags() networkFlags := model.GetNetworkFlags() return append(append(append(append(coverageFlags, commonFlags...), containerFlags...), chaosbladeFlags...), networkFlags...) } ================================================ FILE: exec/container/controller.go ================================================ /* * Copyright 2025 The ChaosBlade Authors * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. 
*/ package container import ( "context" "errors" "fmt" "strconv" "strings" "github.com/chaosblade-io/chaosblade-exec-cri/exec/container" "github.com/chaosblade-io/chaosblade-spec-go/spec" "github.com/chaosblade-io/chaosblade-spec-go/util" "github.com/sirupsen/logrus" v1 "k8s.io/api/core/v1" "github.com/chaosblade-io/chaosblade-operator/channel" "github.com/chaosblade-io/chaosblade-operator/exec/model" "github.com/chaosblade-io/chaosblade-operator/pkg/apis/chaosblade/v1alpha1" ) type ExpController struct { model.BaseExperimentController } func NewExpController(client *channel.Client) model.ExperimentController { return &ExpController{ model.BaseExperimentController{ Client: client, ResourceModelSpec: NewResourceModelSpec(client), }, } } func (*ExpController) Name() string { return "container" } // Create an experiment about container func (e *ExpController) Create(ctx context.Context, expSpec v1alpha1.ExperimentSpec) *spec.Response { expModel := model.ExtractExpModelFromExperimentSpec(expSpec) // priority: id > name > index containerIdsValue := strings.TrimSpace(expModel.ActionFlags[model.ContainerIdsFlag.Name]) containerNamesValue := strings.TrimSpace(expModel.ActionFlags[model.ContainerNamesFlag.Name]) containerIndexValue := strings.TrimSpace(expModel.ActionFlags[model.ContainerIndexFlag.Name]) experimentId := model.GetExperimentIdFromContext(ctx) logrusField := logrus.WithField("experiment", experimentId).WithField("location", util.GetRunFuncName()) lessParameter := fmt.Sprintf("%s|%s|%s", model.ContainerIdsFlag.Name, model.ContainerNamesFlag.Name, model.ContainerIndexFlag.Name) if containerIdsValue == "" && containerNamesValue == "" && containerIndexValue == "" { errMsg := spec.ParameterLess.Sprintf(lessParameter) logrusField.Errorln(errMsg) return spec.ResponseFailWithResult(spec.ParameterLess, v1alpha1.CreateFailExperimentStatus(errMsg, nil), lessParameter) } pods, resp := e.GetMatchedPodResources(ctx, *expModel) if !resp.Success { logrusField.Errorf("uid: %s, 
get matched pod resources failed, %v", experimentId, resp.Err) resp.Result = v1alpha1.CreateFailExperimentStatus(resp.Err, []v1alpha1.ResourceStatus{}) return resp } containerObjectMetaList, resp := getMatchedContainerMetaList(pods, containerIdsValue, containerNamesValue, containerIndexValue) if !resp.Success { logrusField.Errorf("get matched container meta list failed, %v", resp.Err) resp.Result = v1alpha1.CreateFailExperimentStatus(resp.Err, []v1alpha1.ResourceStatus{}) return resp } if len(containerObjectMetaList) == 0 { // TODO need to optimize errMsg := spec.ParameterInvalid.Sprintf( strings.Join([]string{model.ContainerIdsFlag.Name, model.ContainerNamesFlag.Name, model.ContainerIndexFlag.Name}, "|"), strings.Join([]string{containerIdsValue, containerNamesValue, containerIndexValue}, "|"), "cannot find the containers", ) logrusField.Errorln(errMsg) response := spec.ResponseFailWithResult( spec.ParameterInvalid, v1alpha1.CreateFailExperimentStatus(errMsg, []v1alpha1.ResourceStatus{}), strings.Join([]string{model.ContainerIdsFlag.Name, model.ContainerNamesFlag.Name, model.ContainerIndexFlag.Name}, "|"), strings.Join([]string{containerIdsValue, containerNamesValue, containerIndexValue}, "|"), "cannot find the containers", ) return response } ctx = model.SetContainerObjectMetaListToContext(ctx, containerObjectMetaList) return e.Exec(ctx, expModel) } // Destroy func (e *ExpController) Destroy(ctx context.Context, expSpec v1alpha1.ExperimentSpec, oldExpStatus v1alpha1.ExperimentStatus) *spec.Response { logrus.WithField("experiment", model.GetExperimentIdFromContext(ctx)).WithField("location", util.GetRunFuncName()).Infof("start to destroy") expModel := model.ExtractExpModelFromExperimentSpec(expSpec) statuses := oldExpStatus.ResStatuses if statuses == nil { return spec.ReturnSuccess(v1alpha1.CreateSuccessExperimentStatus([]v1alpha1.ResourceStatus{})) } containerObjectMetaList := model.ContainerMatchedList{} for _, status := range statuses { if !status.Success { // 
does not need to destroy continue } containerObjectMeta := model.ParseIdentifier(status.Identifier) containerObjectMeta.Id = status.Id containerObjectMetaList = append(containerObjectMetaList, containerObjectMeta) } if len(containerObjectMetaList) == 0 { return spec.ReturnSuccess(v1alpha1.CreateSuccessExperimentStatus(statuses)) } ctx = model.SetContainerObjectMetaListToContext(ctx, containerObjectMetaList) return e.Exec(ctx, expModel) } // getMatchedContainerMetaList which will be used in the executor func getMatchedContainerMetaList(pods []v1.Pod, containerIdsValue, containerNamesValue, containerIndexValue string) ( model.ContainerMatchedList, *spec.Response, ) { containerObjectMetaList := model.ContainerMatchedList{} expectedContainerIds := strings.Split(containerIdsValue, ",") expectedContainerNames := strings.Split(containerNamesValue, ",") // priority id>name>index for _, pod := range pods { containerStatuses := pod.Status.ContainerStatuses if containerStatuses == nil { continue } var containerStatusErr error for _, containerStatus := range containerStatuses { // If target container's status is not running, the containerId can not be obtained // The container's status should be checked and return err if not running var containerRuntime, containerId string containerName := containerStatus.Name if containerStatus.ContainerID == "" { containerStatusErr = errors.New("containerId is empty") } else { containerRuntime, containerId = model.TruncateContainerObjectMetaUid(containerStatus.ContainerID) if containerRuntime == container.DockerRuntime { containerId = containerId[:12] } } if containerStatus.State.Running == nil { if containerStatusErr != nil { containerStatusErr = errors.New("is not running, " + containerStatusErr.Error()) } else { containerStatusErr = errors.New("is not running, containerId: " + containerStatus.ContainerID) } } if containerIdsValue != "" { for _, expectedContainerId := range expectedContainerIds { if expectedContainerId == "" { continue } 
if strings.HasPrefix(expectedContainerId, containerId) { if containerStatusErr != nil { return containerObjectMetaList, spec.ResponseFailWithFlags(spec.ParameterInvalid, model.ContainerIdsFlag.Name, expectedContainerId, fmt.Sprintf("container: %s %s", containerName, containerStatusErr.Error())) } containerObjectMetaList = append(containerObjectMetaList, model.ContainerObjectMeta{ ContainerRuntime: containerRuntime, ContainerId: containerId, ContainerName: containerName, PodName: pod.Name, Namespace: pod.Namespace, NodeName: pod.Spec.NodeName, }) } } } else if containerNamesValue != "" { for _, expectedName := range expectedContainerNames { if expectedName == "" { continue } if expectedName == containerName { // matched if containerStatusErr != nil { return containerObjectMetaList, spec.ResponseFailWithFlags(spec.ParameterInvalid, model.ContainerNamesFlag.Name, expectedName, fmt.Sprintf("container: %s %s", containerName, containerStatusErr.Error())) } containerObjectMetaList = append(containerObjectMetaList, model.ContainerObjectMeta{ ContainerRuntime: containerRuntime, ContainerId: containerId, ContainerName: containerName, PodName: pod.Name, Namespace: pod.Namespace, NodeName: pod.Spec.NodeName, }) } } } } if containerIdsValue == "" && containerNamesValue == "" && containerIndexValue != "" { idx, err := strconv.Atoi(containerIndexValue) if err != nil { return containerObjectMetaList, spec.ResponseFailWithFlags(spec.ParameterIllegal, model.ContainerIndexFlag.Name, containerIndexValue, err) } if idx > len(containerStatuses)-1 { return containerObjectMetaList, spec.ResponseFailWithFlags(spec.ParameterIllegal, model.ContainerIndexFlag.Name, containerIndexValue, "out of bound") } // If target container's status is not running, the containerId can not be obtained // The container's status should be checked and return err if not running if containerStatuses[idx].ContainerID == "" { containerStatusErr = errors.New("containerId is empty") } if 
containerStatuses[idx].State.Running == nil { if containerStatusErr != nil { containerStatusErr = errors.New("is not running, " + containerStatusErr.Error()) } else { containerStatusErr = errors.New("is not running, containerId: " + containerStatuses[idx].ContainerID) } } if containerStatusErr != nil { return containerObjectMetaList, spec.ResponseFailWithFlags(spec.ParameterInvalid, model.ContainerIndexFlag.Name, idx, fmt.Sprintf("container: %s %s", containerStatuses[idx].Name, containerStatusErr.Error())) } containerRuntime, containerId := model.TruncateContainerObjectMetaUid(containerStatuses[idx].ContainerID) if containerRuntime == container.DockerRuntime { containerId = containerId[:12] } containerObjectMetaList = append(containerObjectMetaList, model.ContainerObjectMeta{ ContainerRuntime: containerRuntime, ContainerId: containerId, ContainerName: containerStatuses[idx].Name, PodName: pod.Name, Namespace: pod.Namespace, NodeName: pod.Spec.NodeName, }) } } return containerObjectMetaList, spec.Success() } ================================================ FILE: exec/controller.go ================================================ /* * Copyright 2025 The ChaosBlade Authors * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. 
*/

package exec

import (
	"context"
	"sync"

	"github.com/chaosblade-io/chaosblade-spec-go/spec"
	"github.com/sirupsen/logrus"

	"github.com/chaosblade-io/chaosblade-operator/channel"
	"github.com/chaosblade-io/chaosblade-operator/exec/container"
	"github.com/chaosblade-io/chaosblade-operator/exec/model"
	"github.com/chaosblade-io/chaosblade-operator/exec/node"
	"github.com/chaosblade-io/chaosblade-operator/exec/pod"
	"github.com/chaosblade-io/chaosblade-operator/exec/service"
	"github.com/chaosblade-io/chaosblade-operator/pkg/apis/chaosblade/v1alpha1"
)

// ResourceDispatchedController contains all resource controllers exclude node resource
type ResourceDispatchedController struct {
	// Controllers maps a scope name (each registered controller's Name(),
	// e.g. node/pod/container/service) to the controller handling it.
	Controllers map[string]model.ExperimentController
}

var (
	executor *ResourceDispatchedController
	once     sync.Once
)

// NewDispatcherExecutor initialized when operator starting
func NewDispatcherExecutor(client *channel.Client) *ResourceDispatchedController {
	// Process-wide singleton: the first call builds and registers the scope
	// controllers; later calls return the same instance regardless of client.
	once.Do(func() {
		executor = &ResourceDispatchedController{
			Controllers: make(map[string]model.ExperimentController, 0),
		}
		executor.register(
			node.NewExpController(client),
			pod.NewExpController(client),
			container.NewExpController(client),
			service.NewExpController(client),
		)
	})
	return executor
}

// Name identifies this controller as the dispatching front-end.
func (e *ResourceDispatchedController) Name() string {
	return "dispatch"
}

// Create looks up the scope controller for expSpec.Scope and delegates
// experiment creation to it. bladeName is the ChaosBlade resource name and
// becomes the experiment id carried in the context. Returns an Error status
// when no controller is registered for the scope.
func (e *ResourceDispatchedController) Create(bladeName string, expSpec v1alpha1.ExperimentSpec) v1alpha1.ExperimentStatus {
	logrus.WithField("experiment", bladeName).Infof("start to create experiment")
	controller := e.Controllers[expSpec.Scope]
	if controller == nil {
		logrus.WithField("experiment", bladeName).WithField("scope", expSpec.Scope).Errorf("controller not found")
		return v1alpha1.ExperimentStatus{
			State: "Error",
			Error: "can not find the scope controller for creating",
		}
	}
	ctx := model.SetExperimentIdToContext(context.Background(), bladeName)
	response := controller.Create(ctx, expSpec)
	experimentStatus := createExperimentStatusByResponse(response)
	// Echo the spec coordinates into the status so it is self-describing.
	experimentStatus.Scope = expSpec.Scope
	experimentStatus.Target = expSpec.Target
	experimentStatus.Action = expSpec.Action
	return experimentStatus
}

// Destroy delegates experiment destruction to the scope controller and then
// reconciles the new status against oldExpStatus so that a failed destroy does
// not overwrite previously recorded state.
func (e *ResourceDispatchedController) Destroy(bladeName string, expSpec v1alpha1.ExperimentSpec, oldExpStatus v1alpha1.ExperimentStatus) v1alpha1.ExperimentStatus {
	controller := e.Controllers[expSpec.Scope]
	if controller == nil {
		return v1alpha1.ExperimentStatus{
			State: "Error",
			Error: "can not find the scope controller for destroying",
		}
	}
	if oldExpStatus.ResStatuses == nil || len(oldExpStatus.ResStatuses) == 0 {
		// Still attempt to destroy - the action may have succeeded but status wasn't recorded
		// (e.g., due to status update conflict). The action's destroy logic should handle
		// the case where the resource was never modified.
	}
	ctx := spec.SetDestroyFlag(context.Background(), bladeName)
	ctx = model.SetExperimentIdToContext(ctx, bladeName)
	response := controller.Destroy(ctx, expSpec, oldExpStatus)
	newExpStatus := createExperimentStatusByResponse(response)
	newExpStatus = validateAndSetNecessaryFields(newExpStatus, oldExpStatus)
	return newExpStatus
}

// validateAndSetNecessaryFields to resolve status overwriting when the experiment is destroyed.
func validateAndSetNecessaryFields(status v1alpha1.ExperimentStatus, oldExpStatus v1alpha1.ExperimentStatus) v1alpha1.ExperimentStatus { status.Scope = oldExpStatus.Scope status.Target = oldExpStatus.Target status.Action = oldExpStatus.Action if status.State == "Error" { status.State = oldExpStatus.State } if status.ResStatuses == nil { return status } for _, s := range status.ResStatuses { for _, os := range oldExpStatus.ResStatuses { if s.Id != os.Id { continue } if s.State == "Error" { s.State = os.State } } } return status } // createExperimentStatusByResponse wraps experiment statuses func createExperimentStatusByResponse(response *spec.Response) v1alpha1.ExperimentStatus { experimentStatus := v1alpha1.ExperimentStatus{} if response.Result != nil { experimentStatus = response.Result.(v1alpha1.ExperimentStatus) } else { if response.Success { experimentStatus = v1alpha1.CreateSuccessExperimentStatus([]v1alpha1.ResourceStatus{}) } else { experimentStatus = v1alpha1.CreateFailExperimentStatus(response.Err, []v1alpha1.ResourceStatus{}) } } return experimentStatus } func (e *ResourceDispatchedController) register(cs ...model.ExperimentController) { for _, c := range cs { e.Controllers[c.Name()] = c } } ================================================ FILE: exec/model/category.go ================================================ /* * Copyright 2025 The ChaosBlade Authors * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. 
*/

package model

const CategorySystemContainer = "system_container"

================================================
FILE: exec/model/context.go
================================================
/*
 * Copyright 2025 The ChaosBlade Authors
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package model

import (
	"context"
	"fmt"
	"strings"

	"github.com/sirupsen/logrus"
)

// Context keys used by this package. NOTE(review): plain string keys are used
// with context.WithValue; go vet recommends an unexported key type to avoid
// collisions — changing that would touch every caller, so it is only flagged.
const (
	ContainerObjectMetaListKey = "ContainerObjectMetaListKey"
	ExperimentIdKey            = "ExperimentIdKey"
)

// ContainerObjectMeta describes one matched container and where it runs.
type ContainerObjectMeta struct {
	// experiment id
	Id               string
	ContainerRuntime string
	ContainerId      string
	ContainerName    string
	PodName          string
	NodeName         string
	Namespace        string
}

// ContainerMatchedList is the list of containers matched for an experiment.
type ContainerMatchedList []ContainerObjectMeta

// GetExperimentIdFromContext returns the experiment id stored by
// SetExperimentIdToContext, or "UnknownId" when none was set (logging fallback).
func GetExperimentIdFromContext(ctx context.Context) string {
	experimentId := ctx.Value(ExperimentIdKey)
	if experimentId == nil {
		return "UnknownId"
	}
	return experimentId.(string)
}

// SetExperimentIdToContext stores the experiment id in the context.
func SetExperimentIdToContext(ctx context.Context, experimentId string) context.Context {
	return context.WithValue(ctx, ExperimentIdKey, experimentId)
}

// GetContainerObjectMetaListFromContext returns the matched container list
// previously stored with SetContainerObjectMetaListToContext, or an error if
// the context carries none.
func GetContainerObjectMetaListFromContext(ctx context.Context) (ContainerMatchedList, error) {
	containerObjectMetaListValue := ctx.Value(ContainerObjectMetaListKey)
	if containerObjectMetaListValue == nil {
		return nil, fmt.Errorf("less container object meta in context")
	}
	containerObjectMetaList := containerObjectMetaListValue.(ContainerMatchedList)
	return containerObjectMetaList, nil
}

// SetContainerObjectMetaListToContext stores the matched container list in the
// context and logs it for traceability.
func SetContainerObjectMetaListToContext(ctx context.Context, containerMatchedList ContainerMatchedList) context.Context {
	logrus.WithField("experiment", GetExperimentIdFromContext(ctx)).Infof("set container list: %+v", containerMatchedList)
	return context.WithValue(ctx, ContainerObjectMetaListKey, containerMatchedList)
}

// GetIdentifier renders the meta as
// "Namespace/Node/Pod[/ContainerName[/ContainerId[/Runtime]]]"; empty trailing
// fields are omitted. NOTE(review): if a middle field is empty while a later
// one is set, the segments shift and ParseIdentifier will mis-assign them.
func (c *ContainerObjectMeta) GetIdentifier() string {
	identifier := fmt.Sprintf("%s/%s/%s", c.Namespace, c.NodeName, c.PodName)
	if c.ContainerName != "" {
		identifier = fmt.Sprintf("%s/%s", identifier, c.ContainerName)
	}
	if c.ContainerId != "" {
		identifier = fmt.Sprintf("%s/%s", identifier, c.ContainerId)
	}
	if c.ContainerRuntime != "" {
		identifier = fmt.Sprintf("%s/%s", identifier, c.ContainerRuntime)
	}
	return identifier
}

// Namespace/Node/Pod/ContainerName/ContainerId/containerRuntime
// ParseIdentifier is the inverse of GetIdentifier: it splits the identifier on
// "/" (at most 6 segments) and assigns fields positionally.
func ParseIdentifier(identifier string) ContainerObjectMeta {
	ss := strings.SplitN(identifier, "/", 6)
	meta := ContainerObjectMeta{}
	switch len(ss) {
	case 0:
		return meta
	case 1:
		meta.Namespace = ss[0]
	case 2:
		meta.Namespace = ss[0]
		meta.NodeName = ss[1]
	case 3:
		meta.Namespace = ss[0]
		meta.NodeName = ss[1]
		meta.PodName = ss[2]
	case 4:
		meta.Namespace = ss[0]
		meta.NodeName = ss[1]
		meta.PodName = ss[2]
		meta.ContainerName = ss[3]
	case 5:
		meta.Namespace = ss[0]
		meta.NodeName = ss[1]
		meta.PodName = ss[2]
		meta.ContainerName = ss[3]
		meta.ContainerId = ss[4]
	case 6:
		meta.Namespace = ss[0]
		meta.NodeName = ss[1]
		meta.PodName = ss[2]
		meta.ContainerName = ss[3]
		meta.ContainerId = ss[4]
		meta.ContainerRuntime = ss[5]
	}
	return meta
}

================================================
FILE: exec/model/controller.go
================================================
/*
 * Copyright 2025 The ChaosBlade Authors
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package model

import (
	"context"
	"fmt"
	"math"
	"strconv"
	"strings"

	"github.com/sirupsen/logrus"

	"github.com/chaosblade-io/chaosblade-spec-go/spec"

	"github.com/chaosblade-io/chaosblade-operator/channel"
	"github.com/chaosblade-io/chaosblade-operator/pkg/apis/chaosblade/v1alpha1"
)

// ExpController is the top-level controller contract keyed by the ChaosBlade
// resource name (bladeName).
type ExpController interface {
	// controller Name
	Name() string
	// Create
	Create(bladeName string, expSpec v1alpha1.ExperimentSpec) v1alpha1.ExperimentStatus
	// Destroy
	Destroy(bladeName string, expSpec v1alpha1.ExperimentSpec, oldExpStatus v1alpha1.ExperimentStatus) v1alpha1.ExperimentStatus
}

// ExperimentController is the per-scope controller contract; the experiment id
// travels in ctx (see SetExperimentIdToContext).
type ExperimentController interface {
	// controller Name
	Name() string
	// Create experiment
	Create(ctx context.Context, expSpec v1alpha1.ExperimentSpec) *spec.Response
	// Destroy
	Destroy(ctx context.Context, expSpec v1alpha1.ExperimentSpec, oldExpStatus v1alpha1.ExperimentStatus) *spec.Response
}

// BaseExperimentController provides the shared Exec/Destroy plumbing embedded
// by the concrete scope controllers.
type BaseExperimentController struct {
	Client            *channel.Client
	ResourceModelSpec ResourceExpModelSpec
}

// Destroy converts the spec to an ExpModel and re-runs Exec; the destroy flag
// is expected to already be set in ctx by the caller.
func (b *BaseExperimentController) Destroy(ctx context.Context, expSpec v1alpha1.ExperimentSpec) *spec.Response {
	expModel := ExtractExpModelFromExperimentSpec(expSpec)
	return b.Exec(ctx, expModel)
}

// Exec gets action executor and execute experiments
func (b *BaseExperimentController) Exec(ctx context.Context, expModel *spec.ExpModel) *spec.Response {
	experimentId := GetExperimentIdFromContext(ctx)
	logrusField := logrus.WithField("experiment", experimentId)
	logrusField.Infof("start to execute: %+v", expModel)
	// get action spec
	actionSpec := b.ResourceModelSpec.GetExpActionModelSpec(expModel.Target, expModel.ActionName)
	if actionSpec == nil {
		errMsg := "can not find the action handler"
		logrusField.WithFields(logrus.Fields{
			"target": expModel.Target,
			"action": expModel.ActionName,
		}).Error(errMsg)
		handler := fmt.Sprintf("%s.%s", expModel.Target, expModel.ActionName)
		errMsg = spec.HandlerExecNotFound.Sprintf(handler)
		return spec.ResponseFailWithResult(spec.HandlerExecNotFound,
			v1alpha1.CreateFailExperimentStatus(errMsg, []v1alpha1.ResourceStatus{}), handler)
	}
	expModel.ActionPrograms = actionSpec.Programs()
	// invoke action executor
	response := actionSpec.Executor().Exec(experimentId, ctx, expModel)
	return response
}

// ExtractExpModelFromExperimentSpec convert ExperimentSpec to ExpModel
func ExtractExpModelFromExperimentSpec(experimentSpec v1alpha1.ExperimentSpec) *spec.ExpModel {
	expModel := &spec.ExpModel{
		Target:      experimentSpec.Target,
		Scope:       experimentSpec.Scope,
		ActionName:  experimentSpec.Action,
		ActionFlags: make(map[string]string, 0),
	}
	if experimentSpec.Matchers != nil {
		for _, flag := range experimentSpec.Matchers {
			// Multi-valued matchers collapse to a comma-separated flag value.
			expModel.ActionFlags[flag.Name] = strings.Join(flag.Value, ",")
		}
	}
	return expModel
}

// GetResourceCount resolves how many of resourceCount resources to affect from
// the count/percent flags: the result is min(count, round(percent%), resourceCount).
// A flag explicitly set to 0 is rejected; an absent count defaults to "all" and
// an absent percent defaults to 100.
func GetResourceCount(resourceCount int, flags map[string]string) (int, *spec.Response) {
	if resourceCount == 0 {
		return 0, spec.Success()
	}
	// Defaults chosen so that an unset flag never constrains the result.
	count := math.MaxInt32
	percent := 100
	var err error
	countValue := flags[ResourceCountFlag.Name]
	if countValue != "" {
		count, err = strconv.Atoi(countValue)
		if err != nil {
			return 0, spec.ResponseFailWithFlags(spec.ParameterIllegal,
				ResourceCountFlag.Name, countValue, err)
		}
		if count == 0 {
			return 0, spec.ResponseFailWithFlags(spec.ParameterIllegal,
				ResourceCountFlag.Name, countValue, "it must be a positive integer")
		}
	}
	percentValue := flags[ResourcePercentFlag.Name]
	if percentValue != "" {
		percent, err = strconv.Atoi(percentValue)
		if err != nil {
			return 0, spec.ResponseFailWithFlags(spec.ParameterIllegal,
				ResourcePercentFlag.Name, percentValue, err)
		}
		if percent == 0 {
			return 0, spec.ResponseFailWithFlags(spec.ParameterIllegal,
				ResourcePercentFlag.Name, percentValue, "it must be a positive integer")
		}
	}
	// The tighter of the two constraints wins, capped at resourceCount.
	percentCount := int(math.Round(float64(percent) / 100.0 * float64(resourceCount)))
	if count > percentCount {
		count = percentCount
	}
	if count > resourceCount {
		return resourceCount, spec.Success()
	}
	return count, spec.Success()
}

// CreateDestroyedStatus returns the ExperimentStatus with destroyed state
func CreateDestroyedStatus(oldExpStatus v1alpha1.ExperimentStatus) v1alpha1.ExperimentStatus {
	statuses := make([]v1alpha1.ResourceStatus, 0)
	if oldExpStatus.ResStatuses != nil {
		for _, status := range oldExpStatus.ResStatuses {
			statuses = append(statuses, v1alpha1.ResourceStatus{
				// experiment uid in chaosblade
				Id: status.Id,
				// experiment state
				State:   v1alpha1.DestroyedState,
				Success: true,
				// resource name
				Kind:       status.Kind,
				Identifier: status.Identifier,
			})
		}
	}
	return v1alpha1.CreateDestroyedExperimentStatus(statuses)
}

================================================
FILE: exec/model/controller_test.go
================================================
/*
 * Copyright 2025 The ChaosBlade Authors
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package model

import (
	"testing"
)

// TestGetResourceCount is a table-driven test of GetResourceCount covering the
// percent-only, count-only, combined, and illegal-zero flag cases.
func TestGetResourceCount(t *testing.T) {
	type args struct {
		resourceCount int
		flags         map[string]string
	}
	tests := []struct {
		name    string
		args    args
		want    int
		wantErr bool
	}{
		{name: "evict-percent=0", args: args{10, map[string]string{"evict-count": "", "evict-percent": "0"}}, want: 0, wantErr: true},
		{name: "evict-percent=10", args: args{10, map[string]string{"evict-count": "", "evict-percent": "10"}}, want: 1, wantErr: false},
		{name: "evict-percent=55", args: args{10, map[string]string{"evict-count": "", "evict-percent": "55"}}, want: 6, wantErr: false},
		{name: "evict-percent=100", args: args{10, map[string]string{"evict-count": "", "evict-percent": "100"}}, want: 10, wantErr: false},
		// Both flags set: the smaller constraint (10% of 10 -> 1) wins.
		{name: "evict-count=5,evict-percent==10", args: args{10, map[string]string{"evict-count": "5", "evict-percent": "10"}}, want: 1, wantErr: false},
		// Count larger than the resource total is capped at the total.
		{name: "evict-count=20", args: args{10, map[string]string{"evict-count": "20", "evict-percent": ""}}, want: 10, wantErr: false},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			got, resp := GetResourceCount(tt.args.resourceCount, tt.args.flags)
			// A failed response plays the role of the error in this API.
			hasErr := resp != nil && !resp.Success
			if hasErr != tt.wantErr {
				t.Errorf("GetResourceCount() error = %v, wantErr %v", resp, tt.wantErr)
				return
			}
			if !hasErr && got != tt.want {
				t.Errorf("GetResourceCount() got = %v, want %v", got, tt.want)
			}
		})
	}
}

================================================
FILE: exec/model/copy.go
================================================
/*
 * Copyright 2025 The ChaosBlade Authors
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package model

import (
	"archive/tar"
	"errors"
	"io"
	"io/ioutil"
	"os"
	"path"
	"path/filepath"
	"strings"

	"github.com/chaosblade-io/chaosblade-spec-go/spec"
	"github.com/chaosblade-io/chaosblade-spec-go/util"
	"github.com/sirupsen/logrus"

	"github.com/chaosblade-io/chaosblade-operator/channel"
)

// CopyOptions deploys files to a target container by streaming a tar archive
// through pods/exec (kubectl-cp style), as opposed to DownloadOptions which
// fetches over HTTP.
type CopyOptions struct {
	DeployOptions
}

// makeTar writes a tar archive of srcPath (renamed to destPath inside the
// archive) to writer.
func makeTar(srcPath, destPath string, writer io.Writer) error {
	tarWriter := tar.NewWriter(writer)
	defer tarWriter.Close()
	srcPath = path.Clean(srcPath)
	destPath = path.Clean(destPath)
	err := recursiveTar(path.Dir(srcPath), path.Base(srcPath), path.Dir(destPath), path.Base(destPath), tarWriter)
	return err
}

// recursiveTar appends srcBase/srcFile (directories recursively, symlinks as
// links, everything else as regular entries) to tw under the destBase/destFile
// name. srcFile may be a glob pattern (filepath.Glob is applied).
func recursiveTar(srcBase, srcFile, destBase, destFile string, tw *tar.Writer) error {
	logrus.WithFields(logrus.Fields{
		"srcBase":  srcBase,
		"srcFile":  srcFile,
		"destBase": destBase,
		"destFile": destFile,
	}).Debugln("recursiveTar")
	srcPath := path.Join(srcBase, srcFile)
	matchedPaths, err := filepath.Glob(srcPath)
	if err != nil {
		return err
	}
	// NOTE(review): the directory and regular-file branches below return out of
	// this loop after the first match, so when the glob matches several paths
	// only the first is archived — confirm whether multi-match globs are ever
	// passed in before relying on this.
	for _, fpath := range matchedPaths {
		stat, err := os.Lstat(fpath)
		if err != nil {
			return err
		}
		if stat.IsDir() {
			files, err := ioutil.ReadDir(fpath)
			if err != nil {
				return err
			}
			if len(files) == 0 {
				// case empty directory
				hdr, _ := tar.FileInfoHeader(stat, fpath)
				hdr.Name = destFile
				if err := tw.WriteHeader(hdr); err != nil {
					return err
				}
			}
			for _, f := range files {
				if err := recursiveTar(srcBase, path.Join(srcFile, f.Name()), destBase, path.Join(destFile, f.Name()), tw); err != nil {
					return err
				}
			}
			return nil
		} else if stat.Mode()&os.ModeSymlink != 0 {
			// case soft link
			hdr, _ := tar.FileInfoHeader(stat, fpath)
			target, err := os.Readlink(fpath)
			if err != nil {
				return err
			}
			hdr.Linkname = target
			hdr.Name = destFile
			if err := tw.WriteHeader(hdr); err != nil {
				return err
			}
		} else {
			// case regular file or other file type like pipe
			hdr, err := tar.FileInfoHeader(stat, fpath)
			if err != nil {
				return err
			}
			hdr.Name = destFile
			if err := tw.WriteHeader(hdr); err != nil {
				return err
			}
			f, err := os.Open(fpath)
			if err != nil {
				return err
			}
			// NOTE(review): the file is closed twice — once by the explicit
			// f.Close() in the return below and again by this defer; the second
			// close's error is discarded, so this is harmless but redundant.
			defer f.Close()
			if _, err := io.Copy(tw, f); err != nil {
				return err
			}
			return f.Close()
		}
	}
	return nil
}

// execute runs the exec request, defaulting the namespace/container from the
// receiver when the options leave them unset.
func (o *CopyOptions) execute(options *channel.ExecOptions) error {
	if len(options.PodNamespace) == 0 {
		options.PodNamespace = o.Namespace
	}
	if len(o.Container) > 0 {
		options.ContainerName = o.Container
	}
	if err := o.client.Exec(options); err != nil {
		return err.(error)
	}
	return nil
}

// DeployToPod copies src file or directory to specify container
func (o *CopyOptions) DeployToPod(experimentId, src, dest string) error {
	if len(src) == 0 || len(dest) == 0 {
		return errors.New("filepath can not be empty")
	}
	// The tar stream is produced on one end of the pipe while pods/exec
	// consumes the other end as stdin of `tar -x` in the target container.
	reader, writer := io.Pipe()
	// strip trailing slash (if any)
	if dest != "/" && strings.HasSuffix(string(dest[len(dest)-1]), "/") {
		dest = dest[:len(dest)-1]
	}
	// NOTE(review): the goroutine's return value is discarded (and it returns a
	// *spec.Response for the error case); failures surface only through the
	// closed pipe and the remote tar exiting — confirm that is the intent.
	go func() error {
		defer writer.Close()
		err := makeTar(src, dest, writer)
		if err != nil {
			util.Errorf(experimentId, util.GetRunFuncName(), spec.K8sExecFailed.Sprintf("tar", err.Error()))
			return spec.ResponseFailWithFlags(spec.K8sExecFailed, "tar", err)
		}
		return nil
	}()
	cmdArr := []string{"tar", "--no-same-permissions", "--no-same-owner", "-xmf", "-"}
	destDir := path.Dir(dest)
	if len(destDir) > 0 {
		cmdArr = append(cmdArr, "-C", destDir)
	}
	options := &channel.ExecOptions{
		StreamOptions: channel.StreamOptions{
			IOStreams: channel.IOStreams{
				In: reader,
			},
			Stdin: true,
			ErrDecoder: func(bytes []byte) interface{} {
				return errors.New(string(bytes))
			},
			OutDecoder: func(bytes []byte) interface{} {
				return nil
			},
		},
		PodName:       o.PodName,
		PodNamespace:  o.Namespace,
		ContainerName: o.Container,
		Command:       cmdArr,
		IgnoreOutput:  true,
	}
	return o.execute(options)
}

================================================
FILE: exec/model/deploy.go
================================================
/*
 * Copyright 2025 The ChaosBlade Authors
 *
 * Licensed under the Apache License, Version 2.0 (the
"License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package model

import (
	"errors"

	"github.com/chaosblade-io/chaosblade-operator/channel"
)

// DeployMode abstracts how the chaosblade tool gets into a target container
// (implemented by CopyOptions via tar streaming and DownloadOptions via HTTP).
type DeployMode interface {
	DeployToPod(experimentId, src, dest string) error
}

// DeployOptions identifies the target container and holds the exec client used
// to run commands inside it.
type DeployOptions struct {
	Container string
	Namespace string
	PodName   string
	client    *channel.Client
}

// CheckFileExists return nil if dest file exists
func (o *DeployOptions) CheckFileExists(dest string) error {
	// Runs `test -e dest` inside the target container; a non-zero exit comes
	// back as the error.
	options := &channel.ExecOptions{
		StreamOptions: channel.StreamOptions{
			ErrDecoder: func(bytes []byte) interface{} {
				return errors.New(string(bytes))
			},
			OutDecoder: func(bytes []byte) interface{} {
				return nil
			},
		},
		PodNamespace:  o.Namespace,
		PodName:       o.PodName,
		ContainerName: o.Container,
		Command:       []string{"test", "-e", dest},
		IgnoreOutput:  true,
	}
	if err := o.client.Exec(options); err != nil {
		return err.(error)
	}
	return nil
}

// CreateDir creates dir (and parents, `mkdir -p`) inside the target container.
func (o *DeployOptions) CreateDir(dir string) error {
	if len(dir) == 0 {
		return errors.New("illegal directory name")
	}
	options := &channel.ExecOptions{
		StreamOptions: channel.StreamOptions{
			ErrDecoder: func(bytes []byte) interface{} {
				return errors.New(string(bytes))
			},
			OutDecoder: func(bytes []byte) interface{} {
				return nil
			},
		},
		PodName:       o.PodName,
		PodNamespace:  o.Namespace,
		ContainerName: o.Container,
		Command:       []string{"mkdir", "-p", dir},
		IgnoreOutput:  true,
	}
	if err := o.client.Exec(options); err != nil {
		return err.(error)
	}
	return nil
}

================================================
FILE: exec/model/download.go
================================================
/*
 * Copyright 2025
The ChaosBlade Authors * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package model import ( "errors" "fmt" "path" "strconv" "strings" "github.com/sirupsen/logrus" "github.com/chaosblade-io/chaosblade-operator/channel" "github.com/chaosblade-io/chaosblade-operator/pkg/runtime/chaosblade" ) /* . ├── bin ├── blade ├── lib.tar.gz └── yaml.tar.gz */ const ( bin = "bin" blade = "blade" lib = "lib.tar.gz" yaml = "yaml.tar.gz" ) type DownloadOptions struct { DeployOptions url string } func (d *DownloadOptions) DeployToPod(experimentId, src, dest string) error { if len(src) == 0 { return errors.New("the chaosblade downloaded address is empty") } url := d.getUrl(src) // code=$( curl -s -L -w %{http_code} -o /opt/yaml.tar.gz https://xxx/temp/yaml.tar.gz ) && [ $code = 200 ] && tar -zxf /opt/yaml.tar.gz -C /opt && echo $code || echo $code var command []string isTarFile := strings.HasSuffix(url, "tar.gz") if isTarFile { dest = fmt.Sprintf("%s.%s", dest, "tar.gz") } command = []string{"sh", "-c", "curl -s -L -w %{http_code} " + fmt.Sprintf("-o %s %s && chmod 755 %s", dest, url, dest)} options := &channel.ExecOptions{ StreamOptions: channel.StreamOptions{ ErrDecoder: func(bytes []byte) interface{} { return string(bytes) }, OutDecoder: func(bytes []byte) interface{} { return string(bytes) }, }, PodNamespace: d.Namespace, PodName: d.PodName, ContainerName: d.Container, Command: command, IgnoreOutput: false, } statusCode := d.client.Exec(options).(string) logrus.WithFields( logrus.Fields{ 
"experimentId": experimentId, "pod": d.PodName, "container": d.Container, "command": command, "result": statusCode, }, ).Infof("download to the target container") code, err := strconv.Atoi(strings.TrimSpace(statusCode)) if err != nil { return errors.New(statusCode) } if code != 200 { return fmt.Errorf("response code is %d", code) } if isTarFile { return d.uncompress(experimentId, dest) } return nil } func (d *DownloadOptions) uncompress(experimentId, file string) error { dir := path.Dir(file) command := []string{"/bin/sh", "-c", fmt.Sprintf("tar -zxf %s -C %s && chmod -R 755 %s", file, dir, dir)} options := &channel.ExecOptions{ StreamOptions: channel.StreamOptions{ ErrDecoder: func(bytes []byte) interface{} { return string(bytes) }, OutDecoder: func(bytes []byte) interface{} { return string(bytes) }, }, PodNamespace: d.Namespace, PodName: d.PodName, ContainerName: d.Container, Command: command, IgnoreOutput: true, } error := d.client.Exec(options) logrus.WithFields( logrus.Fields{ "experimentId": experimentId, "pod": d.PodName, "container": d.Container, "command": command, "result": error, }, ).Infof("uncompress in the target container") if error == nil { return nil } return errors.New(error.(string)) } func (d *DownloadOptions) getUrl(srcFile string) string { obj := srcFile switch srcFile { case chaosblade.OperatorChaosBladeBlade: obj = blade break case chaosblade.OperatorChaosBladeYaml: obj = yaml break case chaosblade.OperatorChaosBladeLib: obj = lib break default: obj = strings.TrimPrefix(srcFile, chaosblade.OperatorChaosBladePath+"/") } return fmt.Sprintf("%s/%s", d.url, obj) } ================================================ FILE: exec/model/executor.go ================================================ /* * Copyright 2025 The ChaosBlade Authors * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
* You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package model import ( "bytes" "context" "errors" "fmt" "path" "strconv" "strings" "time" "github.com/chaosblade-io/chaosblade-exec-cri/exec" "github.com/chaosblade-io/chaosblade-exec-cri/exec/container" "github.com/chaosblade-io/chaosblade-spec-go/spec" "github.com/chaosblade-io/chaosblade-spec-go/util" "github.com/sirupsen/logrus" v1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" pkglabels "k8s.io/apimachinery/pkg/labels" "k8s.io/apimachinery/pkg/types" cli "sigs.k8s.io/controller-runtime/pkg/client" "github.com/chaosblade-io/chaosblade-operator/channel" "github.com/chaosblade-io/chaosblade-operator/pkg/apis/chaosblade/v1alpha1" "github.com/chaosblade-io/chaosblade-operator/pkg/runtime/chaosblade" ) func checkExperimentStatus(ctx context.Context, expModel *spec.ExpModel, statuses []v1alpha1.ResourceStatus, identifiers []ExperimentIdentifierInPod, client *channel.Client) { tt := expModel.ActionFlags["timeout"] if _, ok := spec.IsDestroy(ctx); !ok && tt != "" && len(statuses) > 0 { experimentId := GetExperimentIdFromContext(ctx) go func() { timeout, err := strconv.ParseUint(tt, 10, 64) if err != nil { // the err checked in RunE function timeDuartion, _ := time.ParseDuration(tt) timeout = uint64(timeDuartion.Seconds()) } time.Sleep(time.Duration(timeout) * time.Second) ctx, cancel := context.WithTimeout(context.Background(), time.Second*30) defer cancel() ticker := time.NewTicker(time.Second) TickerLoop: for range ticker.C { select { case <-ctx.Done(): ticker.Stop() break TickerLoop default: isDestroyed := true for i, status := range statuses { 
if !status.Success { continue } containerObjectMeta := ParseIdentifier(status.Identifier) identifier := identifiers[i] podName := containerObjectMeta.PodName podNamespace := containerObjectMeta.Namespace containerName := containerObjectMeta.ContainerName if identifier.ChaosBladePodName != "" { podName = identifier.ChaosBladePodName podNamespace = identifier.ChaosBladeNamespace containerName = identifier.ChaosBladeContainerName } response := client.Exec(&channel.ExecOptions{ StreamOptions: channel.StreamOptions{ ErrDecoder: func(bytes []byte) interface{} { content := string(bytes) util.Errorf(identifier.Id, util.GetRunFuncName(), spec.K8sExecFailed.Sprintf("pods/exec", content)) return spec.Decode(content, spec.ResponseFailWithFlags(spec.K8sExecFailed, "pods/exec", content)) }, OutDecoder: func(bytes []byte) interface{} { content := string(bytes) util.Errorf(identifier.Id, util.GetRunFuncName(), spec.K8sExecFailed.Sprintf("pods/exec", content)) return spec.Decode(content, spec.ResponseFailWithFlags(spec.K8sExecFailed, "pods/exec", content)) }, }, PodName: podName, PodNamespace: podNamespace, ContainerName: containerName, Command: []string{getTargetChaosBladeBin(expModel), "status", status.Id}, IgnoreOutput: false, }).(*spec.Response) if response.Success { result := response.Result.(map[string]interface{}) if result["Status"] != v1alpha1.DestroyedState { isDestroyed = false break } } else { isDestroyed = false break } } if isDestroyed { logrus.Info("The experiment was destroyed, ExperimentId: ", experimentId) cb := &v1alpha1.ChaosBlade{} err := client.Client.Get(context.TODO(), types.NamespacedName{Name: experimentId}, cb) if err != nil { logrus.Warn(err.Error()) continue } if cb.Status.Phase != v1alpha1.ClusterPhaseDestroyed { cb.Status.Phase = v1alpha1.ClusterPhaseDestroyed err = client.Client.Status().Update(context.TODO(), cb) if err != nil { logrus.Warn(err.Error()) } continue } objectMeta := metav1.ObjectMeta{Name: experimentId} err = 
client.Client.Delete(context.TODO(), &v1alpha1.ChaosBlade{ TypeMeta: metav1.TypeMeta{ APIVersion: "chaosblade.io/v1alpha1", Kind: "ChaosBlade", }, ObjectMeta: objectMeta, }) if err != nil { logrus.Warn(err.Error()) } else { ticker.Stop() } } } } }() } } func execCommands(isDestroy bool, rsStatus v1alpha1.ResourceStatus, identifier ExperimentIdentifierInPod, client *channel.Client, ) (bool, v1alpha1.ResourceStatus) { success := false // handle chaos experiments using daemonset mode podName := identifier.PodName podNamespace := identifier.Namespace containerName := identifier.ContainerName if identifier.ChaosBladePodName != "" { podName = identifier.ChaosBladePodName podNamespace = identifier.ChaosBladeNamespace containerName = identifier.ChaosBladeContainerName } response := client.Exec(&channel.ExecOptions{ StreamOptions: channel.StreamOptions{ IOStreams: channel.IOStreams{ Out: bytes.NewBuffer([]byte{}), ErrOut: bytes.NewBuffer([]byte{}), }, ErrDecoder: func(bytes []byte) interface{} { content := string(bytes) util.Errorf(identifier.Id, util.GetRunFuncName(), spec.K8sExecFailed.Sprintf("pods/exec", content)) return spec.Decode(content, spec.ResponseFailWithFlags(spec.K8sExecFailed, "pods/exec", content)) }, OutDecoder: func(bytes []byte) interface{} { content := string(bytes) util.Infof(identifier.Id, util.GetRunFuncName(), fmt.Sprintf("exec output: %s", content)) // TODO ?? 
不应该返回错我 return spec.Decode(content, spec.ResponseFailWithFlags(spec.K8sExecFailed, "pods/exec", content)) }, }, PodName: podName, PodNamespace: podNamespace, ContainerName: containerName, Command: strings.Split(identifier.Command, " "), }).(*spec.Response) if response.Success { if !isDestroy { rsStatus.Id = response.Result.(string) } rsStatus = rsStatus.CreateSuccessResourceStatus() success = true } else { rsStatus = rsStatus.CreateFailResourceStatus(response.Err, response.Code) } return success, rsStatus } func generateDestroyCommands(experimentId string, expModel *spec.ExpModel, containerObjectMetaList ContainerMatchedList, matchers string, client *channel.Client, ) ([]ExperimentIdentifierInPod, error) { command := fmt.Sprintf("%s destroy %s %s %s", getTargetChaosBladeBin(expModel), expModel.Target, expModel.ActionName, matchers) identifiers := make([]ExperimentIdentifierInPod, 0) for idx, obj := range containerObjectMetaList { generatedCommand := command if obj.Id != "" { generatedCommand = fmt.Sprintf("%s --uid %s", command, obj.Id) } identifierInPod := ExperimentIdentifierInPod{ ContainerObjectMeta: containerObjectMetaList[idx], Command: generatedCommand, } resp := deployChaosBlade(experimentId, expModel, obj, false, client) if !resp.Success { identifierInPod.Error = resp.Err identifierInPod.Code = resp.Code } identifiers = append(identifiers, identifierInPod) } return identifiers, nil } func generateCreateCommands(experimentId string, expModel *spec.ExpModel, containerObjectMetaList ContainerMatchedList, matchers string, client *channel.Client, ) ([]ExperimentIdentifierInPod, error) { command := fmt.Sprintf("%s create %s %s %s", getTargetChaosBladeBin(expModel), expModel.Target, expModel.ActionName, matchers) identifiers := make([]ExperimentIdentifierInPod, 0) chaosBladeOverride := expModel.ActionFlags[exec.ChaosBladeOverrideFlag.Name] == "true" for idx, obj := range containerObjectMetaList { identifierInPod := ExperimentIdentifierInPod{ ContainerObjectMeta: 
containerObjectMetaList[idx], Command: command, } resp := deployChaosBlade(experimentId, expModel, obj, chaosBladeOverride, client) if !resp.Success { identifierInPod.Error = resp.Err identifierInPod.Code = resp.Code } identifiers = append(identifiers, identifierInPod) } return identifiers, nil } // GetChaosBladeDaemonsetPodName func GetChaosBladeDaemonsetPodName(nodeName string, client *channel.Client) (string, error) { podName := chaosblade.DaemonsetPodNames[nodeName] if podName == "" { if err := refreshChaosBladeDaemonsetPodNames(client); err != nil { return "", err } return chaosblade.DaemonsetPodNames[nodeName], nil } // check pod := v1.Pod{} err := client.Get(context.Background(), cli.ObjectKey{ Namespace: chaosblade.DaemonsetPodNamespace, Name: podName, }, &pod) if err == nil { return podName, nil } // refresh if err := refreshChaosBladeDaemonsetPodNames(client); err != nil { return "", err } return chaosblade.DaemonsetPodNames[nodeName], nil } func refreshChaosBladeDaemonsetPodNames(client *channel.Client) error { podList := v1.PodList{} opts := cli.ListOptions{ Namespace: chaosblade.DaemonsetPodNamespace, LabelSelector: pkglabels.SelectorFromSet(chaosblade.DaemonsetPodLabels), } if err := client.List(context.TODO(), &podList, &opts); err != nil { return err } podNames := make(map[string]string, len(podList.Items)) for _, pod := range podList.Items { podNames[pod.Spec.NodeName] = pod.Name } chaosblade.DaemonsetPodNames = podNames return nil } func getNodeExperimentIdentifiers(experimentId string, expModel *spec.ExpModel, containerMatchedList ContainerMatchedList, matchers string, destroy bool, client *channel.Client) ([]ExperimentIdentifierInPod, error) { if destroy { return generateDestroyNodeCommands(experimentId, expModel, containerMatchedList, matchers, client) } return generateCreateNodeCommands(experimentId, expModel, containerMatchedList, matchers, client) } func generateDestroyNodeCommands(experimentId string, expModel *spec.ExpModel, 
containerObjectMetaList ContainerMatchedList, matchers string, client *channel.Client) ([]ExperimentIdentifierInPod, error) { command := fmt.Sprintf("%s destroy %s %s %s", getTargetChaosBladeBin(expModel), expModel.Target, expModel.ActionName, matchers) identifiers := make([]ExperimentIdentifierInPod, 0) for idx, obj := range containerObjectMetaList { generatedCommand := command if obj.Id != "" { generatedCommand = fmt.Sprintf("%s --uid %s", command, obj.Id) } daemonsetPodName, err := GetChaosBladeDaemonsetPodName(obj.NodeName, client) if err != nil { logrus.WithField("experiment", experimentId). Errorf("get chaosblade tool pod for destroying failed on %s node, %v", obj.NodeName, err) return identifiers, err } identifierInPod := ExperimentIdentifierInPod{ ContainerObjectMeta: containerObjectMetaList[idx], Command: generatedCommand, ChaosBladeContainerName: chaosblade.DaemonsetPodName, ChaosBladeNamespace: chaosblade.DaemonsetPodNamespace, ChaosBladePodName: daemonsetPodName, } identifiers = append(identifiers, identifierInPod) } return identifiers, nil } func generateCreateNodeCommands(experimentId string, expModel *spec.ExpModel, containerObjectMetaList ContainerMatchedList, matchers string, client *channel.Client) ([]ExperimentIdentifierInPod, error) { command := fmt.Sprintf("%s create %s %s %s", getTargetChaosBladeBin(expModel), expModel.Target, expModel.ActionName, matchers) identifiers := make([]ExperimentIdentifierInPod, 0) for idx, obj := range containerObjectMetaList { daemonsetPodName, err := GetChaosBladeDaemonsetPodName(obj.NodeName, client) if err != nil { logrus.WithField("experiment", experimentId). 
Errorf("get chaosblade tool pod for creating failed on %s node, %v", obj.NodeName, err) return identifiers, err } identifierInPod := ExperimentIdentifierInPod{ ContainerObjectMeta: containerObjectMetaList[idx], Command: command, ChaosBladeContainerName: chaosblade.DaemonsetPodName, ChaosBladeNamespace: chaosblade.DaemonsetPodNamespace, ChaosBladePodName: daemonsetPodName, } identifiers = append(identifiers, identifierInPod) } return identifiers, nil } // getTargetChaosBladePath return the chaosblade deployed path in target container func getTargetChaosBladePath(expModel *spec.ExpModel) string { chaosbladePath := expModel.ActionFlags[ChaosBladePathFlag.Name] if chaosbladePath == "" { return chaosblade.OperatorChaosBladePath } return path.Join(chaosbladePath, "chaosblade") } // getTargetChaosBladeBin returns the blade deployed path in target container func getTargetChaosBladeBin(expModel *spec.ExpModel) string { return path.Join(getTargetChaosBladePath(expModel), "blade") } func ExcludeKeyFunc() func() map[string]spec.Empty { return GetResourceFlagNames } func TruncateContainerObjectMetaUid(uid string) (containerRuntime, containerId string) { if strings.HasPrefix(uid, "containerd://") { return container.ContainerdRuntime, strings.ReplaceAll(uid, "containerd://", "") } return container.DockerRuntime, strings.ReplaceAll(uid, "docker://", "") } func getDeployMode(options DeployOptions, expModel *spec.ExpModel) (DeployMode, error) { mode := expModel.ActionFlags[ChaosBladeDeployModeFlag.Name] url := expModel.ActionFlags[ChaosBladeDownloadUrlFlag.Name] switch mode { case CopyMode: return &CopyOptions{options}, nil case DownloadMode: if url == "" { url = chaosblade.DownloadUrl } if url == "" { return nil, errors.New("must config the chaosblade-download-url flag") } return &DownloadOptions{options, url}, nil default: return &CopyOptions{options}, nil } } ================================================ FILE: exec/model/executor_copy.go 
================================================ /* * Copyright 2025 The ChaosBlade Authors * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package model import ( "context" "fmt" "path" "sync" "github.com/chaosblade-io/chaosblade-exec-cri/exec/container" "github.com/chaosblade-io/chaosblade-spec-go/spec" "github.com/chaosblade-io/chaosblade-spec-go/util" "github.com/sirupsen/logrus" v1 "k8s.io/api/core/v1" apierrors "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/types" "github.com/chaosblade-io/chaosblade-operator/channel" "github.com/chaosblade-io/chaosblade-operator/pkg/apis/chaosblade/v1alpha1" "github.com/chaosblade-io/chaosblade-operator/pkg/runtime/chaosblade" "github.com/chaosblade-io/chaosblade-operator/version" ) type ExperimentIdentifierInPod struct { ContainerObjectMeta Command string Error string Code int32 // For daemonset ChaosBladePodName string ChaosBladeNamespace string ChaosBladeContainerName string } type ExecCommandInPodExecutor struct { Client *channel.Client } func (e *ExecCommandInPodExecutor) Name() string { return "execInPod" } func (e *ExecCommandInPodExecutor) SetChannel(channel spec.Channel) { } // execInMatchedPod will execute the experiment in the target pod func (e *ExecCommandInPodExecutor) Exec(uid string, ctx context.Context, expModel *spec.ExpModel) *spec.Response { logrusField := logrus.WithField("experiment", GetExperimentIdFromContext(ctx)) experimentStatus := v1alpha1.ExperimentStatus{ ResStatuses: 
make([]v1alpha1.ResourceStatus, 0), } experimentIdentifiers, err := getExperimentIdentifiers(ctx, expModel, e.Client) if err != nil { logrusField.Errorf("get experiment identifiers failed, err: %s", err.Error()) return spec.ResponseFailWithResult(spec.GetIdentifierFailed, v1alpha1.CreateFailExperimentStatus(err.Error(), []v1alpha1.ResourceStatus{}), err) } logrusField.Infof("experiment identifiers: %v", experimentIdentifiers) statuses := experimentStatus.ResStatuses success := true _, isDestroy := spec.IsDestroy(ctx) updateResultLock := &sync.Mutex{} execCommandInPod := func(i int) { execSuccess := true identifier := experimentIdentifiers[i] rsStatus := v1alpha1.ResourceStatus{ Kind: expModel.Scope, Identifier: identifier.GetIdentifier(), Id: identifier.Id, } if identifier.Error != "" { rsStatus.CreateFailResourceStatus(identifier.Error, spec.K8sExecFailed.Code) execSuccess = false } else if identifier.PodName != "" { // check if pod exist pod := &v1.Pod{} err := e.Client.Get(context.TODO(), types.NamespacedName{ Namespace: identifier.Namespace, Name: identifier.PodName, }, pod) if err != nil { if apierrors.IsNotFound(err) { // pod if not exist, the execution is considered successful. 
msg := fmt.Sprintf("pod: %s in %s not found, skip to execute command in it", identifier.PodName, identifier.Namespace) logrusField.Warningln(msg) rsStatus.CreateSuccessResourceStatus() rsStatus.Error = msg success = true } else { // if get pod error, the execution is considered failure msg := fmt.Sprintf("get pod: %s in %s error", identifier.PodName, identifier.Namespace) rsStatus.CreateFailResourceStatus(msg, spec.K8sExecFailed.Code) execSuccess = false } } } if execSuccess { logrusField.Infof("execute identifier: %+v", identifier) execSuccess, rsStatus = execCommands(isDestroy, rsStatus, identifier, e.Client) } updateResultLock.Lock() statuses = append(statuses, rsStatus) // If false occurs once, the result is fails success = success && execSuccess updateResultLock.Unlock() } ParallelizeExec(len(experimentIdentifiers), execCommandInPod) logrusField.Infof("success: %t, statuses: %+v", success, statuses) if success { experimentStatus.State = v1alpha1.SuccessState } else { experimentStatus.State = v1alpha1.ErrorState if len(statuses) == 0 { experimentStatus.Error = "the resources not found" } else { experimentStatus.Error = "see resStatus for the error details" } } experimentStatus.Success = success experimentStatus.ResStatuses = append(experimentStatus.ResStatuses, statuses...) 
checkExperimentStatus(ctx, expModel, statuses, experimentIdentifiers, e.Client) return spec.ReturnResultIgnoreCode(experimentStatus) } func getExperimentIdentifiers(ctx context.Context, expModel *spec.ExpModel, client *channel.Client) ([]ExperimentIdentifierInPod, error) { delete(expModel.ActionFlags, "uid") containerObjectMetaList, err := GetContainerObjectMetaListFromContext(ctx) if err != nil { return []ExperimentIdentifierInPod{}, err } excludeFlagsFunc := ExcludeKeyFunc() matchers := spec.ConvertExpMatchersToString(expModel, excludeFlagsFunc) experimentId := GetExperimentIdFromContext(ctx) _, destroy := spec.IsDestroy(ctx) isDockerNetwork := expModel.ActionFlags[IsDockerNetworkFlag.Name] == "true" UseSidecarContainerNetwork := expModel.ActionFlags[UseSidecarContainerNetworkFlag.Name] == "true" isContainerSelfTarget := expModel.Target == "container" isContainerNetworkTarget := expModel.Target == "network" isNodeScope := expModel.Scope == "node" if isNodeScope { return getNodeExperimentIdentifiers(experimentId, expModel, containerObjectMetaList, matchers, destroy, client) } if isContainerSelfTarget || (isContainerNetworkTarget && (isDockerNetwork || UseSidecarContainerNetwork)) { if version.CheckVerisonHaveCriCommand() || containerObjectMetaList[0].ContainerRuntime == container.ContainerdRuntime { return getCriExperimentIdentifiers(experimentId, expModel, containerObjectMetaList, matchers, destroy, isContainerNetworkTarget, client) } return getDockerExperimentIdentifiers(experimentId, expModel, containerObjectMetaList, matchers, destroy, isContainerNetworkTarget, client) } if destroy { return generateDestroyCommands(experimentId, expModel, containerObjectMetaList, matchers, client) } return generateCreateCommands(experimentId, expModel, containerObjectMetaList, matchers, client) } func getDockerExperimentIdentifiers(experimentId string, expModel *spec.ExpModel, containerObjectMetaList ContainerMatchedList, matchers string, destroy, isNetworkTarget bool, client 
*channel.Client, ) ([]ExperimentIdentifierInPod, error) { if isNetworkTarget { matchers = fmt.Sprintf("%s --image-repo %s --image-version %s", matchers, chaosblade.Constant.ImageRepoFunc(), chaosblade.Version) } if destroy { return generateDestroyDockerCommands(experimentId, expModel, containerObjectMetaList, matchers, isNetworkTarget, client) } return generateCreateDockerCommands(experimentId, expModel, containerObjectMetaList, matchers, client) } func getCriExperimentIdentifiers(experimentId string, expModel *spec.ExpModel, containerObjectMetaList ContainerMatchedList, matchers string, destroy, isNetworkTarget bool, client *channel.Client, ) ([]ExperimentIdentifierInPod, error) { if isNetworkTarget { matchers = fmt.Sprintf("%s --image-repo %s --image-version %s", matchers, chaosblade.Constant.ImageRepoFunc(), chaosblade.Version) } if destroy { return generateDestroyCriCommands(experimentId, expModel, containerObjectMetaList, matchers, client) } return generateCreateCriCommands(experimentId, expModel, containerObjectMetaList, matchers, client) } func generateDestroyDockerCommands(experimentId string, expModel *spec.ExpModel, containerObjectMetaList ContainerMatchedList, matchers string, isNetworkTarget bool, client *channel.Client, ) ([]ExperimentIdentifierInPod, error) { command := fmt.Sprintf("%s destroy docker %s %s %s", getTargetChaosBladeBin(expModel), expModel.Target, expModel.ActionName, matchers) identifiers := make([]ExperimentIdentifierInPod, 0) for idx, obj := range containerObjectMetaList { daemonsetPodName, err := GetChaosBladeDaemonsetPodName(obj.NodeName, client) if err != nil { logrus.WithField("experiment", experimentId). 
Errorf("get chaosblade tool pod for destroying failed on %s node, %v", obj.NodeName, err) return identifiers, err } generatedCommand := command if isNetworkTarget { newContainerId, err := getNewContainerIdByPod(obj.PodName, obj.Namespace, obj.ContainerName, experimentId, client) if err != nil { logrus.WithField("experiment", experimentId).Errorf("generate destroy docker command failed, %v", err) continue } generatedCommand = fmt.Sprintf("%s --container-id %s", generatedCommand, newContainerId) } else { if obj.Id != "" { generatedCommand = fmt.Sprintf("%s --uid %s", command, obj.Id) } generatedCommand = fmt.Sprintf("%s --container-name %s", generatedCommand, obj.ContainerName) } identifierInPod := ExperimentIdentifierInPod{ ContainerObjectMeta: containerObjectMetaList[idx], Command: generatedCommand, ChaosBladeContainerName: chaosblade.DaemonsetPodName, ChaosBladeNamespace: chaosblade.DaemonsetPodNamespace, ChaosBladePodName: daemonsetPodName, } identifiers = append(identifiers, identifierInPod) } return identifiers, nil } func generateCreateDockerCommands(experimentId string, expModel *spec.ExpModel, containerObjectMetaList ContainerMatchedList, matchers string, client *channel.Client, ) ([]ExperimentIdentifierInPod, error) { command := fmt.Sprintf("%s create docker %s %s %s", getTargetChaosBladeBin(expModel), expModel.Target, expModel.ActionName, matchers) identifiers := make([]ExperimentIdentifierInPod, 0) for idx, obj := range containerObjectMetaList { daemonsetPodName, err := GetChaosBladeDaemonsetPodName(obj.NodeName, client) if err != nil { logrus.WithField("experiment", experimentId). 
Errorf("get chaosblade tool pod for creating failed on %s node, %v", obj.NodeName, err) return identifiers, err } generatedCommand := fmt.Sprintf("%s --container-id %s", command, obj.ContainerId) identifierInPod := ExperimentIdentifierInPod{ ContainerObjectMeta: containerObjectMetaList[idx], Command: generatedCommand, ChaosBladeContainerName: chaosblade.DaemonsetPodName, ChaosBladeNamespace: chaosblade.DaemonsetPodNamespace, ChaosBladePodName: daemonsetPodName, } identifiers = append(identifiers, identifierInPod) } return identifiers, nil } func generateDestroyCriCommands( experimentId string, expModel *spec.ExpModel, containerObjectMetaList ContainerMatchedList, matchers string, client *channel.Client, ) ([]ExperimentIdentifierInPod, error) { command := fmt.Sprintf("%s destroy cri %s %s %s", getTargetChaosBladeBin(expModel), expModel.Target, expModel.ActionName, matchers) identifiers := make([]ExperimentIdentifierInPod, 0) for idx, obj := range containerObjectMetaList { daemonsetPodName, err := GetChaosBladeDaemonsetPodName(obj.NodeName, client) if err != nil { logrus.WithField("experiment", experimentId). 
Errorf("get chaosblade tool pod for destroying failed on %s node, %v", obj.NodeName, err) return identifiers, err } generatedCommand := command if obj.Id != "" { generatedCommand = fmt.Sprintf("%s --uid %s", generatedCommand, obj.Id) } generatedCommand = fmt.Sprintf("%s --container-name %s --container-runtime %s", generatedCommand, obj.ContainerName, obj.ContainerRuntime) identifierInPod := ExperimentIdentifierInPod{ ContainerObjectMeta: containerObjectMetaList[idx], Command: generatedCommand, ChaosBladeContainerName: chaosblade.DaemonsetPodName, ChaosBladeNamespace: chaosblade.DaemonsetPodNamespace, ChaosBladePodName: daemonsetPodName, } identifiers = append(identifiers, identifierInPod) } return identifiers, nil } func generateCreateCriCommands(experimentId string, expModel *spec.ExpModel, containerObjectMetaList ContainerMatchedList, matchers string, client *channel.Client, ) ([]ExperimentIdentifierInPod, error) { command := fmt.Sprintf("%s create cri %s %s %s", getTargetChaosBladeBin(expModel), expModel.Target, expModel.ActionName, matchers) identifiers := make([]ExperimentIdentifierInPod, 0) for idx, obj := range containerObjectMetaList { daemonsetPodName, err := GetChaosBladeDaemonsetPodName(obj.NodeName, client) if err != nil { logrus.WithField("experiment", experimentId). 
Errorf("get chaosblade tool pod for creating failed on %s node, %v", obj.NodeName, err) return identifiers, err } generatedCommand := fmt.Sprintf("%s --container-id %s --container-runtime %s", command, obj.ContainerId, containerObjectMetaList[idx].ContainerRuntime) identifierInPod := ExperimentIdentifierInPod{ ContainerObjectMeta: containerObjectMetaList[idx], Command: generatedCommand, ChaosBladeContainerName: chaosblade.DaemonsetPodName, ChaosBladeNamespace: chaosblade.DaemonsetPodNamespace, ChaosBladePodName: daemonsetPodName, } identifiers = append(identifiers, identifierInPod) } return identifiers, nil } func deployChaosBlade(experimentId string, expModel *spec.ExpModel, obj ContainerObjectMeta, override bool, client *channel.Client, ) *spec.Response { logrusField := logrus.WithField("experiment", experimentId) chaosBladePath := getTargetChaosBladePath(expModel) options := DeployOptions{ Container: obj.ContainerName, Namespace: obj.Namespace, PodName: obj.PodName, client: client, } deploy, err := getDeployMode(options, expModel) if err != nil { util.Errorf(experimentId, util.GetRunFuncName(), spec.ParameterLess.Sprintf(ChaosBladeDownloadUrlFlag.Name)) return spec.ResponseFailWithFlags(spec.ParameterLess, ChaosBladeDownloadUrlFlag.Name) } logrusField.Infof("deploy chaosblade under override with %t value", override) chaosBladeBinPath := path.Join(chaosBladePath, "bin") if err := options.CheckFileExists(chaosBladeBinPath); err != nil { // create chaosblade path if err := options.CreateDir(chaosBladeBinPath); err != nil { util.Errorf(experimentId, util.GetRunFuncName(), fmt.Sprintf("create chaosblade dir: %s, failed! 
err: %s", chaosBladeBinPath, err.Error())) return spec.ResponseFailWithFlags(spec.ParameterInvalidBladePathError, ChaosBladePathFlag.Name, chaosBladeBinPath, err) } } bladePath := path.Join(chaosBladePath, "blade") if override || options.CheckFileExists(bladePath) != nil { if err := deploy.DeployToPod(experimentId, chaosblade.OperatorChaosBladeBlade, bladePath); err != nil { util.Errorf(experimentId, util.GetRunFuncName(), fmt.Sprintf("deploy blade failed! dir: %s, err: %s", bladePath, err.Error())) return spec.ResponseFailWithFlags(spec.DeployChaosBladeFailed, bladePath, err) } } yamlPath := path.Join(chaosBladePath, "yaml") if override || options.CheckFileExists(yamlPath) != nil { if err := deploy.DeployToPod(experimentId, chaosblade.OperatorChaosBladeYaml, yamlPath); err != nil { util.Errorf(experimentId, util.GetRunFuncName(), fmt.Sprintf("deploy yaml failed! dir: %s, err: %s", yamlPath, err.Error())) return spec.ResponseFailWithFlags(spec.DeployChaosBladeFailed, yamlPath, err) } } chaosOSPath := path.Join(chaosBladePath, "bin", "chaos_os") if override || options.CheckFileExists(chaosOSPath) != nil { if err := deploy.DeployToPod(experimentId, path.Join(chaosblade.OperatorChaosBladeBin, "chaos_os"), chaosOSPath); err != nil { util.Errorf(experimentId, util.GetRunFuncName(), fmt.Sprintf("deploy chaos_os failed! 
dir: %s, err: %s", chaosOSPath, err.Error())) return spec.ResponseFailWithFlags(spec.DeployChaosBladeFailed, chaosOSPath, err) } } // copy files as needed for _, program := range expModel.ActionPrograms { var programFile, operatorProgramFile string switch program { case "java": programFile = path.Join(chaosBladePath, "lib") operatorProgramFile = chaosblade.OperatorChaosBladeLib default: programFile = path.Join(chaosBladePath, "bin", program) operatorProgramFile = path.Join(chaosblade.OperatorChaosBladeBin, program) } if !override && options.CheckFileExists(programFile) == nil { logrusField.WithField("program", programFile).Infof("program exists") continue } err := deploy.DeployToPod(experimentId, operatorProgramFile, programFile) logrusField = logrusField.WithFields(logrus.Fields{ "container": obj.ContainerName, "pod": obj.PodName, "namespace": obj.Namespace, }) if err != nil { util.Errorf(experimentId, util.GetRunFuncName(), fmt.Sprintf("copy chaosblade to pod failed! dir: %s, err: %s", yamlPath, err.Error())) return spec.ResponseFailWithFlags(spec.K8sExecFailed, "copyToPod", err) } logrusField.Infof("deploy %s success", programFile) } return spec.Success() } func getNewContainerIdByPod(podName, podNamespace, containerName, experimentId string, client *channel.Client) (string, error) { pod := v1.Pod{} err := client.Get(context.TODO(), types.NamespacedName{Namespace: podNamespace, Name: podName}, &pod) if err != nil { logrus.WithFields( logrus.Fields{ "experiment": experimentId, "containerName": containerName, }, ).Warningf("can not find the pod by %s name in %s namespace, %v", podName, podNamespace, err) return "", err } containerStatuses := pod.Status.ContainerStatuses if containerStatuses == nil { return "", fmt.Errorf("cannot find containers in %s pod", podName) } for _, containerStatus := range containerStatuses { if containerName == containerStatus.Name { _, containerLongId := TruncateContainerObjectMetaUid(containerStatus.ContainerID) if len(containerLongId) 
> 12 { return containerLongId[:12], nil } return "", fmt.Errorf("the container %s id is illegal", containerLongId) } } return "", fmt.Errorf("cannot find the %s container in %s pod", containerName, podName) } ================================================ FILE: exec/model/executor_nsexec.go ================================================ /* * Copyright 2025 The ChaosBlade Authors * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package model import ( "context" "fmt" "strings" "sync" "github.com/chaosblade-io/chaosblade-exec-cri/exec/container" "github.com/chaosblade-io/chaosblade-spec-go/spec" "github.com/sirupsen/logrus" v1 "k8s.io/api/core/v1" apierrors "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/types" "github.com/chaosblade-io/chaosblade-operator/channel" "github.com/chaosblade-io/chaosblade-operator/pkg/apis/chaosblade/v1alpha1" "github.com/chaosblade-io/chaosblade-operator/pkg/runtime/chaosblade" ) type CommonExecutor struct { Client *channel.Client } func (e *CommonExecutor) Name() string { return "CommonExecutor" } func (e *CommonExecutor) SetChannel(channel spec.Channel) { } func (e *CommonExecutor) Exec(uid string, ctx context.Context, expModel *spec.ExpModel) *spec.Response { logrusField := logrus.WithField("experiment", GetExperimentIdFromContext(ctx)) experimentStatus := v1alpha1.ExperimentStatus{ ResStatuses: make([]v1alpha1.ResourceStatus, 0), } experimentIdentifiers, err := getExperimentIdentifiersWithNsexec(ctx, expModel, e.Client) if err != 
nil { logrusField.Errorf("get experiment identifiers failed, err: %s", err.Error()) return spec.ResponseFailWithResult(spec.GetIdentifierFailed, v1alpha1.CreateFailExperimentStatus(err.Error(), []v1alpha1.ResourceStatus{}), err) } logrusField.Infof("experiment identifiers: %v", experimentIdentifiers) statuses := experimentStatus.ResStatuses success := true _, isDestroy := spec.IsDestroy(ctx) updateResultLock := &sync.Mutex{} execCommandInPod := func(i int) { execSuccess := true identifier := experimentIdentifiers[i] rsStatus := v1alpha1.ResourceStatus{ Kind: expModel.Scope, Identifier: identifier.GetIdentifier(), Id: identifier.Id, } if identifier.Error != "" { rsStatus.CreateFailResourceStatus(identifier.Error, spec.K8sExecFailed.Code) execSuccess = false } else if identifier.PodName != "" { // check if pod exist pod := &v1.Pod{} err := e.Client.Get(context.TODO(), types.NamespacedName{ Namespace: identifier.Namespace, Name: identifier.PodName, }, pod) if err != nil { if apierrors.IsNotFound(err) { // pod if not exist, the execution is considered successful. 
msg := fmt.Sprintf("pod: %s in %s not found, skip to execute command in it", identifier.PodName, identifier.Namespace) logrusField.Warningln(msg) rsStatus.CreateSuccessResourceStatus() rsStatus.Error = msg success = true } else { // if get pod error, the execution is considered failure msg := fmt.Sprintf("get pod: %s in %s error", identifier.PodName, identifier.Namespace) rsStatus.CreateFailResourceStatus(msg, spec.K8sExecFailed.Code) execSuccess = false } } } if execSuccess { logrusField.Infof("execute identifier: %+v", identifier) execSuccess, rsStatus = execCommands(isDestroy, rsStatus, identifier, e.Client) } updateResultLock.Lock() statuses = append(statuses, rsStatus) // If false occurs once, the result is fails success = success && execSuccess updateResultLock.Unlock() } ParallelizeExec(len(experimentIdentifiers), execCommandInPod) logrusField.Infof("success: %t, statuses: %+v", success, statuses) if success { experimentStatus.State = v1alpha1.SuccessState } else { experimentStatus.State = v1alpha1.ErrorState if len(statuses) == 0 { experimentStatus.Error = "the resources not found" } else { experimentStatus.Error = "see resStatus for the error details" } } experimentStatus.Success = success experimentStatus.ResStatuses = append(experimentStatus.ResStatuses, statuses...) 
checkExperimentStatus(ctx, expModel, statuses, experimentIdentifiers, e.Client)
	return spec.ReturnResultIgnoreCode(experimentStatus)
}

// getExperimentIdentifiersWithNsexec builds, for every container object carried
// in the context, the chaosblade command that will be executed through the
// chaosblade daemonset pod running on the same node (nsexec mode).
// It returns one ExperimentIdentifierInPod per target container, or an error
// when the context carries no container metadata, the container runtime is
// unsupported, or the daemonset pod for a node cannot be resolved.
func getExperimentIdentifiersWithNsexec(ctx context.Context, expModel *spec.ExpModel, client *channel.Client) ([]ExperimentIdentifierInPod, error) {
	// The uid is appended per identifier below; drop any inherited flag value.
	delete(expModel.ActionFlags, "uid")
	containerObjectMetaList, err := GetContainerObjectMetaListFromContext(ctx)
	if err != nil {
		return []ExperimentIdentifierInPod{}, err
	}
	excludeFlagsFunc := ExcludeKeyFunc()
	matchers := spec.ConvertExpMatchersToString(expModel, excludeFlagsFunc)
	experimentId := GetExperimentIdFromContext(ctx)
	_, destroy := spec.IsDestroy(ctx)

	// Node-scoped experiments follow a dedicated code path.
	isNodeScope := expModel.Scope == "node"
	if isNodeScope {
		return getNodeExperimentIdentifiers(experimentId, expModel, containerObjectMetaList, matchers, destroy, client)
	}

	var (
		scope  = "cri"
		handle = "create"
	)
	if destroy {
		handle = "destroy"
	}
	// Base command; per-container selector flags are appended in the loop.
	command := fmt.Sprintf("%s %s %s %s %s %s", getTargetChaosBladeBin(expModel), handle, scope, expModel.Target, expModel.ActionName, matchers)
	identifiers := make([]ExperimentIdentifierInPod, 0)
	for idx, obj := range containerObjectMetaList {
		var generatedCommand string
		// Destroying a network experiment (except dns) targets the pod sandbox
		// container selected by labels, instead of the business container id.
		if expModel.Target == "network" && handle == "destroy" && expModel.ActionName != "dns" {
			labels := []string{
				fmt.Sprintf("io.kubernetes.pod.name=%s", obj.PodName),
				fmt.Sprintf("io.kubernetes.pod.namespace=%s", obj.Namespace),
			}
			// The sandbox label differs per runtime.
			if obj.ContainerRuntime == container.DockerRuntime {
				labels = append(labels, "io.kubernetes.docker.type=podsandbox")
			} else if obj.ContainerRuntime == container.ContainerdRuntime {
				labels = append(labels, "io.cri-containerd.kind=sandbox")
			} else {
				logrus.WithField("experiment", experimentId).
					Errorf("unsupported container runtime %s", obj.ContainerRuntime)
				return identifiers, fmt.Errorf("unsupported container runtime %s", obj.ContainerRuntime)
			}
			generatedCommand = fmt.Sprintf("%s --container-label-selector %s --container-runtime %s", command, strings.Join(labels, ","), obj.ContainerRuntime)
		} else {
			generatedCommand = fmt.Sprintf("%s --container-id %s", command, obj.ContainerId)
			if expModel.ActionProcessHang {
				// The daemonset mounts the host cgroup tree at /host-sys/fs/cgroup.
				generatedCommand = fmt.Sprintf("%s --cgroup-root /host-sys/fs/cgroup", generatedCommand)
			}
			if len(obj.ContainerRuntime) > 0 {
				generatedCommand = fmt.Sprintf("%s --container-runtime %s", generatedCommand, obj.ContainerRuntime)
			}
			if obj.Id != "" {
				generatedCommand = fmt.Sprintf("%s --uid %s", generatedCommand, obj.Id)
			}
		}
		// The command runs inside the chaosblade daemonset pod on the target node.
		daemonsetPodName, err := GetChaosBladeDaemonsetPodName(obj.NodeName, client)
		if err != nil {
			logrus.WithField("experiment", experimentId).
				Errorf("get chaosblade tool pod for destroying failed on %s node, %v", obj.NodeName, err)
			return identifiers, err
		}
		identifierInPod := ExperimentIdentifierInPod{
			ContainerObjectMeta:     containerObjectMetaList[idx],
			Command:                 generatedCommand,
			ChaosBladeContainerName: chaosblade.DaemonsetPodName,
			ChaosBladeNamespace:     chaosblade.DaemonsetPodNamespace,
			ChaosBladePodName:       daemonsetPodName,
		}
		identifiers = append(identifiers, identifierInPod)
	}
	return identifiers, nil
}

================================================
FILE: exec/model/filter.go
================================================

/*
 * Copyright 2025 The ChaosBlade Authors
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and * limitations under the License. */ package model import ( "fmt" "strings" "github.com/chaosblade-io/chaosblade-spec-go/spec" "github.com/sirupsen/logrus" v1 "k8s.io/api/core/v1" pkglabels "k8s.io/apimachinery/pkg/labels" "k8s.io/apimachinery/pkg/selection" ) func GetOneAvailableContainerIdFromPod(pod v1.Pod) (containerId, containerName, runtime string, err error) { containerStatuses := pod.Status.ContainerStatuses if len(containerStatuses) == 0 { return "", "", "", fmt.Errorf("the container statues is empty in %s pod", pod.Name) } for _, containerStatus := range containerStatuses { if containerStatus.State.Running == nil { continue } runtime, containerId := TruncateContainerObjectMetaUid(containerStatus.ContainerID) return containerId, containerStatus.Name, runtime, nil } return "", "", "", fmt.Errorf("cannot find a valiable container in %s pod", pod.Name) } func ParseLabels(labels string) []pkglabels.Requirement { labelArr := strings.Split(labels, ",") requirements := make([]pkglabels.Requirement, 0, len(labelArr)) labelsMap := make(map[string][]string, 0) if labels == "" { return requirements } for _, label := range labelArr { keyValue := strings.SplitN(label, "=", 2) if len(keyValue) != 2 { logrus.Warningf("label %s is illegal", label) continue } if labelsMap[keyValue[0]] == nil { valueArr := make([]string, 0) valueArr = append(valueArr, keyValue[1]) labelsMap[keyValue[0]] = valueArr } else { labelsMap[keyValue[0]] = append(labelsMap[keyValue[0]], keyValue[1]) } } for label, value := range labelsMap { requirement, err := pkglabels.NewRequirement(label, selection.In, value) if err != nil { logrus.Warningf("requirement %s-%s is illegal", label, value) continue } requirements = append(requirements, *requirement) } return requirements } func MapContains(bigMap map[string]string, requirements []pkglabels.Requirement) bool { if bigMap == nil || requirements == nil { return false } labelSet := 
pkglabels.Set(bigMap) for i := 0; i < len(requirements); i++ { if requirements[i].Matches(labelSet) { return true } } return false } func CheckFlags(flags map[string]string) *spec.Response { // Must include one flag in the count, percent, labels and names expFlags := []*spec.ExpFlag{ ResourceCountFlag, ResourcePercentFlag, ResourceLabelsFlag, ResourceNamesFlag, } value := "" flagsNames := make([]string, 0) for _, flag := range expFlags { flagsNames = append(flagsNames, flag.Name) value = fmt.Sprintf("%s%s", value, flags[flag.Name]) } if value == "" { return spec.ResponseFailWithFlags(spec.ParameterLess, strings.Join(flagsNames, "|")) } return spec.Success() } ================================================ FILE: exec/model/filter_pod.go ================================================ /* * Copyright 2025 The ChaosBlade Authors * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. 
*/ package model import ( "context" "math/rand" "strings" "time" "github.com/chaosblade-io/chaosblade-spec-go/spec" "github.com/sirupsen/logrus" v1 "k8s.io/api/core/v1" pkglabels "k8s.io/apimachinery/pkg/labels" "k8s.io/apimachinery/pkg/types" "sigs.k8s.io/controller-runtime/pkg/client" "github.com/chaosblade-io/chaosblade-operator/channel" ) const DefaultNamespace = "default" func CheckPodFlags(flags map[string]string) *spec.Response { namespace := flags[ResourceNamespaceFlag.Name] if namespace == "" { return spec.ResponseFailWithFlags(spec.ParameterLess, ResourceNamespaceFlag.Name) } namespacesValue := strings.Split(namespace, ",") if len(namespacesValue) > 1 { return spec.ResponseFailWithFlags(spec.ParameterInvalidNSNotOne, ResourceNamespaceFlag.Name) } return CheckFlags(flags) } // GetMatchedPodResources return matched pods func (b *BaseExperimentController) GetMatchedPodResources(ctx context.Context, expModel spec.ExpModel) ([]v1.Pod, *spec.Response) { flags := expModel.ActionFlags if flags[ResourceNamespaceFlag.Name] == "" { expModel.ActionFlags[ResourceNamespaceFlag.Name] = DefaultNamespace } if resp := CheckPodFlags(flags); !resp.Success { return nil, resp } pods, resp := resourceFunc(ctx, b.Client, flags) if !resp.Success { return pods, resp } return b.filterByOtherFlags(pods, flags) } func (b *BaseExperimentController) filterByOtherFlags(pods []v1.Pod, flags map[string]string) ([]v1.Pod, *spec.Response) { random := flags["random"] == "true" groupKey := flags[ResourceGroupKeyFlag.Name] if groupKey == "" { count, resp := GetResourceCount(len(pods), flags) if !resp.Success { return pods[:count], resp } if random { return randomPodSelected(pods, count), spec.Success() } return pods[:count], spec.Success() } groupPods := make(map[string][]v1.Pod, 0) keys := strings.Split(groupKey, ",") for _, pod := range pods { for _, key := range keys { labelValue := pod.Labels[key] podList := groupPods[labelValue] if podList == nil { podList = []v1.Pod{} 
groupPods[labelValue] = podList } groupPods[labelValue] = append(podList, pod) } } result := make([]v1.Pod, 0) for _, podList := range groupPods { count, resp := GetResourceCount(len(podList), flags) if !resp.Success { return pods[:count], resp } if random { result = append(result, randomPodSelected(podList, count)...) } else { result = append(result, podList[:count]...) } } if len(result) == 0 { return result, spec.ResponseFailWithFlags(spec.ParameterInvalidK8sPodQuery, ResourceGroupKeyFlag.Name) } return result, spec.Success() } // resourceFunc is used to query the target resource var resourceFunc = func(ctx context.Context, client2 *channel.Client, flags map[string]string) ([]v1.Pod, *spec.Response) { namespace := flags[ResourceNamespaceFlag.Name] labels := flags[ResourceLabelsFlag.Name] requirements := ParseLabels(labels) logrusField := logrus.WithField("experiment", GetExperimentIdFromContext(ctx)) pods := make([]v1.Pod, 0) names := flags[ResourceNamesFlag.Name] logrusField.Debugf("namespace: %s, labels: %s, names: %s", namespace, labels, names) if names != "" { nameArr := strings.Split(names, ",") for _, name := range nameArr { pod := v1.Pod{} err := client2.Get(context.TODO(), types.NamespacedName{Namespace: namespace, Name: name}, &pod) if err != nil { logrusField.Warningf("can not find the pod by %s name in %s namespace, %v", name, namespace, err) continue } if len(requirements) > 0 { if MapContains(pod.Labels, requirements) { pods = append(pods, pod) } } else { pods = append(pods, pod) } } logrusField.Infof("get pods by names %s, len is %d", names, len(pods)) if len(pods) == 0 { return pods, spec.ResponseFailWithFlags(spec.ParameterInvalidK8sPodQuery, names) } return pods, spec.Success() } if labels != "" && len(requirements) == 0 { msg := spec.ParameterIllegal.Sprintf(ResourceLabelsFlag.Name, labels, "data format error") logrusField.Warningln(msg) return pods, spec.ResponseFailWithFlags(spec.ParameterLess, ResourceLabelsFlag.Name, labels, "data format 
error, example: key=value") } if len(requirements) > 0 { podList := v1.PodList{} selector := pkglabels.NewSelector().Add(requirements...) opts := client.ListOptions{Namespace: namespace, LabelSelector: selector} err := client2.List(context.TODO(), &podList, &opts) if err != nil { return pods, spec.ResponseFailWithFlags(spec.K8sExecFailed, "PodList", err) } if len(podList.Items) == 0 { return pods, spec.ResponseFailWithFlags(spec.ParameterInvalidK8sPodQuery, ResourceLabelsFlag.Name) } // filter out running but TERMINATING pods for _, p := range podList.Items { if p.ObjectMeta.DeletionTimestamp != nil { logrusField.Debugf("the pod is being deleted: %s", p.Name) continue } pods = append(pods, p) } logrusField.Infof("get pods by labels %s, len is %d", labels, len(pods)) } return pods, spec.Success() } func randomPodSelected(pods []v1.Pod, count int) []v1.Pod { if len(pods) == 0 { return pods } rand.Seed(time.Now().UnixNano()) for i := len(pods) - 1; i > 0; i-- { num := rand.Intn(i + 1) pods[i], pods[num] = pods[num], pods[i] } return pods[:count] } ================================================ FILE: exec/model/filter_pod_test.go ================================================ /* * Copyright 2025 The ChaosBlade Authors * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. 
*/ package model import ( "reflect" "testing" v1 "k8s.io/api/core/v1" v12 "k8s.io/apimachinery/pkg/apis/meta/v1" ) func Test_randomSelected(t *testing.T) { originList := []v1.Pod{ {ObjectMeta: v12.ObjectMeta{Name: "1"}}, {ObjectMeta: v12.ObjectMeta{Name: "2"}}, {ObjectMeta: v12.ObjectMeta{Name: "3"}}, {ObjectMeta: v12.ObjectMeta{Name: "4"}}, {ObjectMeta: v12.ObjectMeta{Name: "5"}}, {ObjectMeta: v12.ObjectMeta{Name: "6"}}, {ObjectMeta: v12.ObjectMeta{Name: "7"}}, {ObjectMeta: v12.ObjectMeta{Name: "8"}}, {ObjectMeta: v12.ObjectMeta{Name: "9"}}, {ObjectMeta: v12.ObjectMeta{Name: "10"}}, } randomList := randomPodSelected(originList, 5) var randomNameList []string for _, item := range randomList { randomNameList = append(randomNameList, item.ObjectMeta.Name) } t.Logf("randomNameList()=%v", randomNameList) if reflect.DeepEqual(randomNameList, []string{"1", "2", "3", "4", "5"}) { t.Errorf("randomPodSelected() is invalid") } } ================================================ FILE: exec/model/model.go ================================================ /* * Copyright 2025 The ChaosBlade Authors * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. 
*/ package model import ( "context" "github.com/chaosblade-io/chaosblade-exec-cri/exec" "github.com/chaosblade-io/chaosblade-spec-go/spec" "github.com/chaosblade-io/chaosblade-operator/channel" "github.com/chaosblade-io/chaosblade-operator/pkg/apis/chaosblade/v1alpha1" ) // ActionPreProcessor defines the interface for action-specific pre-processing // before the main create/destroy flow. Actions that don't require pod matching // or need custom validation should implement this interface. type ActionPreProcessor interface { // PreCreate is called before the main create flow. // Returns a modified context for downstream processing, or a Response // if validation fails or early return is needed. // If Response is nil, the main flow continues with the returned context. PreCreate(ctx context.Context, expModel *spec.ExpModel, client *channel.Client) (context.Context, *spec.Response) // PreDestroy is called before the main destroy flow. // Returns a modified context for downstream processing, or a Response // if validation fails or early return is needed. // If Response is nil, the main flow continues with the returned context. 
PreDestroy(ctx context.Context, expModel *spec.ExpModel, client *channel.Client, oldExpStatus v1alpha1.ExperimentStatus) (context.Context, *spec.Response) } // ResourceExpModelSpec contains node, pod, container type ResourceExpModelSpec interface { Scope() string ExpModels() map[string]spec.ExpModelCommandSpec GetExpActionModelSpec(target, action string) spec.ExpActionCommandSpec } func NewBaseResourceExpModelSpec(scopeName string, client *channel.Client) BaseResourceExpModelSpec { return BaseResourceExpModelSpec{ ScopeName: scopeName, Client: client, ExpModelSpecs: make(map[string]spec.ExpModelCommandSpec, 0), } } type BaseResourceExpModelSpec struct { ScopeName string Client *channel.Client ExpModelSpecs map[string]spec.ExpModelCommandSpec } func (b *BaseResourceExpModelSpec) Scope() string { return b.ScopeName } func (b *BaseResourceExpModelSpec) ExpModels() map[string]spec.ExpModelCommandSpec { return b.ExpModelSpecs } func (b *BaseResourceExpModelSpec) GetExpActionModelSpec(target, actionName string) spec.ExpActionCommandSpec { commandSpec := b.ExpModelSpecs[target] if commandSpec == nil { return nil } actions := commandSpec.Actions() if actions == nil { return nil } for _, action := range actions { if action.Name() == actionName { return action } for _, alias := range action.Aliases() { if alias == actionName { return action } } } return nil } func (b *BaseResourceExpModelSpec) RegisterExpModels(expModel ...spec.ExpModelCommandSpec) { for _, model := range expModel { b.ExpModelSpecs[model.Name()] = model } } // SubResourceExpModelSpec contains os exps in node, network exp in pod and os exps in container type SubResourceExpModelSpec interface { ExpModels() []spec.ExpModelCommandSpec Executor() spec.Executor } type BaseSubResourceExpModelSpec struct { ExpModelSpecs []spec.ExpModelCommandSpec ExpExecutor spec.Executor } func (b *BaseSubResourceExpModelSpec) ExpModels() []spec.ExpModelCommandSpec { return b.ExpModelSpecs } func (b *BaseSubResourceExpModelSpec) 
Executor() spec.Executor { return b.ExpExecutor } var ResourceCountFlag = &spec.ExpFlag{ Name: "evict-count", Desc: "Count of affected resource", NoArgs: false, Required: false, } var ResourcePercentFlag = &spec.ExpFlag{ Name: "evict-percent", Desc: "Percent of affected resource, integer value without %", NoArgs: false, Required: false, } func GetResourceCoverageFlags() []spec.ExpFlagSpec { return []spec.ExpFlagSpec{ ResourceCountFlag, ResourcePercentFlag, } } var ResourceNamesFlag = &spec.ExpFlag{ Name: "names", Desc: "Resource names, such as pod name. You must add namespace flag for it. Multiple parameters are separated directly by commas", NoArgs: false, Required: false, } var ResourceNamespaceFlag = &spec.ExpFlag{ Name: "namespace", Desc: "Namespace, such as default, only one value can be specified", NoArgs: false, Required: true, } var ResourceLabelsFlag = &spec.ExpFlag{ Name: "labels", Desc: "Label selector, the relationship between values that are or", NoArgs: false, Required: false, } var ResourceGroupKeyFlag = &spec.ExpFlag{ Name: "evict-group", Desc: "Group key from labels", NoArgs: false, Required: false, } var ContainerIdsFlag = &spec.ExpFlag{ Name: "container-ids", Desc: "Container ids", NoArgs: false, Required: false, } var ContainerNamesFlag = &spec.ExpFlag{ Name: "container-names", Desc: "Container names", NoArgs: false, Required: false, } var ContainerIndexFlag = &spec.ExpFlag{ Name: "container-index", Desc: "Container index, start from 0", } var ChaosBladePathFlag = &spec.ExpFlag{ Name: "chaosblade-path", Desc: "Chaosblade tool deployment path, default value is /opt. Please select a path with write permission", } var ChaosBladeDownloadUrlFlag = &spec.ExpFlag{ Name: "chaosblade-download-url", Desc: "The chaosblade downloaded address. 
If you use download deployment mode, you must specify the value, or config chaosblade-download-url when deploying the operator", } var ( DownloadMode = "download" CopyMode = "copy" ) var ChaosBladeDeployModeFlag = &spec.ExpFlag{ Name: "chaosblade-deploy-mode", Desc: "The mode of chaosblade deployment in container, the values are copy and download, the default value is copy which copy tool from the operator to the target container. If you select download mode, the operator will download chaosblade tool from the chaosblade-download-url.", } var IsDockerNetworkFlag = &spec.ExpFlag{ Name: "is-docker-network", Desc: "Used when a docker container is used and there is no tc command in the target container. Just for docker command, Deprecated! Please use use-sidecar-container-network flag.", NoArgs: true, Required: false, } var UseSidecarContainerNetworkFlag = &spec.ExpFlag{ Name: "use-sidecar-container-network", Desc: "When there is no tc command in the target container. Set the sidecar container network true.", NoArgs: true, Required: false, } func GetNetworkFlags() []spec.ExpFlagSpec { return []spec.ExpFlagSpec{ IsDockerNetworkFlag, UseSidecarContainerNetworkFlag, } } func GetContainerFlags() []spec.ExpFlagSpec { return []spec.ExpFlagSpec{ ContainerIdsFlag, ContainerNamesFlag, ContainerIndexFlag, } } func GetResourceCommonFlags() []spec.ExpFlagSpec { return []spec.ExpFlagSpec{ ResourceNamesFlag, ResourceNamespaceFlag, ResourceLabelsFlag, ResourceGroupKeyFlag, } } func GetChaosBladeFlags() []spec.ExpFlagSpec { return []spec.ExpFlagSpec{ ChaosBladePathFlag, exec.ChaosBladeOverrideFlag, ChaosBladeDeployModeFlag, ChaosBladeDownloadUrlFlag, } } func GetResourceFlagNames() map[string]spec.Empty { flagNames := []string{ ResourceCountFlag.Name, ResourcePercentFlag.Name, ResourceNamesFlag.Name, ResourceNamespaceFlag.Name, ResourceLabelsFlag.Name, ContainerIdsFlag.Name, ContainerNamesFlag.Name, ContainerIndexFlag.Name, ChaosBladePathFlag.Name, exec.ChaosBladeOverrideFlag.Name, 
ChaosBladeDeployModeFlag.Name, ChaosBladeDownloadUrlFlag.Name, IsDockerNetworkFlag.Name, UseSidecarContainerNetworkFlag.Name, } names := make(map[string]spec.Empty, 0) for _, name := range flagNames { names[name] = spec.Empty{} } return names } ================================================ FILE: exec/model/osexp.go ================================================ /* * Copyright 2025 The ChaosBlade Authors * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package model import ( "github.com/chaosblade-io/chaosblade-exec-os/exec/cpu" "github.com/chaosblade-io/chaosblade-exec-os/exec/disk" "github.com/chaosblade-io/chaosblade-exec-os/exec/file" "github.com/chaosblade-io/chaosblade-exec-os/exec/mem" "github.com/chaosblade-io/chaosblade-exec-os/exec/network" "github.com/chaosblade-io/chaosblade-exec-os/exec/process" "github.com/chaosblade-io/chaosblade-exec-os/exec/script" "github.com/chaosblade-io/chaosblade-spec-go/spec" ) type OSSubResourceModelSpec struct { BaseSubResourceExpModelSpec } func NewOSSubResourceModelSpec() SubResourceExpModelSpec { modelSpec := &OSSubResourceModelSpec{ BaseSubResourceExpModelSpec{ ExpModelSpecs: []spec.ExpModelCommandSpec{ cpu.NewCpuCommandModelSpec(), network.NewNetworkCommandSpec(), process.NewProcessCommandModelSpec(), disk.NewDiskCommandSpec(), mem.NewMemCommandModelSpec(), file.NewFileCommandSpec(), script.NewScriptCommandModelSpec(), }, }, } return modelSpec } ================================================ FILE: exec/model/parallelizer.go 
================================================ /* * Copyright 2025 The ChaosBlade Authors * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package model import "sync" const ( maxWorkers = 64 // magic number ) type DoWorkFunc func(workID int) func ParallelizeExec(workCount int, doWork DoWorkFunc) { workers := maxWorkers toExec := make(chan int, workCount) for i := 0; i < workCount; i++ { toExec <- i } close(toExec) if workCount < workers { workers = workCount } wg := sync.WaitGroup{} wg.Add(workers) for i := 0; i < workers; i++ { go func() { defer wg.Done() for workID := range toExec { doWork(workID) } }() } wg.Wait() } ================================================ FILE: exec/node/cniexp.go ================================================ /* * Copyright 1999-2020 Alibaba Group Holding Ltd. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. 
*/ package node import ( "context" "encoding/base64" "encoding/json" "fmt" "sync" "github.com/chaosblade-io/chaosblade-spec-go/spec" "github.com/chaosblade-io/chaosblade-spec-go/util" "github.com/sirupsen/logrus" "github.com/chaosblade-io/chaosblade-operator/channel" "github.com/chaosblade-io/chaosblade-operator/exec/model" "github.com/chaosblade-io/chaosblade-operator/pkg/apis/chaosblade/v1alpha1" ) const ( CniBinPathFlag = "cni-bin-path" CniErrorMsgFlag = "error-msg" ) func NewCniExpModelCommandSpec(client *channel.Client) spec.ExpModelCommandSpec { return &CniExpModelCommandSpec{ spec.BaseExpModelCommandSpec{ ExpActions: []spec.ExpActionCommandSpec{ NewCniAddFaultActionSpec(client), NewCniDelFaultActionSpec(client), }, ExpFlags: []spec.ExpFlagSpec{}, }, } } type CniExpModelCommandSpec struct { spec.BaseExpModelCommandSpec } func (*CniExpModelCommandSpec) Name() string { return "cni" } func (*CniExpModelCommandSpec) ShortDesc() string { return "CNI fault experiment" } func (*CniExpModelCommandSpec) LongDesc() string { return "CNI fault experiment, simulate CNI plugin failures on the node" } func (*CniExpModelCommandSpec) Example() string { return `# Auto-discover CNI binary blade create k8s node-cni add_fault --names cn-hangzhou.192.168.0.205 --kubeconfig ~/.kube/config # Or specify explicitly blade create k8s node-cni add_fault --cni-bin-path /opt/cni/bin/calico --names cn-hangzhou.192.168.0.205 --kubeconfig ~/.kube/config` } // CniAddFaultActionSpec func NewCniAddFaultActionSpec(client *channel.Client) spec.ExpActionCommandSpec { return &CniAddFaultActionSpec{ spec.BaseExpActionCommandSpec{ ActionMatchers: []spec.ExpFlagSpec{ &spec.ExpFlag{ Name: CniBinPathFlag, Desc: "The full path of the CNI plugin binary. 
If not specified, auto-discovered from kubelet CNI config", }, }, ActionFlags: []spec.ExpFlagSpec{ &spec.ExpFlag{ Name: CniErrorMsgFlag, Desc: "Custom error message returned by the CNI plugin failure", }, }, ActionExecutor: &CniFaultExecutor{client: client, cniCommand: "ADD"}, ActionCategories: []string{model.CategorySystemContainer}, ActionExample: `# Simulate CNI ADD failure with auto-discovered CNI binary blade create k8s node-cni add_fault --names cn-hangzhou.192.168.0.205 --kubeconfig ~/.kube/config # Or specify the CNI binary path explicitly blade create k8s node-cni add_fault --cni-bin-path /opt/cni/bin/calico --names cn-hangzhou.192.168.0.205 --kubeconfig ~/.kube/config # With custom error message blade create k8s node-cni add_fault --cni-bin-path /opt/cni/bin/calico --error-msg "network unavailable" --names cn-hangzhou.192.168.0.205 --kubeconfig ~/.kube/config`, }, } } type CniAddFaultActionSpec struct { spec.BaseExpActionCommandSpec } func (*CniAddFaultActionSpec) Name() string { return "add_fault" } func (*CniAddFaultActionSpec) Aliases() []string { return []string{} } func (*CniAddFaultActionSpec) ShortDesc() string { return "Simulate CNI ADD failure" } func (*CniAddFaultActionSpec) LongDesc() string { return "Simulate CNI ADD failure, new pods will be stuck in ContainerCreating" } // CniDelFaultActionSpec func NewCniDelFaultActionSpec(client *channel.Client) spec.ExpActionCommandSpec { return &CniDelFaultActionSpec{ spec.BaseExpActionCommandSpec{ ActionMatchers: []spec.ExpFlagSpec{ &spec.ExpFlag{ Name: CniBinPathFlag, Desc: "The full path of the CNI plugin binary. 
If not specified, auto-discovered from kubelet CNI config", }, }, ActionFlags: []spec.ExpFlagSpec{ &spec.ExpFlag{ Name: CniErrorMsgFlag, Desc: "Custom error message returned by the CNI plugin failure", }, }, ActionExecutor: &CniFaultExecutor{client: client, cniCommand: "DEL"}, ActionCategories: []string{model.CategorySystemContainer}, ActionExample: `# Simulate CNI DEL failure with auto-discovered CNI binary blade create k8s node-cni del_fault --names cn-hangzhou.192.168.0.205 --kubeconfig ~/.kube/config # Or specify the CNI binary path explicitly blade create k8s node-cni del_fault --cni-bin-path /opt/cni/bin/calico --names cn-hangzhou.192.168.0.205 --kubeconfig ~/.kube/config`, }, } } type CniDelFaultActionSpec struct { spec.BaseExpActionCommandSpec } func (*CniDelFaultActionSpec) Name() string { return "del_fault" } func (*CniDelFaultActionSpec) Aliases() []string { return []string{} } func (*CniDelFaultActionSpec) ShortDesc() string { return "Simulate CNI DEL failure" } func (*CniDelFaultActionSpec) LongDesc() string { return "Simulate CNI DEL failure, terminating pods will be stuck" } // CniFaultExecutor type CniFaultExecutor struct { client *channel.Client cniCommand string // "ADD" or "DEL" } func (e *CniFaultExecutor) Name() string { return "cni_fault" } func (e *CniFaultExecutor) SetChannel(channel spec.Channel) { } func (e *CniFaultExecutor) Exec(uid string, ctx context.Context, expModel *spec.ExpModel) *spec.Response { if _, ok := spec.IsDestroy(ctx); ok { return e.destroy(uid, ctx, expModel) } return e.create(uid, ctx, expModel) } func (e *CniFaultExecutor) create(uid string, ctx context.Context, expModel *spec.ExpModel) *spec.Response { logrusField := logrus.WithField("experiment", model.GetExperimentIdFromContext(ctx)) containerObjectMetaList, err := model.GetContainerObjectMetaListFromContext(ctx) if err != nil { util.Errorf(uid, util.GetRunFuncName(), err.Error()) return spec.ResponseFailWithResult(spec.ContainerInContextNotFound, 
v1alpha1.CreateFailExperimentStatus(err.Error(), []v1alpha1.ResourceStatus{})) } cniBinPath := expModel.ActionFlags[CniBinPathFlag] errorMsg := expModel.ActionFlags[CniErrorMsgFlag] if errorMsg == "" { errorMsg = fmt.Sprintf("chaosblade: simulated CNI %s failure", e.cniCommand) } statuses := make([]v1alpha1.ResourceStatus, 0) success := true updateLock := &sync.Mutex{} execFunc := func(i int) { meta := containerObjectMetaList[i] status := v1alpha1.ResourceStatus{ Kind: "node", Identifier: meta.GetIdentifier(), Id: uid, } daemonsetPodName, err := model.GetChaosBladeDaemonsetPodName(meta.NodeName, e.client) if err != nil { logrusField.Errorf("get chaosblade daemonset pod on node %s failed: %v", meta.NodeName, err) status = status.CreateFailResourceStatus(err.Error(), spec.K8sExecFailed.Code) updateLock.Lock() statuses = append(statuses, status) success = false updateLock.Unlock() return } if daemonsetPodName == "" { errMsg := fmt.Sprintf("chaosblade daemonset pod not found on node %s", meta.NodeName) logrusField.Error(errMsg) status = status.CreateFailResourceStatus(errMsg, spec.K8sExecFailed.Code) updateLock.Lock() statuses = append(statuses, status) success = false updateLock.Unlock() return } resolvedPath := cniBinPath if resolvedPath == "" { discovered, discoverErr := discoverCniBinPath(e.client, daemonsetPodName) if discoverErr != nil { errMsg := fmt.Sprintf("auto-discover CNI binary on node %s failed: %v", meta.NodeName, discoverErr) logrusField.Error(errMsg) status = status.CreateFailResourceStatus(errMsg, spec.K8sExecFailed.Code) updateLock.Lock() statuses = append(statuses, status) success = false updateLock.Unlock() return } logrusField.Infof("auto-discovered CNI binary: %s on node %s", discovered, meta.NodeName) resolvedPath = discovered } script := generateCniCreateScript(resolvedPath, e.cniCommand, errorMsg) resp := execScriptInDaemonsetPod(e.client, daemonsetPodName, script) if resp.Success { status = status.CreateSuccessResourceStatus() } else { status 
= status.CreateFailResourceStatus(resp.Err, spec.K8sExecFailed.Code)
		}
		updateLock.Lock()
		if !resp.Success {
			success = false
		}
		statuses = append(statuses, status)
		updateLock.Unlock()
	}
	model.ParallelizeExec(len(containerObjectMetaList), execFunc)
	logrusField.Infof("cni %s fault create result, success: %t, statuses: %+v", e.cniCommand, success, statuses)
	if success {
		return spec.ReturnResultIgnoreCode(v1alpha1.CreateSuccessExperimentStatus(statuses))
	}
	return spec.ReturnResultIgnoreCode(v1alpha1.CreateFailExperimentStatus("see resStatuses for details", statuses))
}

// destroy rolls back the CNI fault on every targeted node. For each entry in the
// container-object list carried on the context it locates the chaosblade daemonset
// pod on that node, resolves the CNI binary path (explicit flag value or runtime
// auto-discovery), and runs the restore script that moves the .chaosblade.bak
// backup back into place. Per-node statuses are collected under a mutex because
// execFunc is run concurrently by model.ParallelizeExec.
func (e *CniFaultExecutor) destroy(uid string, ctx context.Context, expModel *spec.ExpModel) *spec.Response {
	logrusField := logrus.WithField("experiment", model.GetExperimentIdFromContext(ctx))
	containerObjectMetaList, err := model.GetContainerObjectMetaListFromContext(ctx)
	if err != nil {
		util.Errorf(uid, util.GetRunFuncName(), err.Error())
		return spec.ResponseFailWithResult(spec.ContainerInContextNotFound,
			v1alpha1.CreateFailExperimentStatus(err.Error(), []v1alpha1.ResourceStatus{}))
	}
	cniBinPath := expModel.ActionFlags[CniBinPathFlag]
	statuses := make([]v1alpha1.ResourceStatus, 0)
	success := true
	// Guards statuses and success; execFunc runs in parallel across nodes.
	updateLock := &sync.Mutex{}
	execFunc := func(i int) {
		meta := containerObjectMetaList[i]
		// Unlike create, the status carries the id recorded at create time and
		// is pre-marked with the destroyed state.
		status := v1alpha1.ResourceStatus{
			Kind:       "node",
			Identifier: meta.GetIdentifier(),
			Id:         meta.Id,
			State:      v1alpha1.DestroyedState,
		}
		daemonsetPodName, err := model.GetChaosBladeDaemonsetPodName(meta.NodeName, e.client)
		if err != nil {
			logrusField.Errorf("get chaosblade daemonset pod on node %s failed: %v", meta.NodeName, err)
			status = status.CreateFailResourceStatus(err.Error(), spec.K8sExecFailed.Code)
			updateLock.Lock()
			statuses = append(statuses, status)
			success = false
			updateLock.Unlock()
			return
		}
		if daemonsetPodName == "" {
			errMsg := fmt.Sprintf("chaosblade daemonset pod not found on node %s", meta.NodeName)
			logrusField.Error(errMsg)
			status = status.CreateFailResourceStatus(errMsg, spec.K8sExecFailed.Code)
			updateLock.Lock()
			statuses = append(statuses, status)
			success = false
			updateLock.Unlock()
			return
		}
		// When no --cni-bin-path flag was given, discover the binary from the
		// node's kubelet configuration, mirroring the create path so the same
		// binary is restored.
		resolvedPath := cniBinPath
		if resolvedPath == "" {
			discovered, discoverErr := discoverCniBinPath(e.client, daemonsetPodName)
			if discoverErr != nil {
				errMsg := fmt.Sprintf("auto-discover CNI binary on node %s failed: %v", meta.NodeName, discoverErr)
				logrusField.Error(errMsg)
				status = status.CreateFailResourceStatus(errMsg, spec.K8sExecFailed.Code)
				updateLock.Lock()
				statuses = append(statuses, status)
				success = false
				updateLock.Unlock()
				return
			}
			logrusField.Infof("auto-discovered CNI binary: %s on node %s", discovered, meta.NodeName)
			resolvedPath = discovered
		}
		script := generateCniDestroyScript(resolvedPath)
		resp := execScriptInDaemonsetPod(e.client, daemonsetPodName, script)
		if resp.Success {
			status.Success = true
		} else {
			status = status.CreateFailResourceStatus(resp.Err, spec.K8sExecFailed.Code)
		}
		updateLock.Lock()
		if !resp.Success {
			success = false
		}
		statuses = append(statuses, status)
		updateLock.Unlock()
	}
	model.ParallelizeExec(len(containerObjectMetaList), execFunc)
	logrusField.Infof("cni fault destroy result, success: %t, statuses: %+v", success, statuses)
	if success {
		return spec.ReturnResultIgnoreCode(v1alpha1.CreateDestroyedExperimentStatus(statuses))
	}
	return spec.ReturnResultIgnoreCode(v1alpha1.CreateFailExperimentStatus("see resStatuses for details", statuses))
}

// generateCniCreateScript builds the shell script that injects the fault: it
// backs up the real CNI binary to <path>.chaosblade.bak and replaces it with a
// wrapper that fails the targeted CNI_COMMAND and passes every other command
// through to the backup. The script reports its outcome as a JSON line on
// stdout (codes 404/409/500/200) and always exits 0 so the caller can parse it.
func generateCniCreateScript(cniBinPath string, cniCommand string, errorMsg string) string {
	backupPath := cniBinPath + ".chaosblade.bak"
	// Build the CNI error JSON template using encoding/json for proper escaping.
	// Use a placeholder for cniVersion which is determined at runtime from stdin.
	type cniError struct {
		CniVersion string `json:"cniVersion"`
		Code       int    `json:"code"`
		Msg        string `json:"msg"`
	}
	errJSON, _ := json.Marshal(cniError{CniVersion: "@@CNI_VER@@", Code: 100, Msg: errorMsg})
	b64ErrJSON := base64.StdEncoding.EncodeToString(errJSON)
	// The wrapper script: fail for targeted CNI_COMMAND, passthrough for all others
	// Per CNI spec: error result MUST be written to stdout (not stderr), and exit non-zero
	// We read stdin to extract cniVersion from network config for spec compliance
	wrapperContent := fmt.Sprintf(`#!/bin/sh
if [ "$CNI_COMMAND" = "%s" ]; then
cni_input=$(cat)
cni_ver=$(echo "$cni_input" | grep -o '"cniVersion" *: *"[^"]*"' | head -1 | grep -o '[0-9][0-9.]*')
[ -z "$cni_ver" ] && cni_ver="0.3.1"
printf '%%s' '%s' | base64 -d | sed "s/@@CNI_VER@@/$cni_ver/"
echo
exit 1
fi
exec %s "$@"
`, cniCommand, b64ErrJSON, backupPath)
	// Use base64 encoding to safely transport the wrapper content through shell
	// This avoids all complex single-quote escaping issues
	b64Content := base64.StdEncoding.EncodeToString([]byte(wrapperContent))
	script := fmt.Sprintf(`BIN_PATH='%s'
BACKUP_PATH='%s'
B64_CONTENT='%s'
if [ ! -f "$BIN_PATH" ]; then
echo '{"code":404,"success":false,"error":"CNI binary not found: '"$BIN_PATH"'"}'
exit 0
fi
if [ -f "$BACKUP_PATH" ]; then
echo '{"code":409,"success":false,"error":"CNI fault already injected, backup exists: '"$BACKUP_PATH"'"}'
exit 0
fi
mv "$BIN_PATH" "$BACKUP_PATH" 2>/dev/null
if [ $? -ne 0 ]; then
echo '{"code":500,"success":false,"error":"failed to backup CNI binary, permission denied or read-only filesystem"}'
exit 0
fi
echo "$B64_CONTENT" | base64 -d > "$BIN_PATH"
if [ $? -ne 0 ]; then
mv "$BACKUP_PATH" "$BIN_PATH" 2>/dev/null
echo '{"code":500,"success":false,"error":"failed to write wrapper script, rolling back"}'
exit 0
fi
chmod +x "$BIN_PATH"
echo '{"code":200,"success":true}'
`, cniBinPath, backupPath, b64Content)
	return script
}

// generateCniDiscoverScript returns a script that locates the node's primary
// CNI binary: find the kubelet process, read --cni-bin-dir / --cni-conf-dir
// from its cmdline (defaulting to /opt/cni/bin and /etc/cni/net.d), take the
// alphabetically-first .conflist/.conf file, extract its "type" field, and
// verify <bin-dir>/<type> exists. On success it echoes a JSON object whose
// "result" is the full binary path; failures echo a code-500 JSON and exit 0.
func generateCniDiscoverScript() string {
	return `# Find kubelet PID (take first match in case of multiple)
KUBELET_PID=$(pgrep -x kubelet 2>/dev/null | head -1)
if [ -z "$KUBELET_PID" ]; then
for p in /proc/[0-9]*/comm; do
if [ -f "$p" ] && grep -qx kubelet "$p" 2>/dev/null; then
KUBELET_PID=$(echo "$p" | cut -d/ -f3)
break
fi
done
fi
if [ -z "$KUBELET_PID" ]; then
echo '{"code":500,"success":false,"error":"kubelet process not found"}'
exit 0
fi
# Parse kubelet cmdline
CMDLINE=$(cat /proc/$KUBELET_PID/cmdline 2>/dev/null | tr '\0' ' ')
if [ -z "$CMDLINE" ]; then
echo '{"code":500,"success":false,"error":"failed to read kubelet cmdline"}'
exit 0
fi
# Extract --cni-bin-dir (supports both --flag=value and --flag value forms)
CNI_BIN_DIR=$(echo "$CMDLINE" | grep -o '\-\-cni-bin-dir=[^ ]*' | head -1 | cut -d= -f2)
if [ -z "$CNI_BIN_DIR" ]; then
CNI_BIN_DIR=$(echo "$CMDLINE" | sed -n 's/.*--cni-bin-dir *\([^ ]*\).*/\1/p')
fi
[ -z "$CNI_BIN_DIR" ] && CNI_BIN_DIR="/opt/cni/bin"
# Extract --cni-conf-dir (supports both --flag=value and --flag value forms)
CNI_CONF_DIR=$(echo "$CMDLINE" | grep -o '\-\-cni-conf-dir=[^ ]*' | head -1 | cut -d= -f2)
if [ -z "$CNI_CONF_DIR" ]; then
CNI_CONF_DIR=$(echo "$CMDLINE" | sed -n 's/.*--cni-conf-dir *\([^ ]*\).*/\1/p')
fi
[ -z "$CNI_CONF_DIR" ] && CNI_CONF_DIR="/etc/cni/net.d"
# Find first conflist or conf file (alphabetically, same as kubelet)
CONF_FILE=$(ls -1 "$CNI_CONF_DIR"/*.conflist 2>/dev/null | sort | head -1)
if [ -z "$CONF_FILE" ]; then
CONF_FILE=$(ls -1 "$CNI_CONF_DIR"/*.conf 2>/dev/null | sort | head -1)
fi
if [ -z "$CONF_FILE" ]; then
echo '{"code":500,"success":false,"error":"no CNI config files found in '"$CNI_CONF_DIR"'"}'
exit 0
fi
# Extract "type" field from config
CNI_TYPE=$(grep -o '"type" *: *"[^"]*"' "$CONF_FILE" | head -1 | grep -o '"[^"]*"$' | tr -d '"')
if [ -z "$CNI_TYPE" ]; then
echo '{"code":500,"success":false,"error":"cannot extract CNI type from '"$CONF_FILE"'"}'
exit 0
fi
# Compose full binary path and verify
CNI_BIN_PATH="${CNI_BIN_DIR}/${CNI_TYPE}"
if [ ! -f "$CNI_BIN_PATH" ]; then
echo '{"code":500,"success":false,"error":"CNI binary not found at '"$CNI_BIN_PATH"'"}'
exit 0
fi
echo '{"code":200,"success":true,"result":"'"$CNI_BIN_PATH"'"}'
`
}

// discoverCniBinPath runs the discovery script in the daemonset pod on the
// target node and returns the discovered CNI binary path from the response's
// Result field. It fails if the exec fails or Result is not a string.
func discoverCniBinPath(client *channel.Client, daemonsetPodName string) (string, error) {
	script := generateCniDiscoverScript()
	resp := execScriptInDaemonsetPod(client, daemonsetPodName, script)
	if !resp.Success {
		return "", fmt.Errorf("%s", resp.Err)
	}
	path, ok := resp.Result.(string)
	if !ok {
		return "", fmt.Errorf("unexpected discovery result type: %T", resp.Result)
	}
	return path, nil
}

// generateCniDestroyScript builds the restore script: if no backup exists the
// fault was never injected (or already cleaned up) and it reports success,
// making destroy idempotent; otherwise it removes the wrapper and moves the
// backup back into place, reporting the outcome as JSON on stdout.
func generateCniDestroyScript(cniBinPath string) string {
	backupPath := cniBinPath + ".chaosblade.bak"
	script := fmt.Sprintf(`BIN_PATH='%s'
BACKUP_PATH='%s'
if [ ! -f "$BACKUP_PATH" ]; then
echo '{"code":200,"success":true}'
exit 0
fi
rm -f "$BIN_PATH" 2>/dev/null
mv "$BACKUP_PATH" "$BIN_PATH" 2>/dev/null
if [ $? -ne 0 ]; then
echo '{"code":500,"success":false,"error":"failed to restore CNI binary"}'
exit 0
fi
echo '{"code":200,"success":true}'
`, cniBinPath, backupPath)
	return script
}

================================================
FILE: exec/node/controller.go
================================================
/*
 * Copyright 1999-2020 Alibaba Group Holding Ltd.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package node

import (
	"context"

	"github.com/chaosblade-io/chaosblade-spec-go/spec"
	"github.com/sirupsen/logrus"
	v1 "k8s.io/api/core/v1"

	"github.com/chaosblade-io/chaosblade-operator/channel"
	"github.com/chaosblade-io/chaosblade-operator/exec/model"
	"github.com/chaosblade-io/chaosblade-operator/pkg/apis/chaosblade/v1alpha1"
)

// ExpController is the experiment controller for node-scoped experiments; it
// embeds the shared base controller and plugs in the node resource model spec.
type ExpController struct {
	model.BaseExperimentController
}

// NewExpController builds the node experiment controller bound to the given
// Kubernetes client.
func NewExpController(client *channel.Client) model.ExperimentController {
	return &ExpController{
		model.BaseExperimentController{
			Client:            client,
			ResourceModelSpec: NewResourceModelSpec(client),
		},
	}
}

// Name identifies this controller's resource scope.
func (*ExpController) Name() string {
	return "node"
}

// Create resolves the nodes matched by the experiment flags, records them as
// container-object metadata on the context, and delegates execution to the
// base controller's Exec.
func (e *ExpController) Create(ctx context.Context, expSpec v1alpha1.ExperimentSpec) *spec.Response {
	expModel := model.ExtractExpModelFromExperimentSpec(expSpec)
	experimentId := model.GetExperimentIdFromContext(ctx)
	logrusField := logrus.WithField("experiment", experimentId)
	// get nodes
	nodes, resp := e.getMatchedNodeResources(ctx, *expModel)
	if !resp.Success {
		logrusField.Errorf("uid: %s, get matched node resources failed, %v", experimentId, resp.Err)
		resp.Result = v1alpha1.CreateFailExperimentStatus(resp.Err, []v1alpha1.ResourceStatus{})
		return resp
	}
	logrusField.Infof("creating node experiment, node count is %d", len(nodes))
	containerMatchedList := getContainerMatchedList(nodes)
	ctx = model.SetContainerObjectMetaListToContext(ctx, containerMatchedList)
	return e.Exec(ctx, expModel)
}

// Destroy reverts the experiment: it rebuilds the target list from the
// previously recorded successful resource statuses (nil statuses mean nothing
// was created, so destroy trivially succeeds) and delegates to Exec.
func (e *ExpController) Destroy(ctx context.Context, expSpec v1alpha1.ExperimentSpec, oldExpStatus v1alpha1.ExperimentStatus) *spec.Response {
	logrus.WithField("experiment", model.GetExperimentIdFromContext(ctx)).Infoln("start to destroy")
	expModel := model.ExtractExpModelFromExperimentSpec(expSpec)
	statuses := oldExpStatus.ResStatuses
	if statuses == nil {
		return spec.ReturnSuccess(v1alpha1.CreateSuccessExperimentStatus([]v1alpha1.ResourceStatus{}))
	}
	containerObjectMetaList := model.ContainerMatchedList{}
	for _, status := range statuses {
		// Only targets that were successfully injected need to be destroyed.
		if !status.Success {
			continue
		}
		containerObjectMeta := model.ParseIdentifier(status.Identifier)
		containerObjectMeta.Id = status.Id
		containerObjectMetaList = append(containerObjectMetaList, containerObjectMeta)
	}
	ctx = model.SetContainerObjectMetaListToContext(ctx, containerObjectMetaList)
	return e.Exec(ctx, expModel)
}

// getContainerMatchedList transports selected pods
func getContainerMatchedList(nodes []v1.Node) model.ContainerMatchedList {
	containerObjectMetaList := model.ContainerMatchedList{}
	for _, n := range nodes {
		containerObjectMetaList = append(containerObjectMetaList, model.ContainerObjectMeta{
			NodeName: n.Name,
		})
	}
	return containerObjectMetaList
}

================================================
FILE: exec/node/exec_helper.go
================================================
/*
 * Copyright 1999-2020 Alibaba Group Holding Ltd.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
*/ package node import ( "bytes" "github.com/chaosblade-io/chaosblade-spec-go/spec" "github.com/sirupsen/logrus" "github.com/chaosblade-io/chaosblade-operator/channel" "github.com/chaosblade-io/chaosblade-operator/pkg/runtime/chaosblade" ) // execScriptInDaemonsetPod executes a shell script inside the chaosblade daemonset pod // using nsenter to enter the host mount namespace. func execScriptInDaemonsetPod(client *channel.Client, podName string, script string) *spec.Response { // Prepend exec 2>/dev/null to suppress stderr from script commands (mv, chmod, etc.) // This is critical because client.Exec prioritizes stderr over stdout - // any stderr output would cause the JSON response from stdout to be ignored. // Note: nsenter's own errors (e.g., command not found, permission denied) happen // BEFORE the script runs, so those still properly surface as errors. fullScript := "exec 2>/dev/null\n" + script cmd := []string{"nsenter", "-t", "1", "-m", "--", "sh", "-c", fullScript} logrus.Infof("exec in daemonset pod %s/%s, container: %s", chaosblade.DaemonsetPodNamespace, podName, chaosblade.DaemonsetPodName) response := client.Exec(&channel.ExecOptions{ StreamOptions: channel.StreamOptions{ IOStreams: channel.IOStreams{ Out: bytes.NewBuffer([]byte{}), ErrOut: bytes.NewBuffer([]byte{}), }, ErrDecoder: func(bytes []byte) interface{} { content := string(bytes) return spec.Decode(content, spec.ResponseFailWithFlags(spec.K8sExecFailed, "pods/exec", content)) }, OutDecoder: func(bytes []byte) interface{} { content := string(bytes) return spec.Decode(content, spec.ResponseFailWithFlags(spec.K8sExecFailed, "pods/exec", content)) }, }, PodName: podName, PodNamespace: chaosblade.DaemonsetPodNamespace, ContainerName: chaosblade.DaemonsetPodName, Command: cmd, }).(*spec.Response) return response } ================================================ FILE: exec/node/filter.go ================================================ /* * Copyright 1999-2020 Alibaba Group Holding Ltd. 
* * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package node import ( "context" "strings" "github.com/chaosblade-io/chaosblade-spec-go/spec" "github.com/sirupsen/logrus" v1 "k8s.io/api/core/v1" pkglabels "k8s.io/apimachinery/pkg/labels" "k8s.io/apimachinery/pkg/types" "sigs.k8s.io/controller-runtime/pkg/client" "github.com/chaosblade-io/chaosblade-operator/channel" "github.com/chaosblade-io/chaosblade-operator/exec/model" ) func (e *ExpController) getMatchedNodeResources(ctx context.Context, expModel spec.ExpModel) ([]v1.Node, *spec.Response) { flags := expModel.ActionFlags if resp := model.CheckFlags(flags); !resp.Success { return nil, resp } nodes, resp := resourceFunc(ctx, e.Client, flags) if !resp.Success { return nil, resp } return e.filterByOtherFlags(nodes, flags) } func (e *ExpController) filterByOtherFlags(nodes []v1.Node, flags map[string]string) ([]v1.Node, *spec.Response) { groupKey := flags[model.ResourceGroupKeyFlag.Name] if groupKey == "" { count, resp := model.GetResourceCount(len(nodes), flags) return nodes[:count], resp } groupNodes := make(map[string][]v1.Node, 0) keys := strings.Split(groupKey, ",") for _, node := range nodes { for _, key := range keys { nodeList := groupNodes[node.Labels[key]] if nodeList == nil { nodeList = make([]v1.Node, 0) } nodeList = append(nodeList, node) } } result := make([]v1.Node, 0) for _, nodeList := range groupNodes { count, resp := model.GetResourceCount(len(nodeList), flags) if !resp.Success { return nodes[:count], resp } 
result = append(result, nodeList[:count]...) } return result, spec.Success() } var resourceFunc = func(ctx context.Context, client2 *channel.Client, flags map[string]string) ([]v1.Node, *spec.Response) { labels := flags[model.ResourceLabelsFlag.Name] requirements := model.ParseLabels(labels) logrusField := logrus.WithField("experiment", model.GetExperimentIdFromContext(ctx)) nodes := make([]v1.Node, 0) names := flags[model.ResourceNamesFlag.Name] if names != "" { nameArr := strings.Split(names, ",") for _, name := range nameArr { node := v1.Node{} err := client2.Get(context.TODO(), types.NamespacedName{Name: name}, &node) if err != nil { // Skip the invalid name logrusField.Warningf("can not find the node by %s name, %v", name, err) continue } if len(requirements) > 0 { if model.MapContains(node.Labels, requirements) { nodes = append(nodes, node) } } else { nodes = append(nodes, node) } } logrusField.Infof("get nodes by name %s, len is %d", names, len(nodes)) if len(nodes) == 0 { return nodes, spec.ResponseFailWithFlags(spec.ParameterInvalidK8sNodeQuery, names) } return nodes, spec.Success() } if labels != "" && len(requirements) == 0 { logrusField.Warningln(spec.ParameterIllegal.Sprintf(model.ResourceLabelsFlag.Name, labels, "illegal labels")) return nodes, spec.ResponseFailWithFlags(spec.ParameterIllegal, model.ResourceLabelsFlag.Name, labels, "illegal labels") } if len(requirements) > 0 { nodeList := v1.NodeList{} selector := pkglabels.NewSelector().Add(requirements...) 
opts := client.ListOptions{LabelSelector: selector} err := client2.List(context.TODO(), &nodeList, &opts) if err != nil { return nodes, spec.ResponseFailWithFlags(spec.K8sExecFailed, "ListNode", err) } nodes = nodeList.Items logrusField.Infof("get nodes by labels %s, len is %d", labels, len(nodes)) } if len(nodes) == 0 { return nodes, spec.ResponseFailWithFlags(spec.ParameterInvalidK8sNodeQuery, labels) } return nodes, spec.Success() } ================================================ FILE: exec/node/node.go ================================================ /* * Copyright 1999-2020 Alibaba Group Holding Ltd. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. 
 */

package node

import (
	"fmt"
	"strings"

	"github.com/chaosblade-io/chaosblade-exec-os/exec/cpu"
	"github.com/chaosblade-io/chaosblade-exec-os/exec/disk"
	"github.com/chaosblade-io/chaosblade-exec-os/exec/file"
	"github.com/chaosblade-io/chaosblade-exec-os/exec/mem"
	osModel "github.com/chaosblade-io/chaosblade-exec-os/exec/model"
	"github.com/chaosblade-io/chaosblade-exec-os/exec/network"
	"github.com/chaosblade-io/chaosblade-exec-os/exec/network/tc"
	"github.com/chaosblade-io/chaosblade-exec-os/exec/process"
	"github.com/chaosblade-io/chaosblade-exec-os/exec/script"
	"github.com/chaosblade-io/chaosblade-spec-go/spec"

	"github.com/chaosblade-io/chaosblade-operator/channel"
	"github.com/chaosblade-io/chaosblade-operator/exec/model"
)

// ResourceModelSpec aggregates all node-scoped experiment model specs on top
// of the shared base resource spec.
type ResourceModelSpec struct {
	model.BaseResourceExpModelSpec
}

// NewResourceModelSpec assembles the node resource model: it registers the OS
// sub-resource experiment specs (executed via CommonExecutor), attaches the
// node resource-selection flags and the SSH channel flags, and (below) adds
// the CNI fault spec and per-action examples.
func NewResourceModelSpec(client *channel.Client) model.ResourceExpModelSpec {
	modelSpec := &ResourceModelSpec{
		model.NewBaseResourceExpModelSpec("node", client),
	}
	osModelSpecs := model.NewOSSubResourceModelSpec().ExpModels()
	// All OS-level experiments on a node run through the common executor.
	spec.AddExecutorToModelSpec(&model.CommonExecutor{Client: client}, osModelSpecs...)
	selfModelSpec := NewSelfExpModelCommandSpec()
	expModelSpecs := append(osModelSpecs, selfModelSpec)
	// Node selection flags (names/labels/count/...) and SSH flags apply to
	// every experiment spec in this resource model.
	spec.AddFlagsToModelSpec(getResourceFlags, expModelSpecs...)
	spec.AddFlagsToModelSpec(osModel.GetSSHExpFlags, expModelSpecs...)
	modelSpec.RegisterExpModels(osModelSpecs...)
// Register CNI fault injection model spec cniModelSpec := NewCniExpModelCommandSpec(client) spec.AddFlagsToModelSpec(getResourceFlags, cniModelSpec) modelSpec.RegisterExpModels(cniModelSpec) addActionExamples(modelSpec) return modelSpec } func addActionExamples(modelSpec *ResourceModelSpec) { for _, expModelSpec := range modelSpec.ExpModelSpecs { for _, action := range expModelSpec.Actions() { v := interface{}(action) switch v.(type) { case *cpu.FullLoadActionCommand: action.SetLongDesc("The CPU load experiment scenario for k8s node") action.SetExample( `# Create a CPU full load experiment in the node ## using SSH channel blade create k8s node-cpu load --channel ssh --ssh-host 192.168.1.100 --ssh-user root ## using DaemonSet blade create k8s node-cpu load --names izbp1a4jchbdwkwi5hk7ekz --kubeconfig ~/.kube/config --timeout 30 #Specifies two random kernel's full load in the node ## using SSH channel blade create k8s node-cpu load --cpu-percent 60 --cpu-count 2 --channel ssh --ssh-host 192.168.1.100 --ssh-user root ## using DaemonSet blade create k8s node-cpu load --cpu-percent 60 --cpu-count 2 --names izbp1a4jchbdwkwi5hk7ekz --kubeconfig ~/.kube/config --timeout 30 # Specifies that the kernel is full load with index 0, 3, and that the kernel's index starts at 0 ## using SSH channel blade create k8s node-cpu load --cpu-list 0,3 --channel ssh --ssh-host 192.168.1.100 --ssh-user root ## using DaemonSet blade create k8s node-cpu load --cpu-list 0,3 --names izbp1a4jchbdwkwi5hk7ekz --kubeconfig ~/.kube/config --timeout 30 # Specify the kernel full load of indexes 1-3 ## using SSH channel blade create k8s node-cpu load --cpu-list 1-3 --channel ssh --ssh-host 192.168.1.100 --ssh-user root ## using DaemonSet blade create k8s node-cpu load --cpu-list 1-3 --names izbp1a4jchbdwkwi5hk7ekz --kubeconfig ~/.kube/config --timeout 30 # Specified percentage load in the node ## using SSH channel blade create k8s node-cpu load --cpu-percent 60 --channel ssh --ssh-host 192.168.1.100 
--ssh-user root ## using DaemonSet blade create k8s node-cpu load --cpu-percent 60 --names izbp1a4jchbdwkwi5hk7ekz --kubeconfig ~/.kube/config --timeout 30`, ) case *tc.DelayActionSpec: action.SetLongDesc(` The network delay experiment scenario for k8s node. !!! Using DaemonSet may result in failure to use the kubernetes API for destroy experiment. !!! Please use caution, add a timeout parameter for automatic destroy, or use the SSH channel. `) action.SetExample( `# Access to native 8080 and 8081 ports is delayed by 3 seconds, and the delay time fluctuates by 1 second ## using SSH channel blade create k8s node-network delay --time 3000 --offset 1000 --interface eth0 --local-port 8080,8081 --channel ssh --ssh-host 192.168.1.100 --ssh-user root ## using DaemonSet blade create k8s node-network delay --time 3000 --offset 1000 --interface eth0 --names izbp1a4jchbdwkwi5hk7ekz --kubeconfig ~/.kube/config --timeout 30 # Local access to external 14.215.177.39 machine (ping www.baidu.com obtained IP) port 80 delay of 3 seconds ## using SSH channel blade create k8s node-network delay --time 3000 --interface eth0 --remote-port 80 --destination-ip 14.215.177.39 --channel ssh --ssh-host 192.168.1.100 --ssh-user root ## using DaemonSet blade create k8s node-network delay --time 3000 --interface eth0 --remote-port 80 --destination-ip 14.215.177.39 --names izbp1a4jchbdwkwi5hk7ekz --kubeconfig ~/.kube/config --timeout 30 # Do a 5 second delay for the entire network card eth0, excluding ports 22 and 8000 to 8080 ## using SSH channel blade create k8s node-network delay --time 5000 --interface eth0 --exclude-port 22,8000-8080 --channel ssh --ssh-host 192.168.1.100 --ssh-user root ## using DaemonSet blade create k8s node-network delay --time 5000 --interface eth0 --exclude-port 22,8000-8080 --names izbp1a4jchbdwkwi5hk7ekz --kubeconfig ~/.kube/config --timeout 30`, ) case *network.DropActionSpec: action.SetLongDesc(`!!! 
Using DaemonSet may result in failure to use the kubernetes API for destroy experiment. !!! Please use caution, add a timeout parameter for automatic destroy, or use the SSH channel.`) action.SetExample( `# Experimental scenario of network shielding ## using SSH channel blade create k8s node-network drop --source-port 80 --network-traffic in --channel ssh --ssh-host 192.168.1.100 --ssh-user root ## using DaemonSet blade create k8s node-network drop --source-port 80 --network-traffic in --names izbp1a4jchbdwkwi5hk7ekz --kubeconfig ~/.kube/config --timeout 30`, ) case *network.DnsActionSpec: action.SetLongDesc(` !!! Using DaemonSet may result in failure to use the kubernetes API for destroy experiment. !!! Please use caution, add a timeout parameter for automatic destroy, or use the SSH channel.`) action.SetExample( `# The domain name www.baidu.com is not accessible ## using SSH channel blade create k8s node-network dns --domain www.baidu.com --ip 10.0.0.0 --channel ssh --ssh-host 192.168.1.100 --ssh-user root ## using DaemonSet blade create k8s node-network dns --domain www.baidu.com --ip 10.0.0.0 --channel ssh --names izbp1a4jchbdwkwi5hk7ekz --kubeconfig ~/.kube/config --timeout 30`, ) case *tc.LossActionSpec: action.SetLongDesc(` !!! Using DaemonSet may result in failure to use the kubernetes API for destroy experiment. !!! 
Please use caution, add a timeout parameter for automatic destroy, or use the SSH channel.`) action.SetExample(`# Access to native 8080 and 8081 ports lost 70% of packets ## using SSH channel blade create k8s node-network loss --percent 70 --interface eth0 --local-port 8080,8081 --channel ssh --ssh-host 192.168.1.100 --ssh-user root ## using DaemonSet blade create k8s node-network loss --percent 70 --interface eth0 --local-port 8080,8081 --names izbp1a4jchbdwkwi5hk7ekz --kubeconfig ~/.kube/config --timeout 30 # The machine accesses external 14.215.177.39 machine (ping www.baidu.com) 80 port packet loss rate 100% ## using SSH channel blade create k8s node-network loss --percent 100 --interface eth0 --remote-port 80 --destination-ip 14.215.177.39 --channel ssh --ssh-host 192.168.1.100 --ssh-user root ## using DaemonSet blade create k8s node-network loss --percent 100 --interface eth0 --remote-port 80 --destination-ip 14.215.177.39 --names izbp1a4jchbdwkwi5hk7ekz --kubeconfig ~/.kube/config --timeout 30 # Do 60% packet loss for the entire network card Eth0, excluding ports 22 and 8000 to 8080 ## using SSH channel blade create k8s node-network loss --percent 60 --interface eth0 --exclude-port 22,8000-8080 --channel ssh --ssh-host 192.168.1.100 --ssh-user root ## using DaemonSet blade create k8s node-network loss --percent 60 --interface eth0 --exclude-port 22,8000-8080 --names izbp1a4jchbdwkwi5hk7ekz --kubeconfig ~/.kube/config --timeout 30 # Realize the whole network card is not accessible, not accessible time 20 seconds. After executing the following command, the current network is disconnected and restored in 20 seconds. Remember!! 
Don't forget -timeout parameter ## using SSH channel blade create k8s node-network loss --percent 100 --interface eth0 --timeout 20 --channel ssh --ssh-host 192.168.1.100 --ssh-user root ## using DaemonSet blade create k8s node-network loss --percent 100 --interface eth0 --timeout 20 --names izbp1a4jchbdwkwi5hk7ekz --kubeconfig ~/.kube/config`) case *tc.DuplicateActionSpec: action.SetLongDesc(` !!! Using DaemonSet may result in failure to use the kubernetes API for destroy experiment. !!! Please use caution, add a timeout parameter for automatic destroy, or use the SSH channel.`) action.SetExample(`# Specify the network card eth0 and repeat the packet by 10% ## using SSH channel blade create k8s node-network duplicate --percent=10 --interface=eth0 --channel ssh --ssh-host 192.168.1.100 --ssh-user root ## using DaemonSet blade create k8s node-network duplicate --percent=10 --interface=eth0 --names izbp1a4jchbdwkwi5hk7ekz --kubeconfig ~/.kube/config --timeout 30`) case *tc.CorruptActionSpec: action.SetLongDesc(` !!! Using DaemonSet may result in failure to use the kubernetes API for destroy experiment. !!! Please use caution, add a timeout parameter for automatic destroy, or use the SSH channel.`) action.SetExample(`# Access to the specified IP request packet is corrupted, 80% of the time ## using SSH channel blade create k8s node-network corrupt --percent 80 --destination-ip 180.101.49.12 --interface eth0 --channel ssh --ssh-host 192.168.1.100 --ssh-user root ## using DaemonSet blade create k8s node-network corrupt --percent 80 --destination-ip 180.101.49.12 --interface eth0 --names izbp1a4jchbdwkwi5hk7ekz --kubeconfig ~/.kube/config --timeout 30`) case *tc.ReorderActionSpec: action.SetLongDesc(` !!! Using DaemonSet may result in failure to use the kubernetes API for destroy experiment. !!! 
Please use caution, add a timeout parameter for automatic destroy, or use the SSH channel.`) action.SetExample(`# Access the specified IP request packet disorder ## using SSH channel blade create k8s node-network reorder --correlation 80 --percent 50 --gap 2 --time 500 --interface eth0 --destination-ip 180.101.49.12 --channel ssh --ssh-host 192.168.1.100 --ssh-user root ## using DaemonSet blade create k8s node-network reorder --correlation 80 --percent 50 --gap 2 --time 500 --interface eth0 --destination-ip 180.101.49.12 --names izbp1a4jchbdwkwi5hk7ekz --kubeconfig ~/.kube/config --timeout 30`) case *network.OccupyActionSpec: action.SetLongDesc(` !!! Using DaemonSet may result in failure to use the kubernetes API for destroy experiment. !!! Please use caution, add a timeout parameter for automatic destroy, or use the SSH channel.`) action.SetExample(`#Specify port 8080 occupancy ## using SSH channel blade create k8s node-network occupy --port 8080 --force --channel ssh --ssh-host 192.168.1.100 --ssh-user root ## using DaemonSet blade create k8s node-network occupy --port 8080 --force --names izbp1a4jchbdwkwi5hk7ekz --kubeconfig ~/.kube/config --timeout 30 # The machine accesses external 14.215.177.39 machine (ping www.baidu.com) 80 port packet loss rate 100% ## using SSH channel blade create k8s node-network loss --percent 100 --interface eth0 --remote-port 80 --destination-ip 14.215.177.39 --channel ssh --ssh-host 192.168.1.100 --ssh-user root ## using DaemonSet blade create k8s node-network loss --percent 100 --interface eth0 --remote-port 80 --destination-ip 14.215.177.39 --names izbp1a4jchbdwkwi5hk7ekz --kubeconfig ~/.kube/config --timeout 30`) case *process.KillProcessActionCommandSpec: action.SetLongDesc("The process scenario in container is the same as the basic resource process scenario") action.SetExample( ` # Kill the nginx process in the node ## using SSH channel blade create k8s node-process kill --process nginx --channel ssh --ssh-host 192.168.1.100 
--ssh-user root ## using DaemonSet blade create k8s node-process kill --process nginx --names izbp1a4jchbdwkwi5hk7ekz --kubeconfig ~/.kube/config --timeout 30 # Specifies the signal and local port to kill the process in the node ## using SSH channel blade create k8s node-process kill --local-port 8080 --signal 15 --channel ssh --ssh-host 192.168.1.100 --ssh-user root ## using DaemonSet blade create k8s node-process kill --local-port 8080 --signal 15 --names izbp1a4jchbdwkwi5hk7ekz --kubeconfig ~/.kube/config --timeout 30`, ) case *process.StopProcessActionCommandSpec: action.SetLongDesc("The process scenario in container is the same as the basic resource process scenario") action.SetExample( ` # Pause the process that contains the "nginx" keyword in the node ## using SSH channel blade create k8s node-process stop --process nginx --channel ssh --ssh-host 192.168.1.100 --ssh-user root ## using DaemonSet blade create k8s node-process stop --process nginx --names izbp1a4jchbdwkwi5hk7ekz --kubeconfig ~/.kube/config --timeout 30 # Pause the Java process in the node ## using SSH channel blade create k8s node-process stop --process-cmd java --channel ssh --ssh-host 192.168.1.100 --ssh-user root ## using DaemonSet blade create k8s node-process stop --process-cmd java --names izbp1a4jchbdwkwi5hk7ekz --kubeconfig ~/.kube/config --timeout 30`, ) case *disk.FillActionSpec: action.SetLongDesc("The disk fill scenario experiment in the node") action.SetExample( ` # Fill the /home directory with 40G of disk space in the node ## using SSH channel blade create k8s node-disk fill --path /home --size 40000 --channel ssh --ssh-host 192.168.1.100 --ssh-user root ## using DaemonSet blade create k8s node-disk fill --path /home --size 40000 --names izbp1a4jchbdwkwi5hk7ekz --kubeconfig ~/.kube/config --timeout 30 # Fill the /home directory with 80% of the disk space in the node and retains the file handle that populates the disk ## using SSH channel blade create k8s node-disk fill --path 
/home --percent 80 --retain-handle --channel ssh --ssh-host 192.168.1.100 --ssh-user root ## using DaemonSet blade create k8s node-disk fill --path /home --percent 80 --retain-handle --names izbp1a4jchbdwkwi5hk7ekz --kubeconfig ~/.kube/config --timeout 30 # Perform a fixed-size experimental scenario in the node ## using SSH channel blade c k8s node-disk fill --path /home --reserve 1024 --channel ssh --ssh-host 192.168.1.100 --ssh-user root ## using DaemonSet blade c k8s node-disk fill --path /home --reserve 1024 --names izbp1a4jchbdwkwi5hk7ekz --kubeconfig ~/.kube/config --timeout 30`, ) case *disk.BurnActionSpec: action.SetLongDesc("Disk read and write IO load experiment in the node") action.SetExample( `# The data of rkB/s, wkB/s and % Util were mainly observed. Perform disk read IO high-load scenarios ## using SSH channel blade create k8s node-disk burn --read --path /home --channel ssh --ssh-host 192.168.1.100 --ssh-user root ## using DaemonSet blade create k8s node-disk burn --read --path /home --names izbp1a4jchbdwkwi5hk7ekz --kubeconfig ~/.kube/config --timeout 30 # Perform disk write IO high-load scenarios ## using SSH channel blade create k8s node-disk burn --write --path /home --channel ssh --ssh-host 192.168.1.100 --ssh-user root8 ## using DaemonSet blade create k8s node-disk burn --write --path /home --names izbp1a4jchbdwkwi5hk7ekz --kubeconfig ~/.kube/config --timeout 30 # Read and write IO load scenarios are performed at the same time. Path is not specified. 
The default is ## using SSH channel blade create k8s node-disk burn --read --write --channel ssh --ssh-host 192.168.1.100 --ssh-user root ## using DaemonSet blade create k8s node-disk burn --read --write --names izbp1a4jchbdwkwi5hk7ekz --kubeconfig ~/.kube/config --timeout 30`, ) case *disk.UnmountStuckActionSpec: action.SetLongDesc("Simulate volume unmount stuck by holding file handles in the node") action.SetExample( `# Simulate volume unmount stuck on /mnt/data in the node ## using SSH channel blade create k8s node-disk unmount_stuck --path /mnt/data --channel ssh --ssh-host 192.168.1.100 --ssh-user root ## using DaemonSet blade create k8s node-disk unmount_stuck --path /mnt/data --names izbp1a4jchbdwkwi5hk7ekz --kubeconfig ~/.kube/config --timeout 30`, ) case *mem.MemLoadActionCommand: action.SetLongDesc("The memory fill experiment scenario in container") action.SetExample( `# The execution memory footprint is 50% ## using SSH channel blade create k8s node-mem load --mode ram --mem-percent 50 --channel ssh --ssh-host 192.168.1.100 --ssh-user root ## using DaemonSet blade create k8s node-mem load --mode ram --mem-percent 50 --names izbp1a4jchbdwkwi5hk7ekz --kubeconfig ~/.kube/config --timeout 30 # The execution memory footprint is 50%, cache model ## using SSH channel blade create k8s node-mem load --mode cache --mem-percent 50 --channel ssh --ssh-host 192.168.1.100 --ssh-user root ## using DaemonSet blade create k8s node-mem load --mode cache --mem-percent 50 --names izbp1a4jchbdwkwi5hk7ekz --kubeconfig ~/.kube/config --timeout 30 # The execution memory footprint is 50%, usage contains buffer/cache ## using SSH channel blade create k8s node-mem load --mode ram --mem-percent 50 --include-buffer-cache --channel ssh --ssh-host 192.168.1.100 --ssh-user root ## using DaemonSet blade create k8s node-mem load --mode ram --mem-percent 50 --include-buffer-cache --names izbp1a4jchbdwkwi5hk7ekz --kubeconfig ~/.kube/config --timeout 30 # The execution memory footprint is 
50% for 200 seconds ## using SSH channel blade create k8s node-mem load --mode ram --mem-percent 50 --timeout 200 --channel ssh --ssh-host 192.168.1.100 --ssh-user root ## using DaemonSet blade create k8s node-mem load --mode ram --mem-percent 50 --timeout 200 --names izbp1a4jchbdwkwi5hk7ekz --kubeconfig ~/.kube/config # 200M memory is reserved ## using SSH channel blade create k8s node-mem load --mode ram --reserve 200 --rate 100 --channel ssh --ssh-host 192.168.1.100 --ssh-user root ## using DaemonSet blade create k8s node-mem load --mode ram --reserve 200 --rate 100 --names izbp1a4jchbdwkwi5hk7ekz --kubeconfig ~/.kube/config --timeout 30`, ) case *file.FileAppendActionSpec: action.SetLongDesc("The file append experiment scenario in container") action.SetExample( `# Appends the content "HELLO WORLD" to the /home/logs/nginx.log file blade create k8s node-file append --filepath=/home/logs/nginx.log --content="HELL WORLD" --names nginx-app --container-ids f1de335b4eeaf --kubeconfig ~/.kube/config --namespace default # Appends the content "HELLO WORLD" to the /home/logs/nginx.log file, interval 10 seconds blade create k8s node-file append --filepath=/home/logs/nginx.log --content="HELL WORLD" --interval 10 --names nginx-app --container-ids f1de335b4eeaf --kubeconfig ~/.kube/config --namespace default # Appends the content "HELLO WORLD" to the /home/logs/nginx.log file, enable base64 encoding blade create k8s node-file append --filepath=/home/logs/nginx.log --content=SEVMTE8gV09STEQ= --names nginx-app --container-ids f1de335b4eeaf --kubeconfig ~/.kube/config --namespace default # mock interface timeout exception blade create k8s node-file append --filepath=/home/logs/nginx.log --content="@{DATE:+%Y-%m-%d %H:%M:%S} ERROR invoke getUser timeout [@{RANDOM:100-200}]ms abc mock exception" --names nginx-app --container-ids f1de335b4eeaf --kubeconfig ~/.kube/config --namespace default `, ) case *file.FileAddActionSpec: action.SetLongDesc("The file add experiment scenario in 
container") action.SetExample( `# Create a file named nginx.log in the /home directory blade create k8s node-file add --filepath /home/nginx.log --names nginx-app --container-ids f1de335b4eeaf --kubeconfig ~/.kube/config --namespace default # Create a file named nginx.log in the /home directory with the contents of HELLO WORLD blade create k8s node-file add --filepath /home/nginx.log --content "HELLO WORLD" --names nginx-app --container-ids f1de335b4eeaf --kubeconfig ~/.kube/config --namespace default # Create a file named nginx.log in the /temp directory and automatically create directories that don't exist blade create k8s node-file add --filepath /temp/nginx.log --auto-create-dir --names nginx-app --container-ids f1de335b4eeaf --kubeconfig ~/.kube/config --namespace default # Create a directory named /nginx in the /temp directory and automatically create directories that don't exist blade create k8s node-file add --directory --filepath /temp/nginx --auto-create-dir --names nginx-app --container-ids f1de335b4eeaf --kubeconfig ~/.kube/config --namespace default `, ) case *file.FileChmodActionSpec: action.SetLongDesc("The file permission modification scenario in container") action.SetExample(`# Modify /home/logs/nginx.log file permissions to 777 blade create k8s node-file chmod --filepath /home/logs/nginx.log --mark=777 --names nginx-app --container-ids f1de335b4eeaf --kubeconfig ~/.kube/config --namespace default `) case *file.FileDeleteActionSpec: action.SetLongDesc("The file delete scenario in container") action.SetExample( `# Delete the file /home/logs/nginx.log blade create k8s node-file delete --filepath /home/logs/nginx.log --names nginx-app --container-ids f1de335b4eeaf --kubeconfig ~/.kube/config --namespace default # Force delete the file /home/logs/nginx.log unrecoverable blade create k8s node-file delete --filepath /home/logs/nginx.log --force --names nginx-app --container-ids f1de335b4eeaf --kubeconfig ~/.kube/config --namespace default `, ) case 
*file.FileMoveActionSpec: action.SetLongDesc("The file move scenario in container") action.SetExample(`# Move the file /home/logs/nginx.log to /tmp blade create k8s node-file move --filepath /home/logs/nginx.log --target /tmp --names nginx-app --container-ids f1de335b4eeaf --kubeconfig ~/.kube/config --namespace default # Force Move the file /home/logs/nginx.log to /temp blade create k8s node-file move --filepath /home/logs/nginx.log --target /tmp --force --names nginx-app --container-ids f1de335b4eeaf --kubeconfig ~/.kube/config --namespace default # Move the file /home/logs/nginx.log to /temp/ and automatically create directories that don't exist blade create k8s node-file move --filepath /home/logs/nginx.log --target /temp --auto-create-dir --names nginx-app --container-ids f1de335b4eeaf --kubeconfig ~/.kube/config --namespace default `) case *file.FileFdleakActionCommandSpec: action.SetLongDesc("The file descriptor leak experiment scenario in the node, which causes disk space usage to increase") action.SetExample( `# Occupy about 50% of disk space with a leaked unlinked file in the node ## using SSH channel blade create k8s node-file fdleak --percent 50 --channel ssh --ssh-host 192.168.1.100 --ssh-user root ## using DaemonSet blade create k8s node-file fdleak --percent 50 --names izbp1a4jchbdwkwi5hk7ekz --kubeconfig ~/.kube/config --timeout 30 # Occupy about 80% of disk space in the /tmp directory ## using SSH channel blade create k8s node-file fdleak --percent 80 --directory /tmp --channel ssh --ssh-host 192.168.1.100 --ssh-user root ## using DaemonSet blade create k8s node-file fdleak --percent 80 --directory /tmp --names izbp1a4jchbdwkwi5hk7ekz --kubeconfig ~/.kube/config --timeout 30`, ) case *script.ScriptDelayActionCommand: action.SetExample(` # Add commands to the script "start0() { sleep 10.000000 ...}" ## using SSH channel blade create k8s node-script delay --time 10000 --file test.sh --function-name start0 --channel ssh --ssh-host 192.168.1.100 
--ssh-user root ## using DaemonSet blade create k8s node-script delay --time 10000 --file test.sh --function-name start0 --names izbp1a4jchbdwkwi5hk7ekz --kubeconfig ~/.kube/config --timeout 30`) case *script.ScriptExitActionCommand: action.SetExample(` # Add commands to the script "start0() { echo this-is-error-message; exit 1; ... }" ## using SSH channel blade create k8s node-script exit --exit-code 1 --exit-message this-is-error-message --file test.sh --function-name start0 --channel ssh --ssh-host 192.168.1.100 --ssh-user root ## using DaemonSet blade create k8s node-script exit --exit-code 1 --exit-message this-is-error-message --file test.sh --function-name start0 --names izbp1a4jchbdwkwi5hk7ekz --kubeconfig ~/.kube/config --timeout 30`) default: action.SetExample(strings.Replace( action.Example(), fmt.Sprintf("blade create %s %s", expModelSpec.Name(), action.Name()), fmt.Sprintf("blade create k8s node-%s %s --names nginx-app --channel ssh --ssh-host 192.168.1.100 --ssh-user root", expModelSpec.Name(), action.Name()), -1, )) action.SetExample(strings.Replace( action.Example(), fmt.Sprintf("blade c %s %s", expModelSpec.Name(), action.Name()), fmt.Sprintf("blade c k8s node-%s %s --names nginx-app --channel ssh --ssh-host 192.168.1.100 --ssh-user root", expModelSpec.Name(), action.Name()), -1, )) } } } } func getResourceFlags() []spec.ExpFlagSpec { coverageFlags := model.GetResourceCoverageFlags() return append(coverageFlags, model.ResourceNamesFlag, model.ResourceLabelsFlag) } func NewSelfExpModelCommandSpec() spec.ExpModelCommandSpec { return &SelfExpModelCommandSpec{ spec.BaseExpModelCommandSpec{ ExpFlags: []spec.ExpFlagSpec{}, ExpActions: []spec.ExpActionCommandSpec{ // TODO // NewCordonActionCommandSpec(), }, }, } } type SelfExpModelCommandSpec struct { spec.BaseExpModelCommandSpec } func (*SelfExpModelCommandSpec) Name() string { return "node" } func (*SelfExpModelCommandSpec) ShortDesc() string { return "Node resource experiment for itself, for example 
cpu load" } func (*SelfExpModelCommandSpec) LongDesc() string { return "Node resource experiment for itself, for example cpu load" } func (*SelfExpModelCommandSpec) Example() string { return "blade c k8s node-cpu load --evict-count 1 --kubeconfig ~/.kube/config --names cn-hangzhou.192.168.0.205" } ================================================ FILE: exec/pod/badresourcesize.go ================================================ /* * Copyright 2025 The ChaosBlade Authors * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. 
*/ package pod import ( "context" "encoding/json" "fmt" "strings" "github.com/chaosblade-io/chaosblade-spec-go/spec" "github.com/chaosblade-io/chaosblade-spec-go/util" "github.com/sirupsen/logrus" appsv1 "k8s.io/api/apps/v1" v1 "k8s.io/api/core/v1" apierrors "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/api/resource" "k8s.io/apimachinery/pkg/types" "github.com/chaosblade-io/chaosblade-operator/channel" "github.com/chaosblade-io/chaosblade-operator/exec/model" "github.com/chaosblade-io/chaosblade-operator/pkg/apis/chaosblade/v1alpha1" ) const ( BadResourceSizeCPUFlag = "cpu" BadResourceSizeMemFlag = "mem" ChaosBladeOriginalResourcesAnnotation = "chaosblade.io/original-resources" ChaosBladeBadResourceSizeAction = "badresourcesize" ) type BadResourceSizeActionSpec struct { spec.BaseExpActionCommandSpec client *channel.Client } func NewBadResourceSizeActionSpec(client *channel.Client) spec.ExpActionCommandSpec { return &BadResourceSizeActionSpec{ BaseExpActionCommandSpec: spec.BaseExpActionCommandSpec{ ActionMatchers: []spec.ExpFlagSpec{}, ActionFlags: []spec.ExpFlagSpec{ &spec.ExpFlag{ Name: "workload-type", Desc: "Workload type: deployment, daemonset, statefulset. Default: deployment", Required: false, Default: "deployment", }, &spec.ExpFlag{ Name: "workload-name", Desc: "Workload name to modify resource size", Required: true, }, &spec.ExpFlag{ Name: BadResourceSizeCPUFlag, Desc: "CPU resource limit to set, e.g. 1m, 5m", Required: false, }, &spec.ExpFlag{ Name: BadResourceSizeMemFlag, Desc: "Memory resource limit to set, e.g. 
128m, 256m", Required: false, }, }, ActionExecutor: &BadResourceSizeActionExecutor{client: client}, ActionExample: `# Set CPU resource limit for a deployment blade create k8s pod-pod badresourcesize --namespace default --workload-type deployment --workload-name nginx-app --cpu 1m --kubeconfig ~/.kube/config # Set memory resource limit for a deployment blade create k8s pod-pod badresourcesize --namespace default --workload-type deployment --workload-name nginx-app --mem 128m --kubeconfig ~/.kube/config # Set both CPU and memory resource limits for a deployment blade create k8s pod-pod badresourcesize --namespace default --workload-type deployment --workload-name nginx-app --cpu 1m --mem 128m --kubeconfig ~/.kube/config # Set resource limits for a statefulset blade create k8s pod-pod badresourcesize --namespace default --workload-type statefulset --workload-name redis-app --cpu 1m --mem 128m --kubeconfig ~/.kube/config # Set resource limits for a daemonset blade create k8s pod-pod badresourcesize --namespace default --workload-type daemonset --workload-name fluentd --cpu 1m --mem 128m --kubeconfig ~/.kube/config `, ActionCategories: []string{model.CategorySystemContainer}, }, client: client, } } func (*BadResourceSizeActionSpec) Name() string { return "badresourcesize" } func (*BadResourceSizeActionSpec) Aliases() []string { return []string{} } func (*BadResourceSizeActionSpec) ShortDesc() string { return "Modify workload pod resource limits to simulate bad resource sizing" } func (*BadResourceSizeActionSpec) LongDesc() string { return "Modify the CPU/Memory resource limits of a workload (Deployment/DaemonSet/StatefulSet) " + "to simulate incorrect resource sizing. The original resource configuration is backed up " + "in an annotation and restored when the experiment is destroyed. " + "Existing container-level and pod-level resource settings are removed, " + "and new pod-level resource limits are applied." } // PreCreate implements model.ActionPreProcessor. 
func (a *BadResourceSizeActionSpec) PreCreate(ctx context.Context, expModel *spec.ExpModel, client *channel.Client) (context.Context, *spec.Response) { namespace := expModel.ActionFlags[model.ResourceNamespaceFlag.Name] workloadType := expModel.ActionFlags["workload-type"] if workloadType == "" { workloadType = "deployment" } workloadName := expModel.ActionFlags["workload-name"] if namespace == "" { return ctx, spec.ResponseFailWithFlags(spec.ParameterLess, model.ResourceNamespaceFlag.Name) } if strings.Contains(namespace, ",") { return ctx, spec.ResponseFailWithFlags(spec.ParameterInvalidNSNotOne, model.ResourceNamespaceFlag.Name) } if workloadName == "" { return ctx, spec.ResponseFailWithFlags(spec.ParameterLess, "workload-name") } cpuVal := expModel.ActionFlags[BadResourceSizeCPUFlag] memVal := expModel.ActionFlags[BadResourceSizeMemFlag] if cpuVal == "" && memVal == "" { return ctx, spec.ResponseFailWithFlags(spec.ParameterLess, "cpu or mem (at least one is required)") } containerObjectMetaList := model.ContainerMatchedList{ model.ContainerObjectMeta{ Namespace: namespace, PodName: fmt.Sprintf("chaosblade-brs-%s-%s", workloadType, workloadName), }, } ctx = model.SetContainerObjectMetaListToContext(ctx, containerObjectMetaList) return ctx, nil } // PreDestroy implements model.ActionPreProcessor. 
// PreDestroy performs the same flag validation as PreCreate (minus the cpu/mem check)
// and rebuilds the synthetic container metadata so the destroy pipeline can locate the
// experiment target. oldExpStatus is accepted for the interface but not used here.
func (a *BadResourceSizeActionSpec) PreDestroy(ctx context.Context, expModel *spec.ExpModel, client *channel.Client, oldExpStatus v1alpha1.ExperimentStatus) (context.Context, *spec.Response) {
	namespace := expModel.ActionFlags[model.ResourceNamespaceFlag.Name]
	workloadType := expModel.ActionFlags["workload-type"]
	if workloadType == "" {
		workloadType = "deployment"
	}
	workloadName := expModel.ActionFlags["workload-name"]
	if namespace == "" {
		return ctx, spec.ResponseFailWithFlags(spec.ParameterLess, model.ResourceNamespaceFlag.Name)
	}
	// Exactly one namespace is supported.
	if strings.Contains(namespace, ",") {
		return ctx, spec.ResponseFailWithFlags(spec.ParameterInvalidNSNotOne, model.ResourceNamespaceFlag.Name)
	}
	if workloadName == "" {
		return ctx, spec.ResponseFailWithFlags(spec.ParameterLess, "workload-name")
	}
	// Synthetic pod name chaosblade-brs-<type>-<name>; no real pod with this name exists.
	containerObjectMetaList := model.ContainerMatchedList{
		model.ContainerObjectMeta{
			Namespace: namespace,
			PodName:   fmt.Sprintf("chaosblade-brs-%s-%s", workloadType, workloadName),
		},
	}
	ctx = model.SetContainerObjectMetaListToContext(ctx, containerObjectMetaList)
	return ctx, nil
}

// BadResourceSizeActionExecutor creates and destroys the badresourcesize experiment
// against the cluster through its own Kubernetes API client.
type BadResourceSizeActionExecutor struct {
	client *channel.Client
}

// Name returns the executor name, matching the action name.
func (*BadResourceSizeActionExecutor) Name() string {
	return "badresourcesize"
}

// SetChannel is a no-op: this executor talks to the cluster via its own client.
func (*BadResourceSizeActionExecutor) SetChannel(channel spec.Channel) {}

// Exec dispatches to destroy or create depending on the phase recorded in ctx.
func (d *BadResourceSizeActionExecutor) Exec(uid string, ctx context.Context, expModel *spec.ExpModel) *spec.Response {
	if _, ok := spec.IsDestroy(ctx); ok {
		return d.destroy(uid, ctx, expModel)
	}
	return d.create(uid, ctx, expModel)
}

// create validates the flags, parses the requested cpu/mem values into resource limits,
// fetches the target workload (deployment/daemonset/statefulset) and injects the bad
// resource limits into its pod template. Every failure is reported both in the response
// and as a failed v1alpha1.ResourceStatus.
func (d *BadResourceSizeActionExecutor) create(uid string, ctx context.Context, expModel *spec.ExpModel) *spec.Response {
	experimentId := model.GetExperimentIdFromContext(ctx)
	logrusField := logrus.WithField("experiment", experimentId)
	namespace := expModel.ActionFlags[model.ResourceNamespaceFlag.Name]
	workloadType := expModel.ActionFlags["workload-type"]
	if workloadType == "" {
		workloadType = "deployment"
	}
	workloadName := expModel.ActionFlags["workload-name"]
	cpuVal := expModel.ActionFlags[BadResourceSizeCPUFlag]
	memVal := expModel.ActionFlags[BadResourceSizeMemFlag]
	// Re-validate here even though PreCreate already checks these flags.
	if namespace == "" {
		util.Errorf(uid, util.GetRunFuncName(), "namespace is required")
		return spec.ResponseFailWithFlags(spec.ParameterLess, model.ResourceNamespaceFlag.Name)
	}
	if workloadName == "" {
		util.Errorf(uid, util.GetRunFuncName(), "workload-name is required")
		return spec.ResponseFailWithFlags(spec.ParameterLess, "workload-name")
	}
	if cpuVal == "" && memVal == "" {
		util.Errorf(uid, util.GetRunFuncName(), "at least one of cpu or mem is required")
		return spec.ResponseFailWithFlags(spec.ParameterLess, "cpu or mem")
	}
	newLimits, err := buildResourceLimits(cpuVal, memVal)
	if err != nil {
		util.Errorf(uid, util.GetRunFuncName(), fmt.Sprintf("parse resource values failed: %v", err))
		return spec.ResponseFailWithFlags(spec.ParameterIllegal, "cpu/mem", err.Error())
	}
	// Identifier format: <namespace>//<workload-type>//<workload-name>.
	status := v1alpha1.ResourceStatus{
		Kind:       v1alpha1.PodKind,
		Identifier: fmt.Sprintf("%s//%s//%s", namespace, workloadType, workloadName),
	}
	switch workloadType {
	case "deployment":
		deployment := &appsv1.Deployment{}
		err := d.client.Get(ctx, types.NamespacedName{Namespace: namespace, Name: workloadName}, deployment)
		if err != nil {
			// Distinguish not-found from other API errors in the recorded status message.
			if apierrors.IsNotFound(err) {
				logrusField.Warningf("deployment %s/%s not found", namespace, workloadName)
				status = status.CreateFailResourceStatus(fmt.Sprintf("deployment not found: %v", err), spec.K8sExecFailed.Code)
			} else {
				logrusField.Warningf("get deployment %s/%s failed: %v", namespace, workloadName, err)
				status = status.CreateFailResourceStatus(fmt.Sprintf("get deployment failed: %v", err), spec.K8sExecFailed.Code)
			}
			return spec.ReturnResultIgnoreCode(v1alpha1.CreateFailExperimentStatus(status.Error, []v1alpha1.ResourceStatus{status}))
		}
		if err := d.injectDeploymentBadResourceSize(ctx, deployment, newLimits, experimentId); err != nil {
			logrusField.Warningf("inject bad resource size to deployment %s/%s failed: %v", namespace, workloadName, err)
			status = status.CreateFailResourceStatus(fmt.Sprintf("inject bad resource size failed: %v", err), spec.K8sExecFailed.Code)
			return spec.ReturnResultIgnoreCode(v1alpha1.CreateFailExperimentStatus(status.Error, []v1alpha1.ResourceStatus{status}))
		}
		logrusField.Infof("injected bad resource size to deployment %s/%s", namespace, workloadName)
	case "daemonset":
		daemonset := &appsv1.DaemonSet{}
		err := d.client.Get(ctx, types.NamespacedName{Namespace: namespace, Name: workloadName}, daemonset)
		if err != nil {
			if apierrors.IsNotFound(err) {
				logrusField.Warningf("daemonset %s/%s not found", namespace, workloadName)
				status = status.CreateFailResourceStatus(fmt.Sprintf("daemonset not found: %v", err), spec.K8sExecFailed.Code)
			} else {
				logrusField.Warningf("get daemonset %s/%s failed: %v", namespace, workloadName, err)
				status = status.CreateFailResourceStatus(fmt.Sprintf("get daemonset failed: %v", err), spec.K8sExecFailed.Code)
			}
			return spec.ReturnResultIgnoreCode(v1alpha1.CreateFailExperimentStatus(status.Error, []v1alpha1.ResourceStatus{status}))
		}
		if err := d.injectDaemonSetBadResourceSize(ctx, daemonset, newLimits, experimentId); err != nil {
			logrusField.Warningf("inject bad resource size to daemonset %s/%s failed: %v", namespace, workloadName, err)
			status = status.CreateFailResourceStatus(fmt.Sprintf("inject bad resource size failed: %v", err), spec.K8sExecFailed.Code)
			return spec.ReturnResultIgnoreCode(v1alpha1.CreateFailExperimentStatus(status.Error, []v1alpha1.ResourceStatus{status}))
		}
		logrusField.Infof("injected bad resource size to daemonset %s/%s", namespace, workloadName)
	case "statefulset":
		statefulset := &appsv1.StatefulSet{}
		err := d.client.Get(ctx, types.NamespacedName{Namespace: namespace, Name: workloadName}, statefulset)
		if err != nil {
			if apierrors.IsNotFound(err) {
				logrusField.Warningf("statefulset %s/%s not found", namespace, workloadName)
				status = status.CreateFailResourceStatus(fmt.Sprintf("statefulset not found: %v", err), spec.K8sExecFailed.Code)
			} else {
				logrusField.Warningf("get statefulset %s/%s failed: %v", namespace, workloadName, err)
				status = status.CreateFailResourceStatus(fmt.Sprintf("get statefulset failed: %v", err), spec.K8sExecFailed.Code)
			}
			return spec.ReturnResultIgnoreCode(v1alpha1.CreateFailExperimentStatus(status.Error, []v1alpha1.ResourceStatus{status}))
		}
		if err := d.injectStatefulSetBadResourceSize(ctx, statefulset, newLimits, experimentId); err != nil {
			logrusField.Warningf("inject bad resource size to statefulset %s/%s failed: %v", namespace, workloadName, err)
			status = status.CreateFailResourceStatus(fmt.Sprintf("inject bad resource size failed: %v", err), spec.K8sExecFailed.Code)
			return spec.ReturnResultIgnoreCode(v1alpha1.CreateFailExperimentStatus(status.Error, []v1alpha1.ResourceStatus{status}))
		}
		logrusField.Infof("injected bad resource size to statefulset %s/%s", namespace, workloadName)
	default:
		status = status.CreateFailResourceStatus(fmt.Sprintf("unsupported workload type: %s", workloadType), spec.ParameterIllegal.Code)
		return spec.ReturnResultIgnoreCode(v1alpha1.CreateFailExperimentStatus(status.Error, []v1alpha1.ResourceStatus{status}))
	}
	status = status.CreateSuccessResourceStatus()
	return spec.ReturnResultIgnoreCode(v1alpha1.CreateSuccessExperimentStatus([]v1alpha1.ResourceStatus{status}))
}

// destroy fetches the target workload and restores its original resource settings from
// the backup annotation via the restore*Resources helpers.
// NOTE(review): unlike create, destroy does not re-validate namespace/workload-name, and
// it funnels Get errors through handleGetError instead of inline handling — confirm the
// asymmetry is intentional.
func (d *BadResourceSizeActionExecutor) destroy(uid string, ctx context.Context, expModel *spec.ExpModel) *spec.Response {
	experimentId := model.GetExperimentIdFromContext(ctx)
	logrusField := logrus.WithField("experiment", experimentId)
	namespace := expModel.ActionFlags[model.ResourceNamespaceFlag.Name]
	workloadType := expModel.ActionFlags["workload-type"]
	if workloadType == "" {
		workloadType = "deployment"
	}
	workloadName := expModel.ActionFlags["workload-name"]
	status := v1alpha1.ResourceStatus{
		Kind:       v1alpha1.PodKind,
		Identifier: fmt.Sprintf("%s//%s//%s", namespace, workloadType, workloadName),
	}
	switch workloadType {
	case "deployment":
		deployment := &appsv1.Deployment{}
		err := d.client.Get(ctx, types.NamespacedName{Namespace: namespace, Name: workloadName}, deployment)
		if resp, handled := handleGetError(err, namespace, workloadType, workloadName, &status, logrusField); handled {
			return resp
		}
		if err := d.restoreDeploymentResources(ctx, deployment, experimentId); err != nil {
			logrusField.Warningf("restore deployment %s/%s resources failed: %v", namespace, workloadName, err)
			status = status.CreateFailResourceStatus(fmt.Sprintf("restore deployment resources failed: %v", err), spec.K8sExecFailed.Code)
			return spec.ReturnResultIgnoreCode(v1alpha1.CreateFailExperimentStatus(status.Error, []v1alpha1.ResourceStatus{status}))
		}
		logrusField.Infof("restored deployment %s/%s resources", namespace, workloadName)
	case "daemonset":
		daemonset := &appsv1.DaemonSet{}
		err := d.client.Get(ctx, types.NamespacedName{Namespace: namespace, Name: workloadName}, daemonset)
		if resp, handled := handleGetError(err, namespace, workloadType, workloadName, &status, logrusField); handled {
			return resp
		}
		if err := d.restoreDaemonSetResources(ctx, daemonset, experimentId); err != nil {
			logrusField.Warningf("restore daemonset %s/%s resources failed: %v", namespace, workloadName, err)
			status = status.CreateFailResourceStatus(fmt.Sprintf("restore daemonset resources failed: %v", err), spec.K8sExecFailed.Code)
			return spec.ReturnResultIgnoreCode(v1alpha1.CreateFailExperimentStatus(status.Error, []v1alpha1.ResourceStatus{status}))
		}
		logrusField.Infof("restored daemonset %s/%s resources", namespace, workloadName)
	case "statefulset":
		statefulset := &appsv1.StatefulSet{}
		err := d.client.Get(ctx, types.NamespacedName{Namespace: namespace, Name: workloadName}, statefulset)
		if resp, handled := handleGetError(err, namespace, workloadType, workloadName, &status, logrusField); handled {
			return resp
		}
		if err := d.restoreStatefulSetResources(ctx, statefulset, experimentId); err != nil {
			logrusField.Warningf("restore statefulset %s/%s resources failed: %v", namespace, workloadName, err)
			status = status.CreateFailResourceStatus(fmt.Sprintf("restore statefulset resources failed: %v", err), spec.K8sExecFailed.Code)
			return spec.ReturnResultIgnoreCode(v1alpha1.CreateFailExperimentStatus(status.Error, []v1alpha1.ResourceStatus{status}))
		}
		logrusField.Infof("restored statefulset %s/%s resources", namespace, workloadName)
	default:
		status = status.CreateFailResourceStatus(fmt.Sprintf("unsupported workload type: %s", workloadType), spec.ParameterIllegal.Code)
		return spec.ReturnResultIgnoreCode(v1alpha1.CreateFailExperimentStatus(status.Error, []v1alpha1.ResourceStatus{status}))
	}
	status = status.CreateSuccessResourceStatus()
	status.State = v1alpha1.DestroyedState
	return spec.ReturnResultIgnoreCode(v1alpha1.CreateDestroyedExperimentStatus([]v1alpha1.ResourceStatus{status}))
}

// containerResourcesBackup holds the original resource settings for all containers in a pod template,
// keyed by container name for resilience against container additions/removals during the experiment.
type containerResourcesBackup struct {
	// ResourcesByName maps container name to its original ResourceRequirements.
	ResourcesByName map[string]v1.ResourceRequirements `json:"resourcesByName"`
	// InitResourcesByName maps init container name to its original ResourceRequirements.
	InitResourcesByName map[string]v1.ResourceRequirements `json:"initResourcesByName,omitempty"`
}

// buildResourceLimits parses the cpu/mem flag values into a v1.ResourceList for Limits. 
func buildResourceLimits(cpuVal, memVal string) (v1.ResourceList, error) { limits := v1.ResourceList{} if cpuVal != "" { q, err := resource.ParseQuantity(cpuVal) if err != nil { return nil, fmt.Errorf("invalid cpu value %q: %v", cpuVal, err) } limits[v1.ResourceCPU] = q } if memVal != "" { memQuantity := normalizeMemoryValue(memVal) q, err := resource.ParseQuantity(memQuantity) if err != nil { return nil, fmt.Errorf("invalid mem value %q: %v", memVal, err) } limits[v1.ResourceMemory] = q } return limits, nil } // normalizeMemoryValue converts shorthand like "128m" to the Kubernetes-compatible "128Mi". // Kubernetes uses binary suffixes (Ki, Mi, Gi) for memory. // If the value already uses a standard suffix or is purely numeric, it is returned as-is. func normalizeMemoryValue(val string) string { if len(val) == 0 { return val } lastChar := val[len(val)-1] if lastChar == 'm' || lastChar == 'M' { prefix := val[:len(val)-1] if _, err := resource.ParseQuantity(prefix); err == nil { return prefix + "Mi" } } if lastChar == 'g' || lastChar == 'G' { prefix := val[:len(val)-1] if _, err := resource.ParseQuantity(prefix); err == nil { return prefix + "Gi" } } if lastChar == 'k' || lastChar == 'K' { prefix := val[:len(val)-1] if _, err := resource.ParseQuantity(prefix); err == nil { return prefix + "Ki" } } return val } // backupAndInjectResources backs up original container resources (keyed by container name) // for both regular and init containers, then sets new resource limits on each container. 
func backupAndInjectResources(podSpec *v1.PodSpec, annotations map[string]string, newLimits v1.ResourceList) error { backup := containerResourcesBackup{ ResourcesByName: make(map[string]v1.ResourceRequirements, len(podSpec.Containers)), } for _, c := range podSpec.Containers { backup.ResourcesByName[c.Name] = *c.Resources.DeepCopy() } if len(podSpec.InitContainers) > 0 { backup.InitResourcesByName = make(map[string]v1.ResourceRequirements, len(podSpec.InitContainers)) for _, c := range podSpec.InitContainers { backup.InitResourcesByName[c.Name] = *c.Resources.DeepCopy() } } backupBytes, err := json.Marshal(backup) if err != nil { return fmt.Errorf("marshal original resources failed: %v", err) } annotations[ChaosBladeOriginalResourcesAnnotation] = string(backupBytes) for i := range podSpec.Containers { podSpec.Containers[i].Resources = v1.ResourceRequirements{ Limits: newLimits.DeepCopy(), } } for i := range podSpec.InitContainers { podSpec.InitContainers[i].Resources = v1.ResourceRequirements{ Limits: newLimits.DeepCopy(), } } return nil } // restoreResources restores original container resources from the backup annotation. // It uses best-effort matching by container name: containers that exist in both the // backup and the current spec are restored; new containers (not in backup) are left // untouched; removed containers (in backup but not in spec) are logged as warnings. 
func restoreResources(podSpec *v1.PodSpec, annotations map[string]string) error { backupStr, ok := annotations[ChaosBladeOriginalResourcesAnnotation] if !ok || backupStr == "" { return fmt.Errorf("original resources backup annotation not found") } var backup containerResourcesBackup if err := json.Unmarshal([]byte(backupStr), &backup); err != nil { return fmt.Errorf("unmarshal original resources failed: %v", err) } if len(backup.ResourcesByName) == 0 { return fmt.Errorf("backup contains no container resources") } restored := make(map[string]bool, len(backup.ResourcesByName)) for i := range podSpec.Containers { name := podSpec.Containers[i].Name if orig, found := backup.ResourcesByName[name]; found { podSpec.Containers[i].Resources = orig restored[name] = true } else { logrus.Warnf("container %q not found in backup, leaving its resources unchanged", name) } } for name := range backup.ResourcesByName { if !restored[name] { logrus.Warnf("backed-up container %q no longer exists in pod spec, skipping restore", name) } } if len(backup.InitResourcesByName) > 0 { restoredInit := make(map[string]bool, len(backup.InitResourcesByName)) for i := range podSpec.InitContainers { name := podSpec.InitContainers[i].Name if orig, found := backup.InitResourcesByName[name]; found { podSpec.InitContainers[i].Resources = orig restoredInit[name] = true } else { logrus.Warnf("init container %q not found in backup, leaving its resources unchanged", name) } } for name := range backup.InitResourcesByName { if !restoredInit[name] { logrus.Warnf("backed-up init container %q no longer exists in pod spec, skipping restore", name) } } } return nil } func (d *BadResourceSizeActionExecutor) injectDeploymentBadResourceSize(ctx context.Context, deployment *appsv1.Deployment, newLimits v1.ResourceList, experimentId string) error { if deployment.Annotations == nil { deployment.Annotations = make(map[string]string) } if err := ensureNoConflictingExperiment(deployment.Annotations, experimentId); err != 
nil { return err } if deployment.Annotations[ChaosBladeExperimentAnnotation] == experimentId { return nil } deployment.Annotations[ChaosBladeDeploymentAnnotation] = ChaosBladeBadResourceSizeAction deployment.Annotations[ChaosBladeExperimentAnnotation] = experimentId if err := backupAndInjectResources(&deployment.Spec.Template.Spec, deployment.Annotations, newLimits); err != nil { return err } return d.client.Update(ctx, deployment) } func (d *BadResourceSizeActionExecutor) injectDaemonSetBadResourceSize(ctx context.Context, daemonset *appsv1.DaemonSet, newLimits v1.ResourceList, experimentId string) error { if daemonset.Annotations == nil { daemonset.Annotations = make(map[string]string) } if err := ensureNoConflictingExperiment(daemonset.Annotations, experimentId); err != nil { return err } if daemonset.Annotations[ChaosBladeExperimentAnnotation] == experimentId { return nil } daemonset.Annotations[ChaosBladeDaemonSetAnnotation] = ChaosBladeBadResourceSizeAction daemonset.Annotations[ChaosBladeExperimentAnnotation] = experimentId if err := backupAndInjectResources(&daemonset.Spec.Template.Spec, daemonset.Annotations, newLimits); err != nil { return err } return d.client.Update(ctx, daemonset) } func (d *BadResourceSizeActionExecutor) injectStatefulSetBadResourceSize(ctx context.Context, statefulset *appsv1.StatefulSet, newLimits v1.ResourceList, experimentId string) error { if statefulset.Annotations == nil { statefulset.Annotations = make(map[string]string) } if err := ensureNoConflictingExperiment(statefulset.Annotations, experimentId); err != nil { return err } if statefulset.Annotations[ChaosBladeExperimentAnnotation] == experimentId { return nil } statefulset.Annotations[ChaosBladeStatefulSetAnnotation] = ChaosBladeBadResourceSizeAction statefulset.Annotations[ChaosBladeExperimentAnnotation] = experimentId if err := backupAndInjectResources(&statefulset.Spec.Template.Spec, statefulset.Annotations, newLimits); err != nil { return err } return 
d.client.Update(ctx, statefulset) } func (d *BadResourceSizeActionExecutor) restoreDeploymentResources(ctx context.Context, deployment *appsv1.Deployment, experimentId string) error { if deployment.Annotations[ChaosBladeExperimentAnnotation] != experimentId { return fmt.Errorf("deployment was not modified by experiment %s", experimentId) } if err := restoreResources(&deployment.Spec.Template.Spec, deployment.Annotations); err != nil { return err } delete(deployment.Annotations, ChaosBladeDeploymentAnnotation) delete(deployment.Annotations, ChaosBladeExperimentAnnotation) delete(deployment.Annotations, ChaosBladeOriginalResourcesAnnotation) return d.client.Update(ctx, deployment) } func (d *BadResourceSizeActionExecutor) restoreDaemonSetResources(ctx context.Context, daemonset *appsv1.DaemonSet, experimentId string) error { if daemonset.Annotations[ChaosBladeExperimentAnnotation] != experimentId { return fmt.Errorf("daemonset was not modified by experiment %s", experimentId) } if err := restoreResources(&daemonset.Spec.Template.Spec, daemonset.Annotations); err != nil { return err } delete(daemonset.Annotations, ChaosBladeDaemonSetAnnotation) delete(daemonset.Annotations, ChaosBladeExperimentAnnotation) delete(daemonset.Annotations, ChaosBladeOriginalResourcesAnnotation) return d.client.Update(ctx, daemonset) } func (d *BadResourceSizeActionExecutor) restoreStatefulSetResources(ctx context.Context, statefulset *appsv1.StatefulSet, experimentId string) error { if statefulset.Annotations[ChaosBladeExperimentAnnotation] != experimentId { return fmt.Errorf("statefulset was not modified by experiment %s", experimentId) } if err := restoreResources(&statefulset.Spec.Template.Spec, statefulset.Annotations); err != nil { return err } delete(statefulset.Annotations, ChaosBladeStatefulSetAnnotation) delete(statefulset.Annotations, ChaosBladeExperimentAnnotation) delete(statefulset.Annotations, ChaosBladeOriginalResourcesAnnotation) return d.client.Update(ctx, statefulset) } 
================================================
FILE: exec/pod/configmapdeleteexp.go
================================================
/*
 * Copyright 2025 The ChaosBlade Authors
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package pod

import (
	"context"
	"crypto/sha256"
	"encoding/json"
	"fmt"

	"github.com/chaosblade-io/chaosblade-spec-go/spec"
	"github.com/chaosblade-io/chaosblade-spec-go/util"
	"github.com/sirupsen/logrus"
	v1 "k8s.io/api/core/v1"
	apierrors "k8s.io/apimachinery/pkg/api/errors"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/labels"
	"k8s.io/apimachinery/pkg/types"
	"sigs.k8s.io/controller-runtime/pkg/client"

	"github.com/chaosblade-io/chaosblade-operator/channel"
	"github.com/chaosblade-io/chaosblade-operator/exec/model"
	"github.com/chaosblade-io/chaosblade-operator/pkg/apis/chaosblade/v1alpha1"
)

const (
	// ConfigMapNameFlag is the CLI flag naming the ConfigMap to delete.
	ConfigMapNameFlag = "configmap-name"
	// ChaosBladeExperimentLabel ties a backup ConfigMap to its experiment id,
	// so destroy() can find all backups via a label selector.
	ChaosBladeExperimentLabel = "chaosblade.io/experiment-id"
	// ChaosBladeBackupLabel marks an object as a chaosblade backup (value "configmap").
	ChaosBladeBackupLabel = "chaosblade.io/backup"
	// The following annotations on the backup ConfigMap record enough of the
	// original object's identity and metadata to recreate it on destroy.
	ChaosBladeOriginalNameAnn      = "chaosblade.io/original-name"
	ChaosBladeOriginalNamespaceAnn = "chaosblade.io/original-namespace"
	ChaosBladeOriginalLabelsAnn    = "chaosblade.io/original-labels"
	ChaosBladeOriginalAnnsAnn      = "chaosblade.io/original-annotations"
)

// ConfigMapDeleteActionSpec declares the "configmapdelete" pod action: its
// flags, docs, and the executor that implements it.
type ConfigMapDeleteActionSpec struct {
	spec.BaseExpActionCommandSpec
}

// NewConfigMapDeleteActionSpec wires the action spec up with the operator's
// Kubernetes client and the flag/example metadata shown by the blade CLI.
func NewConfigMapDeleteActionSpec(client *channel.Client) spec.ExpActionCommandSpec {
	return &ConfigMapDeleteActionSpec{
		spec.BaseExpActionCommandSpec{
			ActionMatchers: []spec.ExpFlagSpec{},
			ActionFlags: []spec.ExpFlagSpec{
				&spec.ExpFlag{
					Name: ConfigMapNameFlag,
					Desc: "The ConfigMap name to delete. If not specified, the first non-optional ConfigMap from the Pod spec will be selected",
				},
			},
			ActionExecutor: &ConfigMapDeleteActionExecutor{client: client},
			ActionExample: `# Delete the auto-selected required ConfigMap for pods matching labels
blade create k8s pod-pod configmapdelete --labels "app=test" --namespace default

# Delete a specific ConfigMap
blade create k8s pod-pod configmapdelete --labels "app=test" --namespace default --configmap-name my-config
`,
			ActionCategories: []string{model.CategorySystemContainer},
		},
	}
}

// Name returns the action's command name.
func (*ConfigMapDeleteActionSpec) Name() string {
	return "configmapdelete"
}

// Aliases returns alternative command names (none for this action).
func (*ConfigMapDeleteActionSpec) Aliases() []string {
	return []string{}
}

// ShortDesc is the one-line summary shown in command listings.
func (*ConfigMapDeleteActionSpec) ShortDesc() string {
	return "Delete ConfigMap to simulate Pod startup failure"
}

// LongDesc describes the full inject/restore lifecycle of the action.
func (*ConfigMapDeleteActionSpec) LongDesc() string {
	return "Delete a ConfigMap that a Pod depends on, then restart the Pod to simulate startup failure " +
		"caused by missing ConfigMap. The original ConfigMap is backed up and restored when the experiment is destroyed."
}

// ConfigMapDeleteActionExecutor performs the actual backup/delete/restore work
// against the cluster through the operator's client.
type ConfigMapDeleteActionExecutor struct {
	client *channel.Client
}

// Name returns the executor's name (matches the action name).
func (*ConfigMapDeleteActionExecutor) Name() string {
	return "configmapdelete"
}

// SetChannel is part of the spec.Executor interface; this executor talks to
// the cluster directly and needs no channel.
func (*ConfigMapDeleteActionExecutor) SetChannel(channel spec.Channel) {}

// Exec dispatches to destroy() when the context carries the destroy flag,
// otherwise to create().
func (d *ConfigMapDeleteActionExecutor) Exec(uid string, ctx context.Context, expModel *spec.ExpModel) *spec.Response {
	if _, ok := spec.IsDestroy(ctx); ok {
		return d.destroy(ctx, expModel)
	}
	return d.create(ctx, expModel)
}

// create backs up the target ConfigMap, deletes it, then deletes the Pod to trigger a restart failure.
// create injects the fault for every matched pod. Per pod: fetch it, skip it
// unless Ready, resolve the target ConfigMap, back that ConfigMap up, delete
// the original, then delete the pod so its rebuild fails on the missing
// ConfigMap. A ConfigMap shared by several matched pods is backed up and
// deleted only once (tracked in processedCMs). The experiment is reported
// successful if at least one pod succeeds.
func (d *ConfigMapDeleteActionExecutor) create(ctx context.Context, expModel *spec.ExpModel) *spec.Response {
	configMapName := expModel.ActionFlags[ConfigMapNameFlag]
	experimentId := model.GetExperimentIdFromContext(ctx)
	logrusField := logrus.WithField("experiment", experimentId)
	containerMatchedList, err := model.GetContainerObjectMetaListFromContext(ctx)
	if err != nil {
		util.Errorf(experimentId, util.GetRunFuncName(), err.Error())
		return spec.ResponseFailWithResult(spec.ContainerInContextNotFound,
			v1alpha1.CreateFailExperimentStatus(spec.ContainerInContextNotFound.Msg, []v1alpha1.ResourceStatus{}))
	}
	statuses := make([]v1alpha1.ResourceStatus, 0)
	// success flips to true as soon as any single pod is fully processed.
	success := false
	// processedCMs keys are "namespace/configmap-name"; an entry is added only
	// after both backup and delete of that ConfigMap succeed.
	processedCMs := make(map[string]bool)
	for _, c := range containerMatchedList {
		status := v1alpha1.ResourceStatus{
			Kind:       v1alpha1.PodKind,
			Identifier: c.GetIdentifier(),
		}
		// Step 1: Fetch the Pod
		pod := &v1.Pod{}
		if err := d.client.Get(ctx, types.NamespacedName{Name: c.PodName, Namespace: c.Namespace}, pod); err != nil {
			logrusField.Errorf("get pod %s/%s failed: %v", c.Namespace, c.PodName, err)
			status = status.CreateFailResourceStatus(spec.K8sExecFailed.Sprintf("get pod", err), spec.K8sExecFailed.Code)
			statuses = append(statuses, status)
			continue
		}
		// Skip pods that are not in a healthy state — injecting into an already
		// unhealthy pod would make the experiment result meaningless.
		if !isPodReady(pod) {
			logrusField.Infof("pod %s/%s is not ready, skip", c.Namespace, c.PodName)
			status = status.CreateFailResourceStatus(
				fmt.Sprintf("pod %s is not ready", c.PodName),
				spec.K8sExecFailed.Code,
			)
			statuses = append(statuses, status)
			continue
		}
		// Step 2: Resolve the target ConfigMap (user flag or auto-selection of
		// the first required reference in the pod spec).
		resolvedCMName, resolveErr := resolveTargetConfigMap(pod, configMapName)
		if resolveErr != nil {
			logrusField.Errorf("resolve configmap for pod %s/%s failed: %v", c.Namespace, c.PodName, resolveErr)
			status = status.CreateFailResourceStatus(resolveErr.Error(), spec.K8sExecFailed.Code)
			statuses = append(statuses, status)
			continue
		}
		cmKey := fmt.Sprintf("%s/%s", c.Namespace, resolvedCMName)
		// Step 3: Deduplicate — only backup & delete each ConfigMap once
		if !processedCMs[cmKey] {
			// Step 4: Fetch the original ConfigMap
			originalCM := &v1.ConfigMap{}
			if err := d.client.Get(ctx, types.NamespacedName{Name: resolvedCMName, Namespace: c.Namespace}, originalCM); err != nil {
				logrusField.Errorf("get configmap %s/%s failed: %v", c.Namespace, resolvedCMName, err)
				// NOTE(review): any Get error is reported as "not found" here,
				// even transient API errors — the log line carries the real error.
				status = status.CreateFailResourceStatus(
					fmt.Sprintf("configmap %s not found in namespace %s", resolvedCMName, c.Namespace),
					spec.K8sExecFailed.Code,
				)
				statuses = append(statuses, status)
				continue
			}
			// Step 5: Create backup ConfigMap
			// (originalCM is declared inside the dedup block above)
			if err := d.createBackupConfigMap(ctx, experimentId, originalCM); err != nil {
				logrusField.Errorf("create backup configmap for %s/%s failed: %v", c.Namespace, resolvedCMName, err)
				status = status.CreateFailResourceStatus(
					fmt.Sprintf("create backup configmap failed: %v", err),
					spec.K8sExecFailed.Code,
				)
				statuses = append(statuses, status)
				continue
			}
			logrusField.Infof("created backup configmap for %s/%s", c.Namespace, resolvedCMName)
			// Step 6: Delete the original ConfigMap. NotFound is tolerated —
			// someone else deleting it first still achieves the fault.
			if err := d.client.Delete(ctx, originalCM); err != nil && !apierrors.IsNotFound(err) {
				logrusField.Errorf("delete configmap %s/%s failed: %v", c.Namespace, resolvedCMName, err)
				// Rollback: delete the backup so no orphaned backup is left behind.
				backupName := getBackupConfigMapName(experimentId, c.Namespace, resolvedCMName)
				if rbErr := d.client.Delete(ctx, &v1.ConfigMap{
					ObjectMeta: metav1.ObjectMeta{Name: backupName, Namespace: c.Namespace},
				}); rbErr != nil && !apierrors.IsNotFound(rbErr) {
					logrusField.Warningf("rollback: delete backup configmap %s failed: %v", backupName, rbErr)
				}
				status = status.CreateFailResourceStatus(
					fmt.Sprintf("delete configmap %s failed: %v", resolvedCMName, err),
					spec.K8sExecFailed.Code,
				)
				statuses = append(statuses, status)
				continue
			}
			logrusField.Infof("deleted configmap %s/%s", c.Namespace, resolvedCMName)
			// Mark as processed only after both backup and delete succeed
			processedCMs[cmKey] = true
		}
		// Step 7: Delete the Pod to trigger rebuild
		if err := d.client.Delete(ctx, pod); err != nil && !apierrors.IsNotFound(err) {
			logrusField.Errorf("delete pod %s/%s failed: %v", c.Namespace, c.PodName, err)
			// The ConfigMap is already deleted. Attempt to restore it from backup.
			// NOTE(review): if restore succeeds, the backup is removed but
			// processedCMs still holds this key, so a later pod sharing the same
			// ConfigMap will not re-delete it.
			backupName := getBackupConfigMapName(experimentId, c.Namespace, resolvedCMName)
			if restoreErr := d.restoreAndCleanupBackup(ctx, c.Namespace, backupName); restoreErr != nil {
				logrusField.Errorf("rollback: restore configmap from backup %s failed: %v, manual intervention required", backupName, restoreErr)
				status = status.CreateFailResourceStatus(
					fmt.Sprintf("configmap %s has been deleted but restore failed: %v, manual intervention required", resolvedCMName, restoreErr),
					spec.K8sExecFailed.Code,
				)
				statuses = append(statuses, status)
				continue
			}
			status = status.CreateFailResourceStatus(
				fmt.Sprintf("delete pod %s failed: %v", c.PodName, err),
				spec.K8sExecFailed.Code,
			)
			statuses = append(statuses, status)
			continue
		}
		logrusField.Infof("deleted pod %s/%s to trigger rebuild", c.Namespace, c.PodName)
		status = status.CreateSuccessResourceStatus()
		statuses = append(statuses, status)
		success = true
	}
	var experimentStatus v1alpha1.ExperimentStatus
	if success {
		experimentStatus = v1alpha1.CreateSuccessExperimentStatus(statuses)
	} else {
		experimentStatus = v1alpha1.CreateFailExperimentStatus("see resStatuses for details", statuses)
	}
	return spec.ReturnResultIgnoreCode(experimentStatus)
}

// destroy restores the backed-up ConfigMap and deletes the Pod to trigger a healthy restart.
// destroy undoes the experiment in three phases: (1) list all backup
// ConfigMaps for this experiment id in every namespace touched by the matched
// pods, (2) recreate each original ConfigMap from its backup and delete the
// backup, (3) delete the matched pods so their controllers rebuild them with
// the restored ConfigMap present. Any phase failure marks the overall result
// failed but the remaining work is still attempted.
func (d *ConfigMapDeleteActionExecutor) destroy(ctx context.Context, expModel *spec.ExpModel) *spec.Response {
	experimentId := model.GetExperimentIdFromContext(ctx)
	logrusField := logrus.WithField("experiment", experimentId)
	containerMatchedList, err := model.GetContainerObjectMetaListFromContext(ctx)
	if err != nil {
		util.Errorf(experimentId, util.GetRunFuncName(), err.Error())
		return spec.ResponseFailWithResult(spec.ContainerInContextNotFound,
			v1alpha1.CreateFailExperimentStatus(spec.ContainerInContextNotFound.Msg, []v1alpha1.ResourceStatus{}))
	}
	// Find all backup ConfigMaps for this experiment across all relevant namespaces
	namespaces := make(map[string]bool)
	for _, c := range containerMatchedList {
		namespaces[c.Namespace] = true
	}
	// Collect backup CMs by namespace
	allSuccess := true
	backupCMs := make(map[string][]*v1.ConfigMap) // namespace -> list of backup CMs
	for ns := range namespaces {
		cmList := &v1.ConfigMapList{}
		// Backups are discoverable via the experiment-id + backup labels set
		// by createBackupConfigMap.
		labelSelector := labels.SelectorFromSet(labels.Set{
			ChaosBladeExperimentLabel: experimentId,
			ChaosBladeBackupLabel:     "configmap",
		})
		if err := d.client.List(ctx, cmList, &client.ListOptions{
			Namespace:     ns,
			LabelSelector: labelSelector,
		}); err != nil {
			logrusField.Errorf("list backup configmaps in namespace %s failed: %v", ns, err)
			allSuccess = false
			continue
		}
		for i := range cmList.Items {
			// Take the address of the slice element, not the loop variable.
			backupCMs[ns] = append(backupCMs[ns], &cmList.Items[i])
		}
	}
	// Restore all backup ConfigMaps
	restoredCMs := make(map[string]bool) // key: namespace/backupName
	for ns, cms := range backupCMs {
		for _, backupCM := range cms {
			backupKey := fmt.Sprintf("%s/%s", ns, backupCM.Name)
			if restoredCMs[backupKey] {
				continue
			}
			restoredCMs[backupKey] = true
			if err := d.restoreConfigMapFromBackup(ctx, backupCM); err != nil {
				logrusField.Errorf("restore configmap from backup %s/%s failed: %v", ns, backupCM.Name, err)
				allSuccess = false
				// Do NOT delete the backup when restore fails — preserve it for retry
				continue
			}
			// Backup removal failure is only a warning: the original is already back.
			if err := d.deleteBackupConfigMap(ctx, backupCM); err != nil {
				logrusField.Warningf("delete backup configmap %s/%s failed: %v", ns, backupCM.Name, err)
			}
		}
	}
	// Delete all matched Pods to trigger healthy rebuilds
	statuses := make([]v1alpha1.ResourceStatus, 0)
	for _, c := range containerMatchedList {
		status := v1alpha1.ResourceStatus{
			Id:         c.Id,
			Kind:       v1alpha1.PodKind,
			Identifier: c.GetIdentifier(),
		}
		// Delete by name/namespace only; no need to fetch the full object first.
		pod := &v1.Pod{
			ObjectMeta: metav1.ObjectMeta{
				Name:      c.PodName,
				Namespace: c.Namespace,
			},
		}
		if err := d.client.Delete(ctx, pod); err != nil {
			if apierrors.IsNotFound(err) {
				// Already gone — counts as destroyed.
				logrusField.Infof("pod %s/%s already deleted", c.Namespace, c.PodName)
			} else {
				logrusField.Errorf("delete pod %s/%s failed: %v", c.Namespace, c.PodName, err)
				status = status.CreateFailResourceStatus(
					fmt.Sprintf("delete pod %s failed: %v", c.PodName, err),
					spec.K8sExecFailed.Code,
				)
				statuses = append(statuses, status)
				allSuccess = false
				continue
			}
		} else {
			logrusField.Infof("deleted pod %s/%s to trigger healthy rebuild", c.Namespace, c.PodName)
		}
		status = status.CreateSuccessResourceStatus()
		status.State = v1alpha1.DestroyedState
		statuses = append(statuses, status)
	}
	if allSuccess {
		return spec.ReturnResultIgnoreCode(v1alpha1.CreateDestroyedExperimentStatus(statuses))
	}
	return spec.ReturnResultIgnoreCode(v1alpha1.CreateFailExperimentStatus("see resStatuses for details", statuses))
}

// createBackupConfigMap creates a backup copy of the original ConfigMap.
func (d *ConfigMapDeleteActionExecutor) createBackupConfigMap(ctx context.Context, experimentId string, originalCM *v1.ConfigMap) error { backupName := getBackupConfigMapName(experimentId, originalCM.Namespace, originalCM.Name) annotations := map[string]string{ ChaosBladeOriginalNameAnn: originalCM.Name, ChaosBladeOriginalNamespaceAnn: originalCM.Namespace, ChaosBladeExperimentAnnotation: experimentId, } // Preserve original labels and annotations as JSON if len(originalCM.Labels) > 0 { if labelsJSON, err := json.Marshal(originalCM.Labels); err != nil { logrus.Warningf("failed to marshal original labels for configmap %s/%s: %v", originalCM.Namespace, originalCM.Name, err) } else { annotations[ChaosBladeOriginalLabelsAnn] = string(labelsJSON) } } if len(originalCM.Annotations) > 0 { if annsJSON, err := json.Marshal(originalCM.Annotations); err != nil { logrus.Warningf("failed to marshal original annotations for configmap %s/%s: %v", originalCM.Namespace, originalCM.Name, err) } else { annotations[ChaosBladeOriginalAnnsAnn] = string(annsJSON) } } backupCM := &v1.ConfigMap{ ObjectMeta: metav1.ObjectMeta{ Name: backupName, Namespace: originalCM.Namespace, Labels: map[string]string{ ChaosBladeExperimentLabel: experimentId, ChaosBladeBackupLabel: "configmap", }, Annotations: annotations, }, Data: copyStringMap(originalCM.Data), BinaryData: copyByteMap(originalCM.BinaryData), } if err := d.client.Create(ctx, backupCM); err != nil { if apierrors.IsAlreadyExists(err) { logrus.WithField("experiment", experimentId).Infof("backup configmap %s already exists, skip creation", backupName) return nil } return err } return nil } // restoreConfigMapFromBackup recreates the original ConfigMap from a backup ConfigMap object. // It reads the original name, namespace, labels, and annotations from the backup's annotations. // Returns nil if the original ConfigMap already exists (AlreadyExists is treated as success). 
func (d *ConfigMapDeleteActionExecutor) restoreConfigMapFromBackup(ctx context.Context, backupCM *v1.ConfigMap) error { originalName := backupCM.Annotations[ChaosBladeOriginalNameAnn] originalNamespace := backupCM.Annotations[ChaosBladeOriginalNamespaceAnn] if originalNamespace == "" { originalNamespace = backupCM.Namespace } // Restore original labels var originalLabels map[string]string if labelsJSON := backupCM.Annotations[ChaosBladeOriginalLabelsAnn]; labelsJSON != "" { if err := json.Unmarshal([]byte(labelsJSON), &originalLabels); err != nil { logrus.Warningf("failed to unmarshal original labels from backup %s/%s: %v", backupCM.Namespace, backupCM.Name, err) } } // Restore original annotations var originalAnnotations map[string]string if annsJSON := backupCM.Annotations[ChaosBladeOriginalAnnsAnn]; annsJSON != "" { if err := json.Unmarshal([]byte(annsJSON), &originalAnnotations); err != nil { logrus.Warningf("failed to unmarshal original annotations from backup %s/%s: %v", backupCM.Namespace, backupCM.Name, err) } } restoredCM := &v1.ConfigMap{ ObjectMeta: metav1.ObjectMeta{ Name: originalName, Namespace: originalNamespace, Labels: originalLabels, Annotations: originalAnnotations, }, Data: backupCM.Data, BinaryData: backupCM.BinaryData, } if err := d.client.Create(ctx, restoredCM); err != nil { if apierrors.IsAlreadyExists(err) { logrus.Infof("configmap %s/%s already exists, skip restore", originalNamespace, originalName) return nil } return fmt.Errorf("restore configmap %s/%s: %w", originalNamespace, originalName, err) } return nil } // deleteBackupConfigMap removes a backup ConfigMap. NotFound is treated as success. 
func (d *ConfigMapDeleteActionExecutor) deleteBackupConfigMap(ctx context.Context, backupCM *v1.ConfigMap) error { if err := d.client.Delete(ctx, backupCM); err != nil && !apierrors.IsNotFound(err) { return fmt.Errorf("delete backup configmap %s/%s: %w", backupCM.Namespace, backupCM.Name, err) } return nil } // restoreAndCleanupBackup fetches the backup ConfigMap by name, restores the original, and deletes the backup. // Used during create-phase rollback when we need to look up the backup by name. func (d *ConfigMapDeleteActionExecutor) restoreAndCleanupBackup(ctx context.Context, namespace, backupName string) error { backupCM := &v1.ConfigMap{} if err := d.client.Get(ctx, types.NamespacedName{Name: backupName, Namespace: namespace}, backupCM); err != nil { return fmt.Errorf("get backup configmap %s/%s: %w", namespace, backupName, err) } if err := d.restoreConfigMapFromBackup(ctx, backupCM); err != nil { return err } return d.deleteBackupConfigMap(ctx, backupCM) } // ConfigMapRef describes a ConfigMap reference found in a Pod spec. type ConfigMapRef struct { Name string Optional bool Source string // "volume", "envFrom", "envValueFrom" } // collectConfigMapReferences scans a Pod spec and returns all ConfigMap references. // It scans Volumes, EnvFrom and Env ValueFrom across all containers and init containers. // Duplicate names are merged: if any reference is required, the result is required. 
func collectConfigMapReferences(pod *v1.Pod) []ConfigMapRef { seen := make(map[string]int) // name -> index in result var refs []ConfigMapRef addRef := func(name string, optional bool, source string) { if idx, exists := seen[name]; exists { // If any reference is required, mark as required if !optional { refs[idx].Optional = false } return } seen[name] = len(refs) refs = append(refs, ConfigMapRef{Name: name, Optional: optional, Source: source}) } // Scan volumes first (highest priority for auto-selection) for _, vol := range pod.Spec.Volumes { if vol.ConfigMap != nil { addRef(vol.ConfigMap.LocalObjectReference.Name, isOptional(vol.ConfigMap.Optional), "volume") } } // Scan all containers (regular + init) allContainers := make([]v1.Container, 0, len(pod.Spec.Containers)+len(pod.Spec.InitContainers)) allContainers = append(allContainers, pod.Spec.Containers...) allContainers = append(allContainers, pod.Spec.InitContainers...) for _, ctr := range allContainers { for _, envFrom := range ctr.EnvFrom { if envFrom.ConfigMapRef != nil { addRef(envFrom.ConfigMapRef.LocalObjectReference.Name, isOptional(envFrom.ConfigMapRef.Optional), "envFrom") } } for _, env := range ctr.Env { if env.ValueFrom != nil && env.ValueFrom.ConfigMapKeyRef != nil { addRef(env.ValueFrom.ConfigMapKeyRef.LocalObjectReference.Name, isOptional(env.ValueFrom.ConfigMapKeyRef.Optional), "envValueFrom") } } } return refs } // resolveTargetConfigMap determines which ConfigMap to target for chaos injection. // If userSpecifiedName is provided, it validates that the ConfigMap is referenced and non-optional. // Otherwise, it returns the first non-optional ConfigMap. 
func resolveTargetConfigMap(pod *v1.Pod, userSpecifiedName string) (string, error) { refs := collectConfigMapReferences(pod) if len(refs) == 0 { return "", fmt.Errorf("pod %s has no ConfigMap dependency", pod.Name) } if userSpecifiedName != "" { for _, ref := range refs { if ref.Name == userSpecifiedName { if ref.Optional { return "", fmt.Errorf("configmap %s is optional in pod %s, only required ConfigMaps can be deleted for chaos", userSpecifiedName, pod.Name) } return userSpecifiedName, nil } } return "", fmt.Errorf("configmap %s is not referenced by pod %s", userSpecifiedName, pod.Name) } // Auto-select the first non-optional ConfigMap for _, ref := range refs { if !ref.Optional { return ref.Name, nil } } return "", fmt.Errorf("pod %s has no required (non-optional) ConfigMap dependency", pod.Name) } // isOptional returns true only when the *bool pointer is non-nil and true. func isOptional(opt *bool) bool { return opt != nil && *opt } // getBackupConfigMapName generates a deterministic backup name from experiment ID and ConfigMap identity. // The experimentId is truncated to 8 chars to keep names short; the full ID is stored in labels for querying. 
func getBackupConfigMapName(experimentId, namespace, cmName string) string { hash := sha256.Sum256([]byte(fmt.Sprintf("%s/%s/%s", experimentId, namespace, cmName))) hashStr := fmt.Sprintf("%x", hash[:4]) expIdPrefix := experimentId if len(expIdPrefix) > 8 { expIdPrefix = expIdPrefix[:8] } return fmt.Sprintf("chaosblade-backup-%s-%s", expIdPrefix, hashStr) } func copyStringMap(src map[string]string) map[string]string { if src == nil { return nil } dst := make(map[string]string, len(src)) for k, v := range src { dst[k] = v } return dst } func copyByteMap(src map[string][]byte) map[string][]byte { if src == nil { return nil } dst := make(map[string][]byte, len(src)) for k, v := range src { cp := make([]byte, len(v)) copy(cp, v) dst[k] = cp } return dst } ================================================ FILE: exec/pod/configmapdeleteexp_test.go ================================================ /* * Copyright 2025 The ChaosBlade Authors * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. 
*/ package pod import ( "regexp" "strings" "testing" v1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) func boolPtr(b bool) *bool { return &b } func TestCollectConfigMapReferences(t *testing.T) { tests := []struct { name string pod *v1.Pod wantLen int wantRefs []ConfigMapRef }{ { name: "volume configmap only", pod: &v1.Pod{ Spec: v1.PodSpec{ Volumes: []v1.Volume{ { Name: "config-vol", VolumeSource: v1.VolumeSource{ ConfigMap: &v1.ConfigMapVolumeSource{ LocalObjectReference: v1.LocalObjectReference{Name: "app-config"}, }, }, }, }, Containers: []v1.Container{{Name: "main"}}, }, }, wantLen: 1, wantRefs: []ConfigMapRef{ {Name: "app-config", Optional: false, Source: "volume"}, }, }, { name: "envFrom configmap optional", pod: &v1.Pod{ Spec: v1.PodSpec{ Containers: []v1.Container{ { Name: "main", EnvFrom: []v1.EnvFromSource{ { ConfigMapRef: &v1.ConfigMapEnvSource{ LocalObjectReference: v1.LocalObjectReference{Name: "env-config"}, Optional: boolPtr(true), }, }, }, }, }, }, }, wantLen: 1, wantRefs: []ConfigMapRef{ {Name: "env-config", Optional: true, Source: "envFrom"}, }, }, { name: "env valueFrom configmap", pod: &v1.Pod{ Spec: v1.PodSpec{ Containers: []v1.Container{ { Name: "main", Env: []v1.EnvVar{ { Name: "MY_VAR", ValueFrom: &v1.EnvVarSource{ ConfigMapKeyRef: &v1.ConfigMapKeySelector{ LocalObjectReference: v1.LocalObjectReference{Name: "key-config"}, }, }, }, }, }, }, }, }, wantLen: 1, wantRefs: []ConfigMapRef{ {Name: "key-config", Optional: false, Source: "envValueFrom"}, }, }, { name: "mixed sources with deduplication - required wins", pod: &v1.Pod{ Spec: v1.PodSpec{ Volumes: []v1.Volume{ { Name: "vol", VolumeSource: v1.VolumeSource{ ConfigMap: &v1.ConfigMapVolumeSource{ LocalObjectReference: v1.LocalObjectReference{Name: "shared-config"}, Optional: boolPtr(true), }, }, }, }, Containers: []v1.Container{ { Name: "main", EnvFrom: []v1.EnvFromSource{ { ConfigMapRef: &v1.ConfigMapEnvSource{ LocalObjectReference: v1.LocalObjectReference{Name: 
"shared-config"}, }, }, }, }, }, }, }, wantLen: 1, wantRefs: []ConfigMapRef{ {Name: "shared-config", Optional: false, Source: "volume"}, }, }, { name: "no configmap references", pod: &v1.Pod{ Spec: v1.PodSpec{ Containers: []v1.Container{ {Name: "main"}, }, }, }, wantLen: 0, wantRefs: nil, }, { name: "init container configmap", pod: &v1.Pod{ Spec: v1.PodSpec{ InitContainers: []v1.Container{ { Name: "init", EnvFrom: []v1.EnvFromSource{ { ConfigMapRef: &v1.ConfigMapEnvSource{ LocalObjectReference: v1.LocalObjectReference{Name: "init-config"}, }, }, }, }, }, Containers: []v1.Container{{Name: "main"}}, }, }, wantLen: 1, wantRefs: []ConfigMapRef{ {Name: "init-config", Optional: false, Source: "envFrom"}, }, }, { name: "multiple distinct configmaps ordered volume > envFrom > envValueFrom", pod: &v1.Pod{ Spec: v1.PodSpec{ Volumes: []v1.Volume{ { Name: "vol", VolumeSource: v1.VolumeSource{ ConfigMap: &v1.ConfigMapVolumeSource{ LocalObjectReference: v1.LocalObjectReference{Name: "vol-cm"}, }, }, }, }, Containers: []v1.Container{ { Name: "main", EnvFrom: []v1.EnvFromSource{ { ConfigMapRef: &v1.ConfigMapEnvSource{ LocalObjectReference: v1.LocalObjectReference{Name: "env-cm"}, }, }, }, Env: []v1.EnvVar{ { Name: "VAR", ValueFrom: &v1.EnvVarSource{ ConfigMapKeyRef: &v1.ConfigMapKeySelector{ LocalObjectReference: v1.LocalObjectReference{Name: "key-cm"}, }, }, }, }, }, }, }, }, wantLen: 3, wantRefs: []ConfigMapRef{ {Name: "vol-cm", Optional: false, Source: "volume"}, {Name: "env-cm", Optional: false, Source: "envFrom"}, {Name: "key-cm", Optional: false, Source: "envValueFrom"}, }, }, { name: "volume with explicit optional false is required", pod: &v1.Pod{ Spec: v1.PodSpec{ Volumes: []v1.Volume{ { Name: "vol", VolumeSource: v1.VolumeSource{ ConfigMap: &v1.ConfigMapVolumeSource{ LocalObjectReference: v1.LocalObjectReference{Name: "explicit-required"}, Optional: boolPtr(false), }, }, }, }, Containers: []v1.Container{{Name: "main"}}, }, }, wantLen: 1, wantRefs: []ConfigMapRef{ {Name: 
"explicit-required", Optional: false, Source: "volume"}, }, }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { got := collectConfigMapReferences(tt.pod) if len(got) != tt.wantLen { t.Fatalf("collectConfigMapReferences() returned %d refs, want %d", len(got), tt.wantLen) } for i, want := range tt.wantRefs { if got[i].Name != want.Name { t.Errorf("ref[%d].Name = %q, want %q", i, got[i].Name, want.Name) } if got[i].Optional != want.Optional { t.Errorf("ref[%d].Optional = %v, want %v", i, got[i].Optional, want.Optional) } if got[i].Source != want.Source { t.Errorf("ref[%d].Source = %q, want %q", i, got[i].Source, want.Source) } } }) } } func TestResolveTargetConfigMap(t *testing.T) { tests := []struct { name string pod *v1.Pod userSpecifiedName string wantCM string wantErr string }{ { name: "user specifies valid required configmap", pod: &v1.Pod{ ObjectMeta: metav1.ObjectMeta{Name: "test-pod"}, Spec: v1.PodSpec{ Volumes: []v1.Volume{ { Name: "vol", VolumeSource: v1.VolumeSource{ ConfigMap: &v1.ConfigMapVolumeSource{ LocalObjectReference: v1.LocalObjectReference{Name: "my-config"}, }, }, }, }, Containers: []v1.Container{{Name: "main"}}, }, }, userSpecifiedName: "my-config", wantCM: "my-config", }, { name: "user specifies optional configmap - error", pod: &v1.Pod{ ObjectMeta: metav1.ObjectMeta{Name: "test-pod"}, Spec: v1.PodSpec{ Volumes: []v1.Volume{ { Name: "vol", VolumeSource: v1.VolumeSource{ ConfigMap: &v1.ConfigMapVolumeSource{ LocalObjectReference: v1.LocalObjectReference{Name: "opt-config"}, Optional: boolPtr(true), }, }, }, }, Containers: []v1.Container{{Name: "main"}}, }, }, userSpecifiedName: "opt-config", wantErr: "optional", }, { name: "user specifies non-existent configmap - error", pod: &v1.Pod{ ObjectMeta: metav1.ObjectMeta{Name: "test-pod"}, Spec: v1.PodSpec{ Volumes: []v1.Volume{ { Name: "vol", VolumeSource: v1.VolumeSource{ ConfigMap: &v1.ConfigMapVolumeSource{ LocalObjectReference: v1.LocalObjectReference{Name: "real-config"}, }, }, }, 
}, Containers: []v1.Container{{Name: "main"}}, }, }, userSpecifiedName: "not-exist", wantErr: "not referenced", }, { name: "auto select first required configmap", pod: &v1.Pod{ ObjectMeta: metav1.ObjectMeta{Name: "test-pod"}, Spec: v1.PodSpec{ Volumes: []v1.Volume{ { Name: "vol1", VolumeSource: v1.VolumeSource{ ConfigMap: &v1.ConfigMapVolumeSource{ LocalObjectReference: v1.LocalObjectReference{Name: "first-config"}, }, }, }, { Name: "vol2", VolumeSource: v1.VolumeSource{ ConfigMap: &v1.ConfigMapVolumeSource{ LocalObjectReference: v1.LocalObjectReference{Name: "second-config"}, }, }, }, }, Containers: []v1.Container{{Name: "main"}}, }, }, userSpecifiedName: "", wantCM: "first-config", }, { name: "auto select skips optional - picks required", pod: &v1.Pod{ ObjectMeta: metav1.ObjectMeta{Name: "test-pod"}, Spec: v1.PodSpec{ Volumes: []v1.Volume{ { Name: "vol", VolumeSource: v1.VolumeSource{ ConfigMap: &v1.ConfigMapVolumeSource{ LocalObjectReference: v1.LocalObjectReference{Name: "optional-config"}, Optional: boolPtr(true), }, }, }, }, Containers: []v1.Container{ { Name: "main", EnvFrom: []v1.EnvFromSource{ { ConfigMapRef: &v1.ConfigMapEnvSource{ LocalObjectReference: v1.LocalObjectReference{Name: "required-config"}, }, }, }, }, }, }, }, userSpecifiedName: "", wantCM: "required-config", }, { name: "all optional - error", pod: &v1.Pod{ ObjectMeta: metav1.ObjectMeta{Name: "test-pod"}, Spec: v1.PodSpec{ Volumes: []v1.Volume{ { Name: "vol", VolumeSource: v1.VolumeSource{ ConfigMap: &v1.ConfigMapVolumeSource{ LocalObjectReference: v1.LocalObjectReference{Name: "opt-config"}, Optional: boolPtr(true), }, }, }, }, Containers: []v1.Container{{Name: "main"}}, }, }, userSpecifiedName: "", wantErr: "no required", }, { name: "no configmap at all - error", pod: &v1.Pod{ ObjectMeta: metav1.ObjectMeta{Name: "test-pod"}, Spec: v1.PodSpec{ Containers: []v1.Container{{Name: "main"}}, }, }, userSpecifiedName: "", wantErr: "no ConfigMap dependency", }, } for _, tt := range tests { 
t.Run(tt.name, func(t *testing.T) { got, err := resolveTargetConfigMap(tt.pod, tt.userSpecifiedName) if tt.wantErr != "" { if err == nil { t.Fatalf("resolveTargetConfigMap() returned nil error, want error containing %q", tt.wantErr) } if !strings.Contains(err.Error(), tt.wantErr) { t.Errorf("resolveTargetConfigMap() error = %q, want to contain %q", err.Error(), tt.wantErr) } return } if err != nil { t.Fatalf("resolveTargetConfigMap() returned unexpected error: %v", err) } if got != tt.wantCM { t.Errorf("resolveTargetConfigMap() = %q, want %q", got, tt.wantCM) } }) } } func TestGetBackupConfigMapName(t *testing.T) { tests := []struct { name string experimentId string namespace string cmName string }{ { name: "basic", experimentId: "exp-12345", namespace: "default", cmName: "my-config", }, { name: "with uuid", experimentId: "a1b2c3d4-e5f6-7890-abcd-ef1234567890", namespace: "production", cmName: "app-settings", }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { result1 := getBackupConfigMapName(tt.experimentId, tt.namespace, tt.cmName) result2 := getBackupConfigMapName(tt.experimentId, tt.namespace, tt.cmName) // Deterministic if result1 != result2 { t.Errorf("getBackupConfigMapName() not deterministic: %q != %q", result1, result2) } // Valid DNS subdomain name (lowercase alphanumeric and hyphens) validDNS := regexp.MustCompile(`^[a-z0-9]([a-z0-9\-]*[a-z0-9])?$`) if !validDNS.MatchString(result1) { t.Errorf("getBackupConfigMapName() result %q is not a valid DNS subdomain", result1) } // Within K8s name limit if len(result1) > 253 { t.Errorf("getBackupConfigMapName() result length %d exceeds 253", len(result1)) } }) } // Different inputs produce different outputs r1 := getBackupConfigMapName("exp1", "ns1", "cm1") r2 := getBackupConfigMapName("exp1", "ns1", "cm2") r3 := getBackupConfigMapName("exp1", "ns2", "cm1") r4 := getBackupConfigMapName("exp2", "ns1", "cm1") if r1 == r2 { t.Errorf("different cmName should produce different results: %q == %q", r1, 
r2) } if r1 == r3 { t.Errorf("different namespace should produce different results: %q == %q", r1, r3) } if r1 == r4 { t.Errorf("different experimentId should produce different results: %q == %q", r1, r4) } // ExperimentId truncation: short IDs should not be truncated shortResult := getBackupConfigMapName("abc", "ns", "cm") if !strings.Contains(shortResult, "abc") { t.Errorf("short experimentId should appear in name, got %q", shortResult) } // ExperimentId truncation: long IDs should be truncated to 8 chars longId := "a1b2c3d4-e5f6-7890-abcd-ef1234567890" longResult := getBackupConfigMapName(longId, "ns", "cm") if strings.Contains(longResult, longId) { t.Errorf("long experimentId should be truncated, but full ID found in %q", longResult) } if !strings.Contains(longResult, longId[:8]) { t.Errorf("long experimentId should keep first 8 chars %q, got %q", longId[:8], longResult) } // Name length is bounded regardless of input lengths veryLongId := strings.Repeat("x", 200) boundedResult := getBackupConfigMapName(veryLongId, "ns", "cm") // "chaosblade-backup-" (18) + 8 (expId) + "-" (1) + 8 (hash) = 35 if len(boundedResult) > 35 { t.Errorf("name should be bounded, got length %d: %q", len(boundedResult), boundedResult) } } func TestIsOptional(t *testing.T) { tests := []struct { name string opt *bool want bool }{ {"nil is required", nil, false}, {"true is optional", boolPtr(true), true}, {"false is required", boolPtr(false), false}, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { got := isOptional(tt.opt) if got != tt.want { t.Errorf("isOptional(%v) = %v, want %v", tt.opt, got, tt.want) } }) } } func TestCopyStringMap(t *testing.T) { t.Run("nil input returns nil", func(t *testing.T) { got := copyStringMap(nil) if got != nil { t.Errorf("copyStringMap(nil) = %v, want nil", got) } }) t.Run("empty map returns empty map", func(t *testing.T) { got := copyStringMap(map[string]string{}) if got == nil || len(got) != 0 { t.Errorf("copyStringMap(empty) = %v, want 
empty non-nil map", got) } }) t.Run("copies all entries", func(t *testing.T) { src := map[string]string{"a": "1", "b": "2"} got := copyStringMap(src) if len(got) != 2 || got["a"] != "1" || got["b"] != "2" { t.Errorf("copyStringMap() = %v, want %v", got, src) } }) t.Run("modifying copy does not affect original", func(t *testing.T) { src := map[string]string{"key": "val"} dst := copyStringMap(src) dst["key"] = "changed" dst["new"] = "added" if src["key"] != "val" { t.Errorf("original was modified: src[key] = %q, want %q", src["key"], "val") } if _, exists := src["new"]; exists { t.Error("original gained new key from copy modification") } }) } func TestCopyByteMap(t *testing.T) { t.Run("nil input returns nil", func(t *testing.T) { got := copyByteMap(nil) if got != nil { t.Errorf("copyByteMap(nil) = %v, want nil", got) } }) t.Run("empty map returns empty map", func(t *testing.T) { got := copyByteMap(map[string][]byte{}) if got == nil || len(got) != 0 { t.Errorf("copyByteMap(empty) = %v, want empty non-nil map", got) } }) t.Run("copies all entries", func(t *testing.T) { src := map[string][]byte{"a": {1, 2, 3}, "b": {4, 5}} got := copyByteMap(src) if len(got) != 2 { t.Fatalf("copyByteMap() returned %d entries, want 2", len(got)) } if len(got["a"]) != 3 || got["a"][0] != 1 || got["a"][2] != 3 { t.Errorf("copyByteMap()[a] = %v, want [1 2 3]", got["a"]) } }) t.Run("deep copy - modifying byte slice does not affect original", func(t *testing.T) { original := []byte{10, 20, 30} src := map[string][]byte{"data": original} dst := copyByteMap(src) // Modify the copy dst["data"][0] = 99 // Original should be unchanged if src["data"][0] != 10 { t.Errorf("original byte slice was modified: src[data][0] = %d, want 10", src["data"][0]) } }) } ================================================ FILE: exec/pod/containercreating.go ================================================ /* * Copyright 2025 The ChaosBlade Authors * * Licensed under the Apache License, Version 2.0 (the "License"); * 
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package pod

import (
	"context"
	"fmt"
	"strings"
	"time"

	"github.com/chaosblade-io/chaosblade-spec-go/spec"
	"github.com/chaosblade-io/chaosblade-spec-go/util"
	"github.com/sirupsen/logrus"
	v1 "k8s.io/api/core/v1"
	apierrors "k8s.io/apimachinery/pkg/api/errors"
	"k8s.io/apimachinery/pkg/api/resource"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/types"

	"github.com/chaosblade-io/chaosblade-operator/channel"
	"github.com/chaosblade-io/chaosblade-operator/exec/model"
	"github.com/chaosblade-io/chaosblade-operator/pkg/apis/chaosblade/v1alpha1"
)

const (
	// ChaosBladePVAnnotation is the annotation for PV resources created by containercreating action
	ChaosBladePVAnnotation = "chaosblade.io/pv"
	// ChaosBladePVCAnnotation is the annotation for PVC resources created by containercreating action
	ChaosBladePVCAnnotation = "chaosblade.io/pvc"
	// ChaosBladePodAnnotation is the annotation for Pod resources created by containercreating action
	ChaosBladePodAnnotation = "chaosblade.io/pod"
	// ChaosBladeExperimentAnnotation is the annotation key for experiment ID
	ChaosBladeExperimentAnnotation = "chaosblade.io/experiment"
	// ChaosBladeActionCreate is the annotation value for create action
	ChaosBladeActionCreate = "create"
)

// PodContainerCreatingActionSpec declares the "containercreating" action:
// it injects a Pod that is stuck in ContainerCreating via an unmountable
// NFS-backed PV/PVC pair.
type PodContainerCreatingActionSpec struct {
	spec.BaseExpActionCommandSpec
	client *channel.Client
}

// NewPodContainerCreatingActionSpec builds the action spec, wiring the
// single optional flag (volume-mount-path) and the executor that performs
// create/destroy against the cluster through the given client.
func NewPodContainerCreatingActionSpec(client *channel.Client) spec.ExpActionCommandSpec {
	return &PodContainerCreatingActionSpec{
		BaseExpActionCommandSpec: spec.BaseExpActionCommandSpec{
			ActionMatchers: []spec.ExpFlagSpec{},
			ActionFlags: []spec.ExpFlagSpec{
				&spec.ExpFlag{
					Name: "volume-mount-path",
					Desc: "Volume mount path in the container. Default: /mnt/data",
				},
			},
			ActionExecutor: &PodContainerCreatingActionExecutor{client: client},
			ActionExample: `# Create a pod stuck in ContainerCreating state in the default namespace
blade create k8s pod-pod containercreating --namespace default --kubeconfig ~/.kube/config

# Create a pod stuck in ContainerCreating state with custom volume mount path
blade create k8s pod-pod containercreating --namespace default --volume-mount-path /data --kubeconfig ~/.kube/config
`,
			ActionCategories: []string{model.CategorySystemContainer},
		},
		client: client,
	}
}

func (*PodContainerCreatingActionSpec) Name() string {
	return "containercreating"
}

func (*PodContainerCreatingActionSpec) Aliases() []string {
	return []string{}
}

func (*PodContainerCreatingActionSpec) ShortDesc() string {
	return "Make pod stuck in ContainerCreating state by PVC mount failure"
}

func (*PodContainerCreatingActionSpec) LongDesc() string {
	return "Simulate the scenario where a Pod is stuck in ContainerCreating state due to storage volume mount failure. " +
		"This fault is injected by creating a PV with an unreachable NFS server and a PVC bound to it, " +
		"then creating a Pod that mounts this PVC. Since the NFS server is unreachable, the volume mount fails " +
		"and the Pod remains stuck in ContainerCreating state. " +
		"When the experiment is destroyed, the created Pod, PVC, and PV will be cleaned up."
}

// PodContainerCreatingActionExecutor implements create/destroy for the
// containercreating action.
type PodContainerCreatingActionExecutor struct {
	client *channel.Client
}

func (*PodContainerCreatingActionExecutor) Name() string {
	return "containercreating"
}

func (*PodContainerCreatingActionExecutor) SetChannel(channel spec.Channel) {}

// Exec dispatches to destroy when the context carries the destroy marker,
// otherwise to create.
func (d *PodContainerCreatingActionExecutor) Exec(uid string, ctx context.Context, expModel *spec.ExpModel) *spec.Response {
	if _, ok := spec.IsDestroy(ctx); ok {
		return d.destroy(uid, ctx, expModel)
	}
	return d.create(uid, ctx, expModel)
}

// create injects the fault: per unique namespace it creates a PV backed by an
// unreachable NFS server, a PVC pre-bound to it, and a Pod mounting the PVC.
// On partial failure it rolls back best-effort; if rollback itself fails it
// records a *success* status on purpose so that Destroy (which only walks
// successful statuses) will retry the cleanup and nothing is leaked.
func (d *PodContainerCreatingActionExecutor) create(uid string, ctx context.Context, expModel *spec.ExpModel) *spec.Response {
	experimentId := model.GetExperimentIdFromContext(ctx)
	logrusField := logrus.WithField("experiment", experimentId)
	containerObjectMetaList, err := model.GetContainerObjectMetaListFromContext(ctx)
	if err != nil {
		util.Errorf(uid, util.GetRunFuncName(), err.Error())
		return spec.ResponseFailWithResult(spec.ContainerInContextNotFound,
			v1alpha1.CreateFailExperimentStatus(spec.ContainerInContextNotFound.Msg, []v1alpha1.ResourceStatus{}))
	}
	// Parse flags with defaults
	volumeMountPath := expModel.ActionFlags["volume-mount-path"]
	if volumeMountPath == "" {
		volumeMountPath = "/mnt/data"
	}
	// Deduplicate by namespace - create one faulty PV+PVC+Pod per unique namespace
	seenNamespaces := make(map[string]bool)
	statuses := make([]v1alpha1.ResourceStatus, 0)
	success := false
	for _, meta := range containerObjectMetaList {
		if seenNamespaces[meta.Namespace] {
			continue
		}
		seenNamespaces[meta.Namespace] = true
		// NOTE(review): names are derived from the experiment id only, not the
		// namespace; PreCreate restricts the experiment to a single namespace,
		// which keeps the cluster-scoped PV name unique.
		pvName := fmt.Sprintf("chaosblade-cc-%s-pv", experimentId)
		pvcName := fmt.Sprintf("chaosblade-cc-%s-pvc", experimentId)
		podName := fmt.Sprintf("chaosblade-cc-%s-pod", experimentId)
		status := v1alpha1.ResourceStatus{
			Kind:       v1alpha1.PodKind,
			Identifier: fmt.Sprintf("%s//%s", meta.Namespace, podName),
		}
		// Step 1: Create PV with unreachable NFS server
		if err := d.createPV(ctx, pvName, experimentId); err != nil {
			if apierrors.IsAlreadyExists(err) {
				logrusField.Infof("PV %s already exists, skip creation", pvName)
			} else {
				logrusField.Warningf("create PV %s failed: %v", pvName, err)
				status = status.CreateFailResourceStatus(fmt.Sprintf("create PV failed: %v", err), spec.K8sExecFailed.Code)
				statuses = append(statuses, status)
				continue
			}
		} else {
			logrusField.Infof("created PV %s with unreachable NFS server", pvName)
		}
		// Step 2: Create PVC bound to the PV (PVC will be Bound, but mount will fail)
		if err := d.createPVC(ctx, meta.Namespace, pvcName, pvName, experimentId); err != nil {
			if apierrors.IsAlreadyExists(err) {
				logrusField.Infof("PVC %s/%s already exists, skip creation", meta.Namespace, pvcName)
			} else {
				logrusField.Warningf("create PVC %s/%s failed: %v", meta.Namespace, pvcName, err)
				// Best-effort rollback: delete the PV we just created.
				// If rollback fails, the PV will be leaked because we record
				// a failed status and Destroy only processes successful ones.
				// To prevent leaks, record success so Destroy will retry cleanup.
				pvDeleted := true
				if delErr := d.deletePV(ctx, pvName); delErr != nil {
					logrusField.Warningf("rollback PV %s failed: %v", pvName, delErr)
					pvDeleted = false
				}
				if pvDeleted {
					status = status.CreateFailResourceStatus(fmt.Sprintf("create PVC failed: %v", err), spec.K8sExecFailed.Code)
					statuses = append(statuses, status)
				} else {
					logrusField.Warningf("rollback incomplete, recording success status to ensure Destroy can clean up")
					status = status.CreateSuccessResourceStatus()
					statuses = append(statuses, status)
					success = true
				}
				continue
			}
		} else {
			logrusField.Infof("created PVC %s/%s bound to PV %s", meta.Namespace, pvcName, pvName)
		}
		// Step 3: Wait for PVC to be Bound before creating Pod
		if err := d.waitForPVCBound(ctx, meta.Namespace, pvcName, 30*time.Second); err != nil {
			// Not fatal: the Pod is created anyway; binding may complete later.
			logrusField.Warningf("PVC %s/%s is not bound yet: %v", meta.Namespace, pvcName, err)
		}
		// Step 4: Create Pod that mounts the PVC (will be stuck in ContainerCreating)
		if err := d.createPod(ctx, meta.Namespace, podName, pvcName, volumeMountPath, experimentId); err != nil {
			if apierrors.IsAlreadyExists(err) {
				logrusField.Infof("Pod %s/%s already exists, skip creation", meta.Namespace, podName)
			} else {
				logrusField.Warningf("create Pod %s/%s failed: %v", meta.Namespace, podName, err)
				// Best-effort rollback: delete PVC and PV.
				// If rollback fails, resources will be leaked because we record
				// a failed status and Destroy only processes successful ones.
				// To prevent leaks, we still record success so Destroy will
				// attempt cleanup (destroy is idempotent and handles NotFound).
				pvcDeleted := false
				if delErr := d.deletePVC(ctx, meta.Namespace, pvcName); delErr != nil {
					logrusField.Warningf("rollback PVC %s/%s failed: %v", meta.Namespace, pvcName, delErr)
				} else {
					pvcDeleted = true
				}
				pvDeleted := true
				if delErr := d.deletePV(ctx, pvName); delErr != nil {
					logrusField.Warningf("rollback PV %s failed: %v", pvName, delErr)
					pvDeleted = false
				}
				// If rollback fully succeeded, record failure (no leaked resources).
				// If any rollback step failed, record success so Destroy will retry cleanup.
				if pvcDeleted && pvDeleted {
					status = status.CreateFailResourceStatus(fmt.Sprintf("create Pod failed: %v", err), spec.K8sExecFailed.Code)
					statuses = append(statuses, status)
				} else {
					logrusField.Warningf("rollback incomplete, recording success status to ensure Destroy can clean up")
					status = status.CreateSuccessResourceStatus()
					statuses = append(statuses, status)
					success = true
				}
				continue
			}
		} else {
			logrusField.Infof("created Pod %s/%s which will be stuck in ContainerCreating state", meta.Namespace, podName)
		}
		status = status.CreateSuccessResourceStatus()
		statuses = append(statuses, status)
		success = true
	}
	var experimentStatus v1alpha1.ExperimentStatus
	if success {
		experimentStatus = v1alpha1.CreateSuccessExperimentStatus(statuses)
	} else {
		experimentStatus = v1alpha1.CreateFailExperimentStatus("see resStatuses for details", statuses)
	}
	return spec.ReturnResultIgnoreCode(experimentStatus)
}

// destroy removes the injected resources in mount-dependency order
// (Pod -> PVC -> PV); NotFound at any step is treated as already cleaned up,
// making the whole flow idempotent.
func (d *PodContainerCreatingActionExecutor) destroy(uid string, ctx context.Context, expModel *spec.ExpModel) *spec.Response {
	experimentId := model.GetExperimentIdFromContext(ctx)
	logrusField := logrus.WithField("experiment", experimentId)
	containerObjectMetaList, err := model.GetContainerObjectMetaListFromContext(ctx)
	if err != nil {
		util.Errorf(uid, util.GetRunFuncName(), err.Error())
		return spec.ResponseFailWithResult(spec.ContainerInContextNotFound,
			v1alpha1.CreateFailExperimentStatus(spec.ContainerInContextNotFound.Msg, []v1alpha1.ResourceStatus{}))
	}
	statuses := make([]v1alpha1.ResourceStatus, 0)
	allSuccess := true
	seenNamespaces := make(map[string]bool)
	for _, meta := range containerObjectMetaList {
		if seenNamespaces[meta.Namespace] {
			continue
		}
		seenNamespaces[meta.Namespace] = true
		pvName := fmt.Sprintf("chaosblade-cc-%s-pv", experimentId)
		pvcName := fmt.Sprintf("chaosblade-cc-%s-pvc", experimentId)
		podName := fmt.Sprintf("chaosblade-cc-%s-pod", experimentId)
		namespace := meta.Namespace
		status := v1alpha1.ResourceStatus{
			Kind:       v1alpha1.PodKind,
			Identifier: fmt.Sprintf("%s//%s", namespace, podName),
		}
		// Step 1: Delete Pod
		if err := d.deletePod(ctx, namespace, podName); err != nil {
			if apierrors.IsNotFound(err) {
				logrusField.Infof("Pod %s/%s already deleted", namespace, podName)
			} else {
				logrusField.Warningf("delete Pod %s/%s failed: %v", namespace, podName, err)
				status = status.CreateFailResourceStatus(fmt.Sprintf("delete Pod failed: %v", err), spec.K8sExecFailed.Code)
				statuses = append(statuses, status)
				allSuccess = false
				continue
			}
		} else {
			logrusField.Infof("deleted Pod %s/%s", namespace, podName)
		}
		// Step 2: Delete PVC
		if err := d.deletePVC(ctx, namespace, pvcName); err != nil {
			if apierrors.IsNotFound(err) {
				logrusField.Infof("PVC %s/%s already deleted", namespace, pvcName)
			} else {
				logrusField.Warningf("delete PVC %s/%s failed: %v", namespace, pvcName, err)
				status = status.CreateFailResourceStatus(fmt.Sprintf("delete PVC failed: %v", err), spec.K8sExecFailed.Code)
				statuses = append(statuses, status)
				allSuccess = false
				continue
			}
		} else {
			logrusField.Infof("deleted PVC %s/%s", namespace, pvcName)
		}
		// Step 3: Delete PV
		if err := d.deletePV(ctx, pvName); err != nil {
			if apierrors.IsNotFound(err) {
				logrusField.Infof("PV %s already deleted", pvName)
			} else {
				logrusField.Warningf("delete PV %s failed: %v", pvName, err)
				status = status.CreateFailResourceStatus(fmt.Sprintf("delete PV failed: %v", err), spec.K8sExecFailed.Code)
				statuses = append(statuses, status)
				allSuccess = false
				continue
			}
		} else {
			logrusField.Infof("deleted PV %s", pvName)
		}
		status = status.CreateSuccessResourceStatus()
		status.State = v1alpha1.DestroyedState
		statuses = append(statuses, status)
	}
	if allSuccess {
		return spec.ReturnResultIgnoreCode(v1alpha1.CreateDestroyedExperimentStatus(statuses))
	}
	return spec.ReturnResultIgnoreCode(v1alpha1.CreateFailExperimentStatus("see resStatuses for details", statuses))
}

// createPV creates a PersistentVolume with an unreachable NFS server.
// The PV will be Available, allowing PVC binding, but the NFS mount will fail // when a Pod tries to use it, causing the Pod to be stuck in ContainerCreating. func (d *PodContainerCreatingActionExecutor) createPV(ctx context.Context, pvName, experimentId string) error { pv := &v1.PersistentVolume{ ObjectMeta: metav1.ObjectMeta{ Name: pvName, Annotations: map[string]string{ ChaosBladePVAnnotation: ChaosBladeActionCreate, ChaosBladeExperimentAnnotation: experimentId, }, }, Spec: v1.PersistentVolumeSpec{ Capacity: v1.ResourceList{ v1.ResourceStorage: resource.MustParse("1Gi"), }, AccessModes: []v1.PersistentVolumeAccessMode{ v1.ReadWriteOnce, }, PersistentVolumeReclaimPolicy: v1.PersistentVolumeReclaimDelete, PersistentVolumeSource: v1.PersistentVolumeSource{ NFS: &v1.NFSVolumeSource{ // Use a non-routable IP address to simulate unreachable NFS server Server: "10.255.255.1", Path: "/chaosblade-fake-nfs", ReadOnly: false, }, }, }, } return d.client.Create(ctx, pv) } // createPVC creates a PersistentVolumeClaim that binds to the specified PV. // The PVC will be Bound to the PV, but the actual volume mount will fail // because the NFS server is unreachable. 
func (d *PodContainerCreatingActionExecutor) createPVC(ctx context.Context, namespace, pvcName, pvName, experimentId string) error { emptyStr := "" pvc := &v1.PersistentVolumeClaim{ ObjectMeta: metav1.ObjectMeta{ Name: pvcName, Namespace: namespace, Annotations: map[string]string{ ChaosBladePVCAnnotation: ChaosBladeActionCreate, ChaosBladeExperimentAnnotation: experimentId, }, }, Spec: v1.PersistentVolumeClaimSpec{ StorageClassName: &emptyStr, AccessModes: []v1.PersistentVolumeAccessMode{ v1.ReadWriteOnce, }, Resources: v1.VolumeResourceRequirements{ Requests: v1.ResourceList{ v1.ResourceStorage: resource.MustParse("1Gi"), }, }, VolumeName: pvName, }, } return d.client.Create(ctx, pvc) } // createPod creates a Pod that mounts the given PVC, which will cause it to be // stuck in ContainerCreating state because the NFS mount fails. func (d *PodContainerCreatingActionExecutor) createPod(ctx context.Context, namespace, podName, pvcName, volumeMountPath, experimentId string) error { pod := &v1.Pod{ ObjectMeta: metav1.ObjectMeta{ Name: podName, Namespace: namespace, Annotations: map[string]string{ ChaosBladePodAnnotation: ChaosBladeActionCreate, ChaosBladeExperimentAnnotation: experimentId, }, }, Spec: v1.PodSpec{ Containers: []v1.Container{ { Name: "chaosblade-cc", Image: "busybox", Command: []string{ "sleep", "infinity", }, VolumeMounts: []v1.VolumeMount{ { Name: "chaosblade-cc-volume", MountPath: volumeMountPath, }, }, }, }, // Tolerate all taints so the Pod can be scheduled on any node Tolerations: []v1.Toleration{ { Operator: v1.TolerationOpExists, }, }, Volumes: []v1.Volume{ { Name: "chaosblade-cc-volume", VolumeSource: v1.VolumeSource{ PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{ ClaimName: pvcName, }, }, }, }, }, } return d.client.Create(ctx, pod) } // deletePod deletes a Pod by namespace and name func (d *PodContainerCreatingActionExecutor) deletePod(ctx context.Context, namespace, podName string) error { pod := &v1.Pod{ ObjectMeta: 
metav1.ObjectMeta{ Name: podName, Namespace: namespace, }, } return d.client.Delete(ctx, pod) } // deletePVC deletes a PersistentVolumeClaim by namespace and name func (d *PodContainerCreatingActionExecutor) deletePVC(ctx context.Context, namespace, pvcName string) error { pvc := &v1.PersistentVolumeClaim{ ObjectMeta: metav1.ObjectMeta{ Name: pvcName, Namespace: namespace, }, } return d.client.Delete(ctx, pvc) } // waitForPVCBound polls until the PVC is in Bound state or timeout is reached func (d *PodContainerCreatingActionExecutor) waitForPVCBound(ctx context.Context, namespace, pvcName string, timeout time.Duration) error { deadline := time.Now().Add(timeout) for time.Now().Before(deadline) { pvc := &v1.PersistentVolumeClaim{} err := d.client.Get(ctx, types.NamespacedName{Namespace: namespace, Name: pvcName}, pvc) if err != nil { return err } if pvc.Status.Phase == v1.ClaimBound { return nil } time.Sleep(1 * time.Second) } return fmt.Errorf("PVC %s/%s is not bound after %v", namespace, pvcName, timeout) } // deletePV deletes a PersistentVolume by name func (d *PodContainerCreatingActionExecutor) deletePV(ctx context.Context, pvName string) error { pv := &v1.PersistentVolume{ ObjectMeta: metav1.ObjectMeta{ Name: pvName, }, } return d.client.Delete(ctx, pv) } // PreCreate implements model.ActionPreProcessor interface. // It validates the namespace and prepares the context for containercreating action. 
func (a *PodContainerCreatingActionSpec) PreCreate(ctx context.Context, expModel *spec.ExpModel, client *channel.Client) (context.Context, *spec.Response) {
	experimentId := model.GetExperimentIdFromContext(ctx)
	// Validate namespace: must be specified and only one value
	namespace := expModel.ActionFlags[model.ResourceNamespaceFlag.Name]
	if namespace == "" {
		return ctx, spec.ResponseFailWithFlags(spec.ParameterLess, model.ResourceNamespaceFlag.Name)
	}
	if strings.Contains(namespace, ",") {
		return ctx, spec.ResponseFailWithFlags(spec.ParameterInvalidNSNotOne, model.ResourceNamespaceFlag.Name)
	}
	// Seed the context with the synthetic pod this action will create, so the
	// executor has a target even though no pre-existing pod is selected.
	containerObjectMetaList := model.ContainerMatchedList{
		model.ContainerObjectMeta{
			Namespace: namespace,
			PodName:   fmt.Sprintf("chaosblade-cc-%s-pod", experimentId),
		},
	}
	ctx = model.SetContainerObjectMetaListToContext(ctx, containerObjectMetaList)
	return ctx, nil
}

// PreDestroy implements model.ActionPreProcessor interface.
// It prepares the context for containercreating destroy flow.
func (a *PodContainerCreatingActionSpec) PreDestroy(ctx context.Context, expModel *spec.ExpModel, client *channel.Client, oldExpStatus v1alpha1.ExperimentStatus) (context.Context, *spec.Response) {
	experimentId := model.GetExperimentIdFromContext(ctx)
	// No validation here: destroy must proceed even with sparse flags so
	// cleanup is always attempted.
	namespace := expModel.ActionFlags[model.ResourceNamespaceFlag.Name]
	containerObjectMetaList := model.ContainerMatchedList{
		model.ContainerObjectMeta{
			Namespace: namespace,
			PodName:   fmt.Sprintf("chaosblade-cc-%s-pod", experimentId),
		},
	}
	ctx = model.SetContainerObjectMetaListToContext(ctx, containerObjectMetaList)
	return ctx, nil
}

================================================
FILE: exec/pod/containercreatingdisk.go
================================================
/*
 * Copyright 2025 The ChaosBlade Authors
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
* You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package pod import ( "context" "fmt" "strings" "github.com/chaosblade-io/chaosblade-spec-go/spec" "github.com/chaosblade-io/chaosblade-spec-go/util" "github.com/sirupsen/logrus" v1 "k8s.io/api/core/v1" apierrors "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/api/resource" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "github.com/chaosblade-io/chaosblade-operator/channel" "github.com/chaosblade-io/chaosblade-operator/exec/model" "github.com/chaosblade-io/chaosblade-operator/pkg/apis/chaosblade/v1alpha1" ) // PodContainerCreatingDiskActionSpec defines the action spec for containercreating-disk. // It creates a PVC with a specified StorageClass (triggering cloud disk provisioning) // and a Pod that mounts this PVC. When the cloud disk provisioner fails (due to zone // mismatch, disk type not supported, or quota exceeded), the PVC remains Pending and // the Pod is stuck in ContainerCreating. 
type PodContainerCreatingDiskActionSpec struct {
	spec.BaseExpActionCommandSpec
	// client talks to the Kubernetes API server to create/delete PVC and Pod.
	client *channel.Client
}

// NewPodContainerCreatingDiskActionSpec builds the action spec for
// containercreating-disk, wiring its flags and executor.
// Flags: storage-class (required), pv-capacity (default 20Gi),
// volume-mount-path (default /mnt/data).
func NewPodContainerCreatingDiskActionSpec(client *channel.Client) spec.ExpActionCommandSpec {
	return &PodContainerCreatingDiskActionSpec{
		BaseExpActionCommandSpec: spec.BaseExpActionCommandSpec{
			ActionMatchers: []spec.ExpFlagSpec{},
			ActionFlags: []spec.ExpFlagSpec{
				&spec.ExpFlag{
					Name:     "storage-class",
					Desc:     "StorageClass name for PVC creation",
					Required: true,
				},
				&spec.ExpFlag{
					Name: "pv-capacity",
					Desc: "PVC storage capacity, default: 20Gi",
				},
				&spec.ExpFlag{
					Name: "volume-mount-path",
					Desc: "Volume mount path in the container, default: /mnt/data",
				},
			},
			ActionExecutor: &PodContainerCreatingDiskActionExecutor{client: client},
			ActionExample: `# Create a pod stuck in ContainerCreating state by cloud disk PVC failure
blade create k8s pod-pod containercreating-disk --namespace default --storage-class alicloud-disk-ssd --kubeconfig ~/.kube/config

# Specify custom PV capacity
blade create k8s pod-pod containercreating-disk --namespace default --storage-class alicloud-disk-ssd --pv-capacity 50Gi --kubeconfig ~/.kube/config

# Specify custom volume mount path
blade create k8s pod-pod containercreating-disk --namespace default --storage-class alicloud-disk-ssd --volume-mount-path /data --kubeconfig ~/.kube/config`,
			ActionCategories: []string{model.CategorySystemContainer},
		},
		client: client,
	}
}

// Name returns the action name used on the blade command line.
func (*PodContainerCreatingDiskActionSpec) Name() string {
	return "containercreating-disk"
}

func (*PodContainerCreatingDiskActionSpec) Aliases() []string {
	return []string{}
}

func (*PodContainerCreatingDiskActionSpec) ShortDesc() string {
	return "Make pod stuck in ContainerCreating state by cloud disk PVC creation failure"
}

func (*PodContainerCreatingDiskActionSpec) LongDesc() string {
	return "Simulate the scenario where a Pod is stuck in ContainerCreating state due to cloud disk PVC creation failure. " +
		"This fault is injected by creating a PVC with the specified StorageClass (which triggers cloud disk provisioning), " +
		"then creating a Pod that mounts this PVC. When the cloud disk provisioner fails (due to zone mismatch, " +
		"disk type not supported, or quota exceeded), the PVC remains Pending and the Pod is stuck in ContainerCreating. " +
		"When the experiment is destroyed, the created Pod and PVC will be cleaned up."
}

// PodContainerCreatingDiskActionExecutor implements the create and destroy logic
// for the containercreating-disk action.
type PodContainerCreatingDiskActionExecutor struct {
	client *channel.Client
}

func (*PodContainerCreatingDiskActionExecutor) Name() string {
	return "containercreating-disk"
}

// SetChannel is a no-op: this executor talks to the API server via its
// channel.Client, not via a spec.Channel.
func (*PodContainerCreatingDiskActionExecutor) SetChannel(channel spec.Channel) {}

// Exec dispatches to destroy when the context is flagged as a destroy
// operation, otherwise runs the create flow.
func (d *PodContainerCreatingDiskActionExecutor) Exec(uid string, ctx context.Context, expModel *spec.ExpModel) *spec.Response {
	if _, ok := spec.IsDestroy(ctx); ok {
		return d.destroy(uid, ctx, expModel)
	}
	return d.create(uid, ctx, expModel)
}

// create provisions one PVC + one Pod per unique namespace found in the
// context's container meta list. The Pod mounts the PVC; while the PVC stays
// Pending the Pod is stuck in ContainerCreating, which is the injected fault.
func (d *PodContainerCreatingDiskActionExecutor) create(uid string, ctx context.Context, expModel *spec.ExpModel) *spec.Response {
	experimentId := model.GetExperimentIdFromContext(ctx)
	logrusField := logrus.WithField("experiment", experimentId)
	containerObjectMetaList, err := model.GetContainerObjectMetaListFromContext(ctx)
	if err != nil {
		util.Errorf(uid, util.GetRunFuncName(), err.Error())
		return spec.ResponseFailWithResult(spec.ContainerInContextNotFound,
			v1alpha1.CreateFailExperimentStatus(spec.ContainerInContextNotFound.Msg, []v1alpha1.ResourceStatus{}))
	}
	// Parse flags with defaults
	storageClass := expModel.ActionFlags["storage-class"]
	pvCapacity := expModel.ActionFlags["pv-capacity"]
	if pvCapacity == "" {
		pvCapacity = "20Gi"
	}
	volumeMountPath := expModel.ActionFlags["volume-mount-path"]
	if volumeMountPath == "" {
		volumeMountPath = "/mnt/data"
	}
	// Deduplicate by namespace - create one PVC+Pod per unique namespace
	seenNamespaces := make(map[string]bool)
	statuses := make([]v1alpha1.ResourceStatus, 0)
	success := false
	for _, meta := range containerObjectMetaList {
		if seenNamespaces[meta.Namespace] {
			continue
		}
		seenNamespaces[meta.Namespace] = true
		// Resource names embed the experiment id so destroy can rebuild them.
		pvcName := fmt.Sprintf("chaosblade-ccd-%s-pvc", experimentId)
		podName := fmt.Sprintf("chaosblade-ccd-%s-pod", experimentId)
		status := v1alpha1.ResourceStatus{
			Kind:       v1alpha1.PodKind,
			Identifier: fmt.Sprintf("%s//%s", meta.Namespace, podName),
		}
		// Step 1: Create PVC with the specified StorageClass.
		// In clusters without a cloud disk provisioner, the PVC will remain Pending.
		if err := d.createPVC(ctx, meta.Namespace, pvcName, storageClass, pvCapacity, experimentId); err != nil {
			if apierrors.IsAlreadyExists(err) {
				// Idempotent re-run: reuse the existing PVC.
				logrusField.Infof("PVC %s/%s already exists, skip creation", meta.Namespace, pvcName)
			} else {
				logrusField.Warningf("create PVC %s/%s failed: %v", meta.Namespace, pvcName, err)
				status = status.CreateFailResourceStatus(fmt.Sprintf("create PVC failed: %v", err), spec.K8sExecFailed.Code)
				statuses = append(statuses, status)
				continue
			}
		} else {
			logrusField.Infof("created PVC %s/%s with StorageClass %s", meta.Namespace, pvcName, storageClass)
		}
		// Step 2: Create Pod that mounts the PVC.
		// The Pod will be stuck in ContainerCreating since the PVC is not bound.
		if err := d.createPod(ctx, meta.Namespace, podName, pvcName, volumeMountPath, experimentId); err != nil {
			if apierrors.IsAlreadyExists(err) {
				logrusField.Infof("Pod %s/%s already exists, skip creation", meta.Namespace, podName)
			} else {
				logrusField.Warningf("create Pod %s/%s failed: %v", meta.Namespace, podName, err)
				// Best-effort rollback: delete the PVC we just created.
				// If rollback fails, resources will be leaked because we record
				// a failed status and Destroy only processes successful ones.
				// To prevent leaks, we still record success so Destroy will
				// attempt cleanup (destroy is idempotent and handles NotFound).
				pvcDeleted := true
				if delErr := d.deletePVC(ctx, meta.Namespace, pvcName); delErr != nil {
					logrusField.Warningf("rollback PVC %s/%s failed: %v", meta.Namespace, pvcName, delErr)
					pvcDeleted = false
				}
				if pvcDeleted {
					status = status.CreateFailResourceStatus(fmt.Sprintf("create Pod failed: %v", err), spec.K8sExecFailed.Code)
					statuses = append(statuses, status)
				} else {
					logrusField.Warningf("rollback incomplete, recording success status to ensure Destroy can clean up")
					status = status.CreateSuccessResourceStatus()
					statuses = append(statuses, status)
					success = true
				}
				continue
			}
		} else {
			logrusField.Infof("created Pod %s/%s which will be stuck in ContainerCreating state", meta.Namespace, podName)
		}
		status = status.CreateSuccessResourceStatus()
		statuses = append(statuses, status)
		success = true
	}
	// The experiment succeeds if at least one namespace was injected.
	var experimentStatus v1alpha1.ExperimentStatus
	if success {
		experimentStatus = v1alpha1.CreateSuccessExperimentStatus(statuses)
	} else {
		experimentStatus = v1alpha1.CreateFailExperimentStatus("see resStatuses for details", statuses)
	}
	return spec.ReturnResultIgnoreCode(experimentStatus)
}

// destroy removes the Pod first, then the PVC, once per unique namespace.
// Both deletes treat NotFound as already-cleaned-up, so destroy is idempotent.
func (d *PodContainerCreatingDiskActionExecutor) destroy(uid string, ctx context.Context, expModel *spec.ExpModel) *spec.Response {
	experimentId := model.GetExperimentIdFromContext(ctx)
	logrusField := logrus.WithField("experiment", experimentId)
	containerObjectMetaList, err := model.GetContainerObjectMetaListFromContext(ctx)
	if err != nil {
		util.Errorf(uid, util.GetRunFuncName(), err.Error())
		return spec.ResponseFailWithResult(spec.ContainerInContextNotFound,
			v1alpha1.CreateFailExperimentStatus(spec.ContainerInContextNotFound.Msg, []v1alpha1.ResourceStatus{}))
	}
	statuses := make([]v1alpha1.ResourceStatus, 0)
	allSuccess := true
	seenNamespaces := make(map[string]bool)
	for _, meta := range containerObjectMetaList {
		if seenNamespaces[meta.Namespace] {
			continue
		}
		seenNamespaces[meta.Namespace] = true
		// Rebuild the deterministic names used by create.
		pvcName := fmt.Sprintf("chaosblade-ccd-%s-pvc", experimentId)
		podName := fmt.Sprintf("chaosblade-ccd-%s-pod", experimentId)
		namespace := meta.Namespace
		status := v1alpha1.ResourceStatus{
			Kind:       v1alpha1.PodKind,
			Identifier: fmt.Sprintf("%s//%s", namespace, podName),
		}
		// Step 1: Delete Pod
		if err := d.deletePod(ctx, namespace, podName); err != nil {
			if apierrors.IsNotFound(err) {
				logrusField.Infof("Pod %s/%s already deleted", namespace, podName)
			} else {
				logrusField.Warningf("delete Pod %s/%s failed: %v", namespace, podName, err)
				status = status.CreateFailResourceStatus(fmt.Sprintf("delete Pod failed: %v", err), spec.K8sExecFailed.Code)
				statuses = append(statuses, status)
				allSuccess = false
				continue
			}
		} else {
			logrusField.Infof("deleted Pod %s/%s", namespace, podName)
		}
		// Step 2: Delete PVC
		if err := d.deletePVC(ctx, namespace, pvcName); err != nil {
			if apierrors.IsNotFound(err) {
				logrusField.Infof("PVC %s/%s already deleted", namespace, pvcName)
			} else {
				logrusField.Warningf("delete PVC %s/%s failed: %v", namespace, pvcName, err)
				status = status.CreateFailResourceStatus(fmt.Sprintf("delete PVC failed: %v", err), spec.K8sExecFailed.Code)
				statuses = append(statuses, status)
				allSuccess = false
				continue
			}
		} else {
			logrusField.Infof("deleted PVC %s/%s", namespace, pvcName)
		}
		status = status.CreateSuccessResourceStatus()
		status.State = v1alpha1.DestroyedState
		statuses = append(statuses, status)
	}
	if allSuccess {
		return spec.ReturnResultIgnoreCode(v1alpha1.CreateDestroyedExperimentStatus(statuses))
	}
	return spec.ReturnResultIgnoreCode(v1alpha1.CreateFailExperimentStatus("see resStatuses for details", statuses))
}

// createPVC creates a PersistentVolumeClaim with the specified StorageClass.
// The PVC will remain Pending if the cloud disk provisioner is unavailable or
// misconfigured (zone mismatch, disk type not supported, quota exceeded).
// createPVC builds and submits the PVC. Annotations mark the resource as
// chaosblade-owned and record the owning experiment id, which allows safe
// identification during cleanup.
func (d *PodContainerCreatingDiskActionExecutor) createPVC(ctx context.Context, namespace, pvcName, storageClass, pvCapacity, experimentId string) error {
	pvc := &v1.PersistentVolumeClaim{
		ObjectMeta: metav1.ObjectMeta{
			Name:      pvcName,
			Namespace: namespace,
			Annotations: map[string]string{
				ChaosBladePVCAnnotation:        ChaosBladeActionCreate,
				ChaosBladeExperimentAnnotation: experimentId,
			},
		},
		Spec: v1.PersistentVolumeClaimSpec{
			StorageClassName: &storageClass,
			AccessModes: []v1.PersistentVolumeAccessMode{
				v1.ReadWriteOnce,
			},
			// pvCapacity is validated by PreCreate via resource.ParseQuantity,
			// so MustParse cannot panic on the normal flow.
			Resources: v1.VolumeResourceRequirements{
				Requests: v1.ResourceList{
					v1.ResourceStorage: resource.MustParse(pvCapacity),
				},
			},
		},
	}
	return d.client.Create(ctx, pvc)
}

// createPod creates a Pod that mounts the given PVC, which will cause it to be
// stuck in ContainerCreating state because the PVC is not bound (cloud disk
// provisioner failure).
func (d *PodContainerCreatingDiskActionExecutor) createPod(ctx context.Context, namespace, podName, pvcName, volumeMountPath, experimentId string) error {
	pod := &v1.Pod{
		ObjectMeta: metav1.ObjectMeta{
			Name:      podName,
			Namespace: namespace,
			Annotations: map[string]string{
				ChaosBladePodAnnotation:        ChaosBladeActionCreate,
				ChaosBladeExperimentAnnotation: experimentId,
			},
		},
		Spec: v1.PodSpec{
			Containers: []v1.Container{
				{
					Name:    "chaosblade-ccd-container",
					Image:   "busybox",
					Command: []string{"sleep", "infinity"},
					VolumeMounts: []v1.VolumeMount{
						{
							Name:      "chaosblade-ccd-volume",
							MountPath: volumeMountPath,
						},
					},
				},
			},
			// Tolerate every taint so scheduling itself cannot block the
			// experiment; the fault must come from the unbound PVC only.
			Tolerations: []v1.Toleration{
				{
					Operator: v1.TolerationOpExists,
				},
			},
			Volumes: []v1.Volume{
				{
					Name: "chaosblade-ccd-volume",
					VolumeSource: v1.VolumeSource{
						PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{
							ClaimName: pvcName,
						},
					},
				},
			},
		},
	}
	return d.client.Create(ctx, pod)
}

// deletePod deletes a Pod by namespace and name.
// deletePod issues a delete for the named Pod; callers interpret NotFound
// as "already cleaned up".
func (d *PodContainerCreatingDiskActionExecutor) deletePod(ctx context.Context, namespace, podName string) error {
	pod := &v1.Pod{
		ObjectMeta: metav1.ObjectMeta{
			Name:      podName,
			Namespace: namespace,
		},
	}
	return d.client.Delete(ctx, pod)
}

// deletePVC deletes a PersistentVolumeClaim by namespace and name.
func (d *PodContainerCreatingDiskActionExecutor) deletePVC(ctx context.Context, namespace, pvcName string) error {
	pvc := &v1.PersistentVolumeClaim{
		ObjectMeta: metav1.ObjectMeta{
			Name:      pvcName,
			Namespace: namespace,
		},
	}
	return d.client.Delete(ctx, pvc)
}

// PreCreate implements model.ActionPreProcessor interface.
// It validates namespace and storage-class, and prepares the context for
// containercreating-disk action.
func (a *PodContainerCreatingDiskActionSpec) PreCreate(ctx context.Context, expModel *spec.ExpModel, client *channel.Client) (context.Context, *spec.Response) {
	experimentId := model.GetExperimentIdFromContext(ctx)
	// Validate namespace: must be specified and only one value
	namespace := expModel.ActionFlags[model.ResourceNamespaceFlag.Name]
	if namespace == "" {
		return ctx, spec.ResponseFailWithFlags(spec.ParameterLess, model.ResourceNamespaceFlag.Name)
	}
	if strings.Contains(namespace, ",") {
		return ctx, spec.ResponseFailWithFlags(spec.ParameterInvalidNSNotOne, model.ResourceNamespaceFlag.Name)
	}
	// Validate storage-class: must be specified
	storageClass := expModel.ActionFlags["storage-class"]
	if storageClass == "" {
		return ctx, spec.ResponseFailWithFlags(spec.ParameterLess, "storage-class")
	}
	// Validate pv-capacity format if specified; this guarantees the later
	// resource.MustParse in createPVC cannot panic.
	pvCapacity := expModel.ActionFlags["pv-capacity"]
	if pvCapacity != "" {
		if _, err := resource.ParseQuantity(pvCapacity); err != nil {
			return ctx, spec.ResponseFailWithFlags(spec.ParameterIllegal, "pv-capacity", pvCapacity, err.Error())
		}
	}
	// Seed the context with the deterministic pod meta used by the executor.
	containerObjectMetaList := model.ContainerMatchedList{
		model.ContainerObjectMeta{
			Namespace: namespace,
			PodName:   fmt.Sprintf("chaosblade-ccd-%s-pod", experimentId),
		},
	}
	ctx = model.SetContainerObjectMetaListToContext(ctx, containerObjectMetaList)
	return ctx, nil
}

// PreDestroy implements model.ActionPreProcessor interface.
// It prepares the context for containercreating-disk destroy flow.
func (a *PodContainerCreatingDiskActionSpec) PreDestroy(ctx context.Context, expModel *spec.ExpModel, client *channel.Client, oldExpStatus v1alpha1.ExperimentStatus) (context.Context, *spec.Response) {
	experimentId := model.GetExperimentIdFromContext(ctx)
	namespace := expModel.ActionFlags[model.ResourceNamespaceFlag.Name]
	if namespace == "" {
		return ctx, spec.ResponseFailWithFlags(spec.ParameterLess, model.ResourceNamespaceFlag.Name)
	}
	if strings.Contains(namespace, ",") {
		return ctx, spec.ResponseFailWithFlags(spec.ParameterInvalidNSNotOne, model.ResourceNamespaceFlag.Name)
	}
	// Rebuild the same deterministic pod meta that create used.
	containerObjectMetaList := model.ContainerMatchedList{
		model.ContainerObjectMeta{
			Namespace: namespace,
			PodName:   fmt.Sprintf("chaosblade-ccd-%s-pod", experimentId),
		},
	}
	ctx = model.SetContainerObjectMetaListToContext(ctx, containerObjectMetaList)
	return ctx, nil
}

================================================
FILE: exec/pod/containercreatingdisk_test.go
================================================
/*
 * Copyright 2025 The ChaosBlade Authors
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

// Tests for the containercreating-disk action: spec metadata, flag
// registration, and the PreCreate/PreDestroy validation logic. No Kubernetes
// client is needed — all paths exercised here run before any API call.
package pod

import (
	"context"
	"testing"

	"github.com/chaosblade-io/chaosblade-spec-go/spec"

	"github.com/chaosblade-io/chaosblade-operator/exec/model"
	"github.com/chaosblade-io/chaosblade-operator/pkg/apis/chaosblade/v1alpha1"
)

func TestPodContainerCreatingDiskActionSpec_Name(t *testing.T) {
	s := NewPodContainerCreatingDiskActionSpec(nil)
	if s.Name() != "containercreating-disk" {
		t.Errorf("expected name 'containercreating-disk', got '%s'", s.Name())
	}
}

func TestPodContainerCreatingDiskActionSpec_ShortDesc(t *testing.T) {
	s := NewPodContainerCreatingDiskActionSpec(nil)
	if s.ShortDesc() == "" {
		t.Error("ShortDesc should not be empty")
	}
}

func TestPodContainerCreatingDiskActionSpec_LongDesc(t *testing.T) {
	s := NewPodContainerCreatingDiskActionSpec(nil)
	if s.LongDesc() == "" {
		t.Error("LongDesc should not be empty")
	}
}

func TestPodContainerCreatingDiskActionSpec_Aliases(t *testing.T) {
	s := NewPodContainerCreatingDiskActionSpec(nil)
	if len(s.Aliases()) != 0 {
		t.Errorf("expected no aliases, got %v", s.Aliases())
	}
}

// Verifies that all three action flags are registered on the spec.
func TestPodContainerCreatingDiskActionSpec_ActionFlags(t *testing.T) {
	s := NewPodContainerCreatingDiskActionSpec(nil)
	flags := s.Flags()
	if flags == nil {
		t.Fatal("Flags should not be nil")
	}
	flagNames := make(map[string]bool)
	for _, f := range flags {
		flagNames[f.FlagName()] = true
	}
	if !flagNames["storage-class"] {
		t.Error("storage-class flag should exist")
	}
	if !flagNames["pv-capacity"] {
		t.Error("pv-capacity flag should exist")
	}
	if !flagNames["volume-mount-path"] {
		t.Error("volume-mount-path flag should exist")
	}
}

func TestPodContainerCreatingDiskActionSpec_ActionCategories(t *testing.T) {
	s := NewPodContainerCreatingDiskActionSpec(nil)
	categories := s.Categories()
	found := false
	for _, c := range categories {
		if c == model.CategorySystemContainer {
			found = true
			break
		}
	}
	if !found {
		t.Error("CategorySystemContainer should be present")
	}
}

func TestPodContainerCreatingDiskActionSpec_ActionExample(t *testing.T) {
	s := NewPodContainerCreatingDiskActionSpec(nil)
	example := s.Example()
	if example == "" {
		t.Error("Example should not be empty")
	}
}

func TestPodContainerCreatingDiskActionExecutor_Name(t *testing.T) {
	executor := &PodContainerCreatingDiskActionExecutor{}
	if executor.Name() != "containercreating-disk" {
		t.Errorf("expected name 'containercreating-disk', got '%s'", executor.Name())
	}
}

// PreCreate must reject a missing namespace flag.
func TestPreCreate_NamespaceEmpty(t *testing.T) {
	actionSpec := NewPodContainerCreatingDiskActionSpec(nil).(*PodContainerCreatingDiskActionSpec)
	expModel := &spec.ExpModel{
		ActionFlags: map[string]string{
			model.ResourceNamespaceFlag.Name: "",
			"storage-class":                  "alicloud-disk-ssd",
		},
	}
	ctx := context.Background()
	ctx = model.SetExperimentIdToContext(ctx, "test-exp-001")
	_, resp := actionSpec.PreCreate(ctx, expModel, nil)
	if resp == nil {
		t.Fatal("expected error response for empty namespace")
	}
	if resp.Success {
		t.Error("expected PreCreate to fail for empty namespace")
	}
}

// PreCreate must reject more than one namespace (comma-separated list).
func TestPreCreate_NamespaceWithComma(t *testing.T) {
	actionSpec := NewPodContainerCreatingDiskActionSpec(nil).(*PodContainerCreatingDiskActionSpec)
	expModel := &spec.ExpModel{
		ActionFlags: map[string]string{
			model.ResourceNamespaceFlag.Name: "ns1,ns2",
			"storage-class":                  "alicloud-disk-ssd",
		},
	}
	ctx := context.Background()
	ctx = model.SetExperimentIdToContext(ctx, "test-exp-002")
	_, resp := actionSpec.PreCreate(ctx, expModel, nil)
	if resp == nil {
		t.Fatal("expected error response for multi-value namespace")
	}
	if resp.Success {
		t.Error("expected PreCreate to fail for namespace with comma")
	}
}

func TestPreCreate_StorageClassEmpty(t *testing.T) {
	actionSpec := NewPodContainerCreatingDiskActionSpec(nil).(*PodContainerCreatingDiskActionSpec)
	expModel := &spec.ExpModel{
		ActionFlags: map[string]string{
			model.ResourceNamespaceFlag.Name: "default",
			"storage-class":                  "",
		},
	}
	ctx := context.Background()
	ctx = model.SetExperimentIdToContext(ctx, "test-exp-003")
	_, resp := actionSpec.PreCreate(ctx, expModel, nil)
	if resp == nil {
		t.Fatal("expected error response for empty storage-class")
	}
	if resp.Success {
		t.Error("expected PreCreate to fail for empty storage-class")
	}
}

// Happy path: valid flags must populate the context with exactly one
// container meta entry carrying the requested namespace.
func TestPreCreate_Success(t *testing.T) {
	actionSpec := NewPodContainerCreatingDiskActionSpec(nil).(*PodContainerCreatingDiskActionSpec)
	expModel := &spec.ExpModel{
		ActionFlags: map[string]string{
			model.ResourceNamespaceFlag.Name: "default",
			"storage-class":                  "alicloud-disk-ssd",
		},
	}
	ctx := context.Background()
	ctx = model.SetExperimentIdToContext(ctx, "test-exp-004")
	newCtx, resp := actionSpec.PreCreate(ctx, expModel, nil)
	if resp != nil {
		t.Fatalf("expected no error, got response: %+v", resp)
	}
	list, err := model.GetContainerObjectMetaListFromContext(newCtx)
	if err != nil {
		t.Fatalf("failed to get containerObjectMetaList: %v", err)
	}
	if len(list) != 1 {
		t.Fatalf("expected 1 container meta, got %d", len(list))
	}
	if list[0].Namespace != "default" {
		t.Errorf("expected namespace 'default', got '%s'", list[0].Namespace)
	}
}

func TestPreDestroy_Success(t *testing.T) {
	actionSpec := NewPodContainerCreatingDiskActionSpec(nil).(*PodContainerCreatingDiskActionSpec)
	expModel := &spec.ExpModel{
		ActionFlags: map[string]string{
			model.ResourceNamespaceFlag.Name: "default",
			"storage-class":                  "alicloud-disk-ssd",
		},
	}
	ctx := context.Background()
	ctx = model.SetExperimentIdToContext(ctx, "test-exp-005")
	newCtx, resp := actionSpec.PreDestroy(ctx, expModel, nil, v1alpha1.ExperimentStatus{})
	if resp != nil {
		t.Fatalf("expected no error, got response: %+v", resp)
	}
	list, err := model.GetContainerObjectMetaListFromContext(newCtx)
	if err != nil {
		t.Fatalf("failed to get containerObjectMetaList: %v", err)
	}
	if len(list) != 1 {
		t.Fatalf("expected 1 container meta, got %d", len(list))
	}
	if list[0].Namespace != "default" {
		t.Errorf("expected namespace 'default', got '%s'", list[0].Namespace)
	}
}

func TestPreDestroy_NamespaceEmpty(t *testing.T) {
	actionSpec := NewPodContainerCreatingDiskActionSpec(nil).(*PodContainerCreatingDiskActionSpec)
	expModel := &spec.ExpModel{
		ActionFlags: map[string]string{
			model.ResourceNamespaceFlag.Name: "",
			"storage-class":                  "alicloud-disk-ssd",
		},
	}
	ctx := context.Background()
	ctx = model.SetExperimentIdToContext(ctx, "test-exp-006")
	_, resp := actionSpec.PreDestroy(ctx, expModel, nil, v1alpha1.ExperimentStatus{})
	if resp == nil {
		t.Fatal("expected error response for empty namespace")
	}
	if resp.Success {
		t.Error("expected PreDestroy to fail for empty namespace")
	}
}

// pv-capacity must be a parseable Kubernetes resource quantity.
func TestPreCreate_PVCapacityInvalid(t *testing.T) {
	actionSpec := NewPodContainerCreatingDiskActionSpec(nil).(*PodContainerCreatingDiskActionSpec)
	expModel := &spec.ExpModel{
		ActionFlags: map[string]string{
			model.ResourceNamespaceFlag.Name: "default",
			"storage-class":                  "alicloud-disk-ssd",
			"pv-capacity":                    "invalid-capacity",
		},
	}
	ctx := context.Background()
	ctx = model.SetExperimentIdToContext(ctx, "test-exp-007")
	_, resp := actionSpec.PreCreate(ctx, expModel, nil)
	if resp == nil {
		t.Fatal("expected error response for invalid pv-capacity")
	}
	if resp.Success {
		t.Error("expected PreCreate to fail for invalid pv-capacity")
	}
}

func TestPreCreate_PVCapacityValid(t *testing.T) {
	actionSpec := NewPodContainerCreatingDiskActionSpec(nil).(*PodContainerCreatingDiskActionSpec)
	expModel := &spec.ExpModel{
		ActionFlags: map[string]string{
			model.ResourceNamespaceFlag.Name: "default",
			"storage-class":                  "alicloud-disk-ssd",
			"pv-capacity":                    "50Gi",
		},
	}
	ctx := context.Background()
	ctx = model.SetExperimentIdToContext(ctx, "test-exp-008")
	newCtx, resp := actionSpec.PreCreate(ctx, expModel, nil)
	if resp != nil {
		t.Fatalf("expected no error, got response: %+v", resp)
	}
	list, err := model.GetContainerObjectMetaListFromContext(newCtx)
	if err != nil {
		t.Fatalf("failed to get containerObjectMetaList: %v", err)
	}
	if len(list) != 1 {
		t.Fatalf("expected 1 container meta, got %d", len(list))
	}
	if list[0].Namespace != "default" {
		t.Errorf("expected namespace 'default', got '%s'", list[0].Namespace)
	}
}

================================================
FILE: exec/pod/controller.go
================================================
/*
 * Copyright 2025 The ChaosBlade Authors
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package pod

import (
	"context"

	"github.com/chaosblade-io/chaosblade-exec-cri/exec/container"
	"github.com/chaosblade-io/chaosblade-spec-go/spec"
	"github.com/sirupsen/logrus"
	v1 "k8s.io/api/core/v1"

	"github.com/chaosblade-io/chaosblade-operator/channel"
	"github.com/chaosblade-io/chaosblade-operator/exec/model"
	"github.com/chaosblade-io/chaosblade-operator/pkg/apis/chaosblade/v1alpha1"
)

// ExpController is the pod-scope experiment controller; it embeds the shared
// base controller for pod matching and execution plumbing.
type ExpController struct {
	model.BaseExperimentController
}

// NewExpController wires a pod experiment controller with its resource model spec.
func NewExpController(client *channel.Client) model.ExperimentController {
	return &ExpController{
		model.BaseExperimentController{
			Client:            client,
			ResourceModelSpec: NewResourceModelSpec(client),
		},
	}
}

func (*ExpController) Name() string {
	return "pod"
}

// Create pod resource experiments
func (e *ExpController) Create(ctx context.Context, expSpec v1alpha1.ExperimentSpec) *spec.Response {
	expModel := model.ExtractExpModelFromExperimentSpec(expSpec)
	experimentId := model.GetExperimentIdFromContext(ctx)
	logrusField := logrus.WithField("experiment", experimentId)
	// Get action spec to check if it implements ActionPreProcessor
	actionSpec := e.ResourceModelSpec.GetExpActionModelSpec(expModel.Target, expModel.ActionName)
	if actionSpec != nil {
		if preProcessor, ok := actionSpec.(model.ActionPreProcessor); ok {
			// Actions with a pre-processor bypass pod matching entirely: the
			// pre-processor validates flags and seeds the context itself.
			newCtx, resp := preProcessor.PreCreate(ctx, expModel, e.Client)
			if
resp != nil {
				return resp
			}
			ctx = newCtx
			logrusField.Infof("creating %s experiment with pre-processing", expModel.ActionName)
			return e.Exec(ctx, expModel)
		}
	}
	// Default flow: find matched pods and execute
	pods, resp := e.GetMatchedPodResources(ctx, *expModel)
	if !resp.Success {
		logrusField.Errorf("uid: %s, get matched pod experiment failed, %v", experimentId, resp.Err)
		resp.Result = v1alpha1.CreateFailExperimentStatus(resp.Err, []v1alpha1.ResourceStatus{})
		// FIX: return the failure response. Previously execution fell through
		// with an empty pod list, and the real matching error was masked by a
		// misleading ContainerInContextNotFound result below.
		return resp
	}
	logrusField.Infof("creating pod experiment, pod count is %d", len(pods))
	containerObjectMetaList := getContainerMatchedList(experimentId, pods)
	if len(containerObjectMetaList) == 0 {
		logrusField.Errorf("uid: %s, get container from context failed", experimentId)
		return spec.ResponseFailWithResult(spec.ContainerInContextNotFound,
			v1alpha1.CreateFailExperimentStatus(spec.ContainerInContextNotFound.Msg, []v1alpha1.ResourceStatus{}))
	}
	ctx = model.SetContainerObjectMetaListToContext(ctx, containerObjectMetaList)
	return e.Exec(ctx, expModel)
}

// Destroy tears down a pod experiment. Actions implementing
// ActionPreProcessor rebuild their own context (same path as Create); all
// other actions replay the successful resource statuses recorded at create time.
func (e *ExpController) Destroy(ctx context.Context, expSpec v1alpha1.ExperimentSpec, oldExpStatus v1alpha1.ExperimentStatus) *spec.Response {
	expModel := model.ExtractExpModelFromExperimentSpec(expSpec)
	experimentId := model.GetExperimentIdFromContext(ctx)
	logrus.WithField("experiment", experimentId).Infoln("start to destroy")
	// Check if action implements ActionPreProcessor - use the same path as Create
	actionSpec := e.ResourceModelSpec.GetExpActionModelSpec(expModel.Target, expModel.ActionName)
	if actionSpec != nil {
		if preProcessor, ok := actionSpec.(model.ActionPreProcessor); ok {
			newCtx, resp := preProcessor.PreDestroy(ctx, expModel, e.Client, oldExpStatus)
			if resp != nil {
				return resp
			}
			ctx = newCtx
			return e.Exec(ctx, expModel)
		}
	}
	// Default flow: find matched containers and destroy
	statuses := oldExpStatus.ResStatuses
	if len(statuses) == 0 {
		return spec.ReturnSuccess(v1alpha1.CreateSuccessExperimentStatus([]v1alpha1.ResourceStatus{}))
	}
	containerObjectMetaList :=
model.ContainerMatchedList{} for _, status := range statuses { if !status.Success { continue } containerObjectMeta := model.ParseIdentifier(status.Identifier) containerObjectMeta.Id = status.Id containerObjectMetaList = append(containerObjectMetaList, containerObjectMeta) } if len(containerObjectMetaList) == 0 { return spec.ReturnSuccess(v1alpha1.CreateSuccessExperimentStatus([]v1alpha1.ResourceStatus{})) } ctx = model.SetContainerObjectMetaListToContext(ctx, containerObjectMetaList) return e.Exec(ctx, expModel) } // getContainerMatchedList transports selected pods func getContainerMatchedList(experimentId string, pods []v1.Pod) model.ContainerMatchedList { containerObjectMetaList := model.ContainerMatchedList{} for _, p := range pods { containerId, containerName, runtime, err := model.GetOneAvailableContainerIdFromPod(p) if runtime == container.DockerRuntime { containerId = containerId[:12] } if err != nil { logrus.WithField("experiment", experimentId).WithField("pod", p.Name). Errorf("get an available container failed, %v", err) continue } containerObjectMetaList = append(containerObjectMetaList, model.ContainerObjectMeta{ ContainerId: containerId, ContainerName: containerName, ContainerRuntime: runtime, PodName: p.Name, NodeName: p.Spec.NodeName, Namespace: p.Namespace, }) } return containerObjectMetaList } ================================================ FILE: exec/pod/delete.go ================================================ /* * Copyright 2025 The ChaosBlade Authors * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package pod

import (
	"context"
	"fmt"

	"github.com/chaosblade-io/chaosblade-spec-go/spec"
	"github.com/chaosblade-io/chaosblade-spec-go/util"
	"github.com/sirupsen/logrus"
	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

	"github.com/chaosblade-io/chaosblade-operator/channel"
	"github.com/chaosblade-io/chaosblade-operator/exec/model"
	"github.com/chaosblade-io/chaosblade-operator/pkg/apis/chaosblade/v1alpha1"
)

// DeletePodActionSpec describes the pod "delete" chaos action.
type DeletePodActionSpec struct {
	spec.BaseExpActionCommandSpec
}

// NewDeletePodActionSpec builds the delete action spec; the optional
// --random flag picks target pods randomly from the matched set.
func NewDeletePodActionSpec(client *channel.Client) spec.ExpActionCommandSpec {
	return &DeletePodActionSpec{
		spec.BaseExpActionCommandSpec{
			ActionMatchers: []spec.ExpFlagSpec{},
			ActionFlags: []spec.ExpFlagSpec{
				&spec.ExpFlag{
					Name:   "random",
					Desc:   "Randomly select pod",
					NoArgs: true,
				},
			},
			ActionExecutor: &DeletePodActionExecutor{client: client},
			ActionExample: `# Deletes the POD under the specified default namespace that is app=guestbook
blade create k8s pod-pod delete --labels app=guestbook --namespace default --evict-count 2 --kubeconfig ~/.kube/config`,
			ActionCategories: []string{model.CategorySystemContainer},
		},
	}
}

func (*DeletePodActionSpec) Name() string {
	return "delete"
}

func (*DeletePodActionSpec) Aliases() []string {
	return []string{}
}

func (*DeletePodActionSpec) ShortDesc() string {
	return "Delete pods"
}

func (*DeletePodActionSpec) LongDesc() string {
	return "Delete pods"
}

// DeletePodActionExecutor performs the actual pod deletions.
type DeletePodActionExecutor struct {
	client *channel.Client
}

func (*DeletePodActionExecutor) Name() string {
	return "delete"
}

func (*DeletePodActionExecutor) SetChannel(channel spec.Channel) {
}

// Exec dispatches to destroy or create depending on the context flag.
// NOTE(review): the parameter named "model" shadows the imported model
// package inside this function; harmless here since Exec does not use the
// package, but worth renaming for clarity.
func (d *DeletePodActionExecutor) Exec(uid string, ctx context.Context, model *spec.ExpModel) *spec.Response {
	if _, ok := spec.IsDestroy(ctx); ok {
		return d.destroy(uid, ctx, model)
	} else {
		return d.create(uid, ctx, model)
	}
}

// create deletes every matched pod; deleting the pod IS the injected fault.
// The experiment is marked successful if at least one deletion succeeded.
func (d *DeletePodActionExecutor) create(uid string, ctx context.Context, expModel *spec.ExpModel) *spec.Response {
	containerObjectMetaList, err := model.GetContainerObjectMetaListFromContext(ctx)
	if err != nil {
		util.Errorf(uid, util.GetRunFuncName(), err.Error())
		return spec.ResponseFailWithResult(spec.ContainerInContextNotFound,
			v1alpha1.CreateFailExperimentStatus(spec.ContainerInContextNotFound.Msg, []v1alpha1.ResourceStatus{}))
	}
	statuses := make([]v1alpha1.ResourceStatus, 0)
	success := false
	for _, meta := range containerObjectMetaList {
		status := v1alpha1.ResourceStatus{
			Kind:       v1alpha1.PodKind,
			Identifier: fmt.Sprintf("%s/%s/%s", meta.Namespace, meta.NodeName, meta.PodName),
		}
		objectMeta := metav1.ObjectMeta{Name: meta.PodName, Namespace: meta.Namespace}
		// NOTE(review): uses context.TODO() rather than ctx for the API
		// call — presumably intentional so deletion is not cut short by
		// request cancellation; confirm.
		err := d.client.Delete(context.TODO(), &v1.Pod{ObjectMeta: objectMeta})
		if err != nil {
			logrus.WithField("experiment", model.GetExperimentIdFromContext(ctx)).
				Warningf("delete pod %s err, %v", meta.PodName, err)
			status = status.CreateFailResourceStatus(err.Error(), spec.K8sExecFailed.Code)
		} else {
			status = status.CreateSuccessResourceStatus()
			success = true
		}
		statuses = append(statuses, status)
	}
	var experimentStatus v1alpha1.ExperimentStatus
	if success {
		experimentStatus = v1alpha1.CreateSuccessExperimentStatus(statuses)
	} else {
		experimentStatus = v1alpha1.CreateFailExperimentStatus("see resStatuses for details", statuses)
	}
	return spec.ReturnResultIgnoreCode(experimentStatus)
}

// destroy has nothing to undo (the pods are already gone); it simply marks
// every recorded resource as destroyed.
func (d *DeletePodActionExecutor) destroy(uid string, ctx context.Context, expModel *spec.ExpModel) *spec.Response {
	containerObjectMetaList, err := model.GetContainerObjectMetaListFromContext(ctx)
	if err != nil {
		util.Errorf(uid, util.GetRunFuncName(), err.Error())
		return spec.ResponseFailWithResult(spec.ContainerInContextNotFound,
			v1alpha1.CreateFailExperimentStatus(spec.ContainerInContextNotFound.Msg, []v1alpha1.ResourceStatus{}))
	}
	experimentStatus := v1alpha1.CreateDestroyedExperimentStatus([]v1alpha1.ResourceStatus{})
	statuses := experimentStatus.ResStatuses
	for _, c := range containerObjectMetaList {
		status := v1alpha1.ResourceStatus{
			Id:         c.Id,
			Kind:       v1alpha1.PodKind,
			State:      v1alpha1.DestroyedState,
			Success:    true,
			Identifier: c.GetIdentifier(),
		}
		statuses = append(statuses, status)
	}
	experimentStatus.ResStatuses = statuses
	return spec.ReturnResultIgnoreCode(experimentStatus)
}

================================================
FILE: exec/pod/failedmount.go
================================================
/*
 * Copyright 2025 The ChaosBlade Authors
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
*/ package pod import ( "context" "crypto/rand" "encoding/hex" "encoding/json" "fmt" "strings" "github.com/chaosblade-io/chaosblade-spec-go/spec" "github.com/chaosblade-io/chaosblade-spec-go/util" "github.com/sirupsen/logrus" appsv1 "k8s.io/api/apps/v1" v1 "k8s.io/api/core/v1" apierrors "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/types" "github.com/chaosblade-io/chaosblade-operator/channel" "github.com/chaosblade-io/chaosblade-operator/exec/model" "github.com/chaosblade-io/chaosblade-operator/pkg/apis/chaosblade/v1alpha1" ) const ( ChaosBladeFailedMountAction = "failedmount" ChaosBladeOriginalVolumesAnnotation = "chaosblade.io/original-volumes" ChaosBladeFailedMountVolumeNamePrefix = "chaosblade-fm-" ChaosBladeFailedMountVolumeMountPath = "/chaosblade-fm-nonexistent" FailedMountVolumeTypeConfigMap = "configmap" FailedMountVolumeTypeSecret = "secret" FailedMountVolumeTypePVC = "pvc" ) type FailedMountActionSpec struct { spec.BaseExpActionCommandSpec client *channel.Client } func NewFailedMountActionSpec(client *channel.Client) spec.ExpActionCommandSpec { return &FailedMountActionSpec{ BaseExpActionCommandSpec: spec.BaseExpActionCommandSpec{ ActionMatchers: []spec.ExpFlagSpec{}, ActionFlags: []spec.ExpFlagSpec{ &spec.ExpFlag{ Name: "workload-type", Desc: "Workload type: deployment, daemonset, statefulset. Default: deployment", Required: false, Default: "deployment", }, &spec.ExpFlag{ Name: "workload-name", Desc: "Workload name to inject failed mount volume", Required: true, }, &spec.ExpFlag{ Name: "volume-type", Desc: "Volume type to inject: configmap, secret, pvc", Required: true, }, &spec.ExpFlag{ Name: "with-initcontainer", Desc: "Mount the non-existent volume to init containers first. 
Default: false", Required: false, Default: "false", }, }, ActionExecutor: &FailedMountActionExecutor{client: client}, ActionExample: `# Mount a non-existent configmap volume to a deployment blade create k8s pod-pod failedmount --namespace default --workload-type deployment --workload-name nginx-app --volume-type configmap --kubeconfig ~/.kube/config # Mount a non-existent secret volume to a deployment with init container blade create k8s pod-pod failedmount --namespace default --workload-type deployment --workload-name nginx-app --volume-type secret --with-initcontainer true --kubeconfig ~/.kube/config # Mount a non-existent pvc volume to a statefulset blade create k8s pod-pod failedmount --namespace default --workload-type statefulset --workload-name redis-app --volume-type pvc --kubeconfig ~/.kube/config `, ActionCategories: []string{model.CategorySystemContainer}, }, client: client, } } func (*FailedMountActionSpec) Name() string { return "failedmount" } func (*FailedMountActionSpec) Aliases() []string { return []string{} } func (*FailedMountActionSpec) ShortDesc() string { return "Mount a non-existent configmap/secret/pvc volume to simulate volume mount failure" } func (*FailedMountActionSpec) LongDesc() string { return "Inject a fault by adding a volume referencing a non-existent ConfigMap, Secret, or PVC " + "to the target workload (Deployment/DaemonSet/StatefulSet). The volume name is randomly generated. " + "When --with-initcontainer is true, the volume mount is added to init containers first. " + "The original volume configuration is backed up in an annotation and restored when the experiment is destroyed." } // PreCreate implements model.ActionPreProcessor. 
func (a *FailedMountActionSpec) PreCreate(ctx context.Context, expModel *spec.ExpModel, client *channel.Client) (context.Context, *spec.Response) { namespace := expModel.ActionFlags[model.ResourceNamespaceFlag.Name] workloadType := expModel.ActionFlags["workload-type"] if workloadType == "" { workloadType = "deployment" } workloadName := expModel.ActionFlags["workload-name"] if namespace == "" { return ctx, spec.ResponseFailWithFlags(spec.ParameterLess, model.ResourceNamespaceFlag.Name) } if strings.Contains(namespace, ",") { return ctx, spec.ResponseFailWithFlags(spec.ParameterInvalidNSNotOne, model.ResourceNamespaceFlag.Name) } if workloadName == "" { return ctx, spec.ResponseFailWithFlags(spec.ParameterLess, "workload-name") } volumeType := expModel.ActionFlags["volume-type"] if volumeType == "" { return ctx, spec.ResponseFailWithFlags(spec.ParameterLess, "volume-type") } if volumeType != FailedMountVolumeTypeConfigMap && volumeType != FailedMountVolumeTypeSecret && volumeType != FailedMountVolumeTypePVC { return ctx, spec.ResponseFailWithFlags(spec.ParameterIllegal, "volume-type", volumeType, "must be one of: configmap, secret, pvc") } containerObjectMetaList := model.ContainerMatchedList{ model.ContainerObjectMeta{ Namespace: namespace, PodName: fmt.Sprintf("chaosblade-fm-%s-%s", workloadType, workloadName), }, } ctx = model.SetContainerObjectMetaListToContext(ctx, containerObjectMetaList) return ctx, nil } // PreDestroy implements model.ActionPreProcessor. 
// PreDestroy mirrors PreCreate's flag resolution (without volume-type checks,
// which are unnecessary on recovery) and re-registers the same synthetic
// container match so destroy is routed to the right experiment entry.
func (a *FailedMountActionSpec) PreDestroy(ctx context.Context, expModel *spec.ExpModel, client *channel.Client, oldExpStatus v1alpha1.ExperimentStatus) (context.Context, *spec.Response) {
	namespace := expModel.ActionFlags[model.ResourceNamespaceFlag.Name]
	workloadType := expModel.ActionFlags["workload-type"]
	if workloadType == "" {
		workloadType = "deployment"
	}
	workloadName := expModel.ActionFlags["workload-name"]
	if namespace == "" {
		return ctx, spec.ResponseFailWithFlags(spec.ParameterLess, model.ResourceNamespaceFlag.Name)
	}
	if strings.Contains(namespace, ",") {
		// Only a single namespace is accepted.
		return ctx, spec.ResponseFailWithFlags(spec.ParameterInvalidNSNotOne, model.ResourceNamespaceFlag.Name)
	}
	if workloadName == "" {
		return ctx, spec.ResponseFailWithFlags(spec.ParameterLess, "workload-name")
	}
	// Synthetic identifier, matching the one set in PreCreate.
	containerObjectMetaList := model.ContainerMatchedList{
		model.ContainerObjectMeta{
			Namespace: namespace,
			PodName:   fmt.Sprintf("chaosblade-fm-%s-%s", workloadType, workloadName),
		},
	}
	ctx = model.SetContainerObjectMetaListToContext(ctx, containerObjectMetaList)
	return ctx, nil
}

// FailedMountActionExecutor performs the inject/restore of the failed-mount fault.
type FailedMountActionExecutor struct {
	client *channel.Client
}

func (*FailedMountActionExecutor) Name() string { return "failedmount" }

func (*FailedMountActionExecutor) SetChannel(channel spec.Channel) {}

// Exec dispatches to destroy (recover) or create (inject) based on the context.
func (d *FailedMountActionExecutor) Exec(uid string, ctx context.Context, expModel *spec.ExpModel) *spec.Response {
	if _, ok := spec.IsDestroy(ctx); ok {
		return d.destroy(uid, ctx, expModel)
	}
	return d.create(uid, ctx, expModel)
}

// create validates the flags again (Exec can be reached without PreCreate in
// some code paths — the duplication is deliberate), fetches the target
// workload, and injects the non-existent volume via the per-kind helper.
// Any failure is returned as a single failed ResourceStatus.
func (d *FailedMountActionExecutor) create(uid string, ctx context.Context, expModel *spec.ExpModel) *spec.Response {
	experimentId := model.GetExperimentIdFromContext(ctx)
	logrusField := logrus.WithField("experiment", experimentId)
	namespace := expModel.ActionFlags[model.ResourceNamespaceFlag.Name]
	workloadType := expModel.ActionFlags["workload-type"]
	if workloadType == "" {
		workloadType = "deployment"
	}
	workloadName := expModel.ActionFlags["workload-name"]
	volumeType := expModel.ActionFlags["volume-type"]
	// Any spelling of "true" enables init-container mounting.
	withInitContainer := strings.EqualFold(expModel.ActionFlags["with-initcontainer"], "true")
	if namespace == "" {
		util.Errorf(uid, util.GetRunFuncName(), "namespace is required")
		return spec.ResponseFailWithFlags(spec.ParameterLess, model.ResourceNamespaceFlag.Name)
	}
	if workloadName == "" {
		util.Errorf(uid, util.GetRunFuncName(), "workload-name is required")
		return spec.ResponseFailWithFlags(spec.ParameterLess, "workload-name")
	}
	if volumeType == "" {
		util.Errorf(uid, util.GetRunFuncName(), "volume-type is required")
		return spec.ResponseFailWithFlags(spec.ParameterLess, "volume-type")
	}
	if volumeType != FailedMountVolumeTypeConfigMap && volumeType != FailedMountVolumeTypeSecret && volumeType != FailedMountVolumeTypePVC {
		util.Errorf(uid, util.GetRunFuncName(), fmt.Sprintf("invalid volume-type: %s", volumeType))
		return spec.ResponseFailWithFlags(spec.ParameterIllegal, "volume-type", volumeType, "must be one of: configmap, secret, pvc")
	}
	// One status describes the whole workload-level injection.
	status := v1alpha1.ResourceStatus{
		Kind:       v1alpha1.PodKind,
		Identifier: fmt.Sprintf("%s//%s//%s", namespace, workloadType, workloadName),
	}
	switch workloadType {
	case "deployment":
		deployment := &appsv1.Deployment{}
		err := d.client.Get(ctx, types.NamespacedName{Namespace: namespace, Name: workloadName}, deployment)
		if err != nil {
			// Distinguish "not found" from other API errors in both log and status.
			if apierrors.IsNotFound(err) {
				logrusField.Warningf("deployment %s/%s not found", namespace, workloadName)
				status = status.CreateFailResourceStatus(fmt.Sprintf("deployment not found: %v", err), spec.K8sExecFailed.Code)
			} else {
				logrusField.Warningf("get deployment %s/%s failed: %v", namespace, workloadName, err)
				status = status.CreateFailResourceStatus(fmt.Sprintf("get deployment failed: %v", err), spec.K8sExecFailed.Code)
			}
			return spec.ReturnResultIgnoreCode(v1alpha1.CreateFailExperimentStatus(status.Error, []v1alpha1.ResourceStatus{status}))
		}
		if err := d.injectDeploymentFailedMount(ctx, deployment, volumeType, withInitContainer, experimentId); err != nil {
			logrusField.Warningf("inject failed mount to deployment %s/%s failed: %v", namespace, workloadName, err)
			status = status.CreateFailResourceStatus(fmt.Sprintf("inject failed mount failed: %v", err), spec.K8sExecFailed.Code)
			return spec.ReturnResultIgnoreCode(v1alpha1.CreateFailExperimentStatus(status.Error, []v1alpha1.ResourceStatus{status}))
		}
		logrusField.Infof("injected failed mount to deployment %s/%s with volume-type=%s", namespace, workloadName, volumeType)
	case "daemonset":
		daemonset := &appsv1.DaemonSet{}
		err := d.client.Get(ctx, types.NamespacedName{Namespace: namespace, Name: workloadName}, daemonset)
		if err != nil {
			if apierrors.IsNotFound(err) {
				logrusField.Warningf("daemonset %s/%s not found", namespace, workloadName)
				status = status.CreateFailResourceStatus(fmt.Sprintf("daemonset not found: %v", err), spec.K8sExecFailed.Code)
			} else {
				logrusField.Warningf("get daemonset %s/%s failed: %v", namespace, workloadName, err)
				status = status.CreateFailResourceStatus(fmt.Sprintf("get daemonset failed: %v", err), spec.K8sExecFailed.Code)
			}
			return spec.ReturnResultIgnoreCode(v1alpha1.CreateFailExperimentStatus(status.Error, []v1alpha1.ResourceStatus{status}))
		}
		if err := d.injectDaemonSetFailedMount(ctx, daemonset, volumeType, withInitContainer, experimentId); err != nil {
			logrusField.Warningf("inject failed mount to daemonset %s/%s failed: %v", namespace, workloadName, err)
			status = status.CreateFailResourceStatus(fmt.Sprintf("inject failed mount failed: %v", err), spec.K8sExecFailed.Code)
			return spec.ReturnResultIgnoreCode(v1alpha1.CreateFailExperimentStatus(status.Error, []v1alpha1.ResourceStatus{status}))
		}
		logrusField.Infof("injected failed mount to daemonset %s/%s with volume-type=%s", namespace, workloadName, volumeType)
	case "statefulset":
		statefulset := &appsv1.StatefulSet{}
		err := d.client.Get(ctx, types.NamespacedName{Namespace: namespace, Name: workloadName}, statefulset)
		if err != nil {
			if apierrors.IsNotFound(err) {
				logrusField.Warningf("statefulset %s/%s not found", namespace, workloadName)
				status = status.CreateFailResourceStatus(fmt.Sprintf("statefulset not found: %v", err), spec.K8sExecFailed.Code)
			} else {
				logrusField.Warningf("get statefulset %s/%s failed: %v", namespace, workloadName, err)
				status = status.CreateFailResourceStatus(fmt.Sprintf("get statefulset failed: %v", err), spec.K8sExecFailed.Code)
			}
			return spec.ReturnResultIgnoreCode(v1alpha1.CreateFailExperimentStatus(status.Error, []v1alpha1.ResourceStatus{status}))
		}
		if err := d.injectStatefulSetFailedMount(ctx, statefulset, volumeType, withInitContainer, experimentId); err != nil {
			logrusField.Warningf("inject failed mount to statefulset %s/%s failed: %v", namespace, workloadName, err)
			status = status.CreateFailResourceStatus(fmt.Sprintf("inject failed mount failed: %v", err), spec.K8sExecFailed.Code)
			return spec.ReturnResultIgnoreCode(v1alpha1.CreateFailExperimentStatus(status.Error, []v1alpha1.ResourceStatus{status}))
		}
		logrusField.Infof("injected failed mount to statefulset %s/%s with volume-type=%s", namespace, workloadName, volumeType)
	default:
		status = status.CreateFailResourceStatus(fmt.Sprintf("unsupported workload type: %s", workloadType), spec.ParameterIllegal.Code)
		return spec.ReturnResultIgnoreCode(v1alpha1.CreateFailExperimentStatus(status.Error, []v1alpha1.ResourceStatus{status}))
	}
	status = status.CreateSuccessResourceStatus()
	return spec.ReturnResultIgnoreCode(v1alpha1.CreateSuccessExperimentStatus([]v1alpha1.ResourceStatus{status}))
}

// destroy fetches the workload and restores its original volume configuration
// from the backup annotation via the per-kind restore helper.
// handleGetError (defined elsewhere in this package) converts Get errors into
// a terminal response when appropriate.
func (d *FailedMountActionExecutor) destroy(uid string, ctx context.Context, expModel *spec.ExpModel) *spec.Response {
	experimentId := model.GetExperimentIdFromContext(ctx)
	logrusField := logrus.WithField("experiment", experimentId)
	namespace := expModel.ActionFlags[model.ResourceNamespaceFlag.Name]
	workloadType := expModel.ActionFlags["workload-type"]
	if workloadType == "" {
		workloadType = "deployment"
	}
	workloadName := expModel.ActionFlags["workload-name"]
	status := v1alpha1.ResourceStatus{
		Kind:       v1alpha1.PodKind,
		Identifier: fmt.Sprintf("%s//%s//%s", namespace, workloadType, workloadName),
	}
	switch workloadType {
	case "deployment":
		deployment := &appsv1.Deployment{}
		err := d.client.Get(ctx, types.NamespacedName{Namespace: namespace, Name: workloadName}, deployment)
		if resp, handled := handleGetError(err, namespace, workloadType, workloadName, &status, logrusField); handled {
			return resp
		}
		if err := d.restoreDeploymentVolumes(ctx, deployment, experimentId); err != nil {
			logrusField.Warningf("restore deployment %s/%s volumes failed: %v", namespace, workloadName, err)
			status = status.CreateFailResourceStatus(fmt.Sprintf("restore deployment volumes failed: %v", err), spec.K8sExecFailed.Code)
			return spec.ReturnResultIgnoreCode(v1alpha1.CreateFailExperimentStatus(status.Error, []v1alpha1.ResourceStatus{status}))
		}
		logrusField.Infof("restored deployment %s/%s volumes", namespace, workloadName)
	case "daemonset":
		daemonset := &appsv1.DaemonSet{}
		err := d.client.Get(ctx, types.NamespacedName{Namespace: namespace, Name: workloadName}, daemonset)
		if resp, handled := handleGetError(err, namespace, workloadType, workloadName, &status, logrusField); handled {
			return resp
		}
		if err := d.restoreDaemonSetVolumes(ctx, daemonset, experimentId); err != nil {
			logrusField.Warningf("restore daemonset %s/%s volumes failed: %v", namespace, workloadName, err)
			status = status.CreateFailResourceStatus(fmt.Sprintf("restore daemonset volumes failed: %v", err), spec.K8sExecFailed.Code)
			return spec.ReturnResultIgnoreCode(v1alpha1.CreateFailExperimentStatus(status.Error, []v1alpha1.ResourceStatus{status}))
		}
		logrusField.Infof("restored daemonset %s/%s volumes", namespace, workloadName)
	case "statefulset":
		statefulset := &appsv1.StatefulSet{}
		err := d.client.Get(ctx, types.NamespacedName{Namespace: namespace, Name: workloadName}, statefulset)
		if resp, handled := handleGetError(err, namespace, workloadType, workloadName, &status, logrusField); handled {
			return resp
		}
		if err := d.restoreStatefulSetVolumes(ctx, statefulset, experimentId); err != nil {
			logrusField.Warningf("restore statefulset %s/%s volumes failed: %v", namespace, workloadName, err)
			status = status.CreateFailResourceStatus(fmt.Sprintf("restore statefulset volumes failed: %v", err), spec.K8sExecFailed.Code)
			return spec.ReturnResultIgnoreCode(v1alpha1.CreateFailExperimentStatus(status.Error, []v1alpha1.ResourceStatus{status}))
		}
		logrusField.Infof("restored statefulset %s/%s volumes", namespace, workloadName)
	default:
		status = status.CreateFailResourceStatus(fmt.Sprintf("unsupported workload type: %s", workloadType), spec.ParameterIllegal.Code)
		return spec.ReturnResultIgnoreCode(v1alpha1.CreateFailExperimentStatus(status.Error, []v1alpha1.ResourceStatus{status}))
	}
	status = status.CreateSuccessResourceStatus()
	status.State = v1alpha1.DestroyedState
	return spec.ReturnResultIgnoreCode(v1alpha1.CreateDestroyedExperimentStatus([]v1alpha1.ResourceStatus{status}))
}

// volumeBackup stores the injected volume name so destroy knows exactly what to remove.
type volumeBackup struct {
	VolumeName string `json:"volumeName"`
	VolumeType string `json:"volumeType"`
	MountedTo  string `json:"mountedTo"` // "initContainers" or "containers"
}

// generateRandomHash generates a 12-character hex string.
// On entropy failure it falls back to a fixed string rather than erroring,
// since the value only needs to be unlikely to collide, not secret.
func generateRandomHash() string {
	b := make([]byte, 6)
	if _, err := rand.Read(b); err != nil {
		return "a1b2c3d4e5f6"
	}
	return hex.EncodeToString(b)
}

// buildFailedMountVolume creates a Volume with a non-existent configmap/secret/pvc reference.
func buildFailedMountVolume(volumeName, volumeType string) v1.Volume { fakeRef := "chaosblade-nonexistent-" + generateRandomHash() vol := v1.Volume{Name: volumeName} switch volumeType { case FailedMountVolumeTypeConfigMap: vol.VolumeSource = v1.VolumeSource{ ConfigMap: &v1.ConfigMapVolumeSource{ LocalObjectReference: v1.LocalObjectReference{Name: fakeRef}, }, } case FailedMountVolumeTypeSecret: vol.VolumeSource = v1.VolumeSource{ Secret: &v1.SecretVolumeSource{ SecretName: fakeRef, }, } case FailedMountVolumeTypePVC: vol.VolumeSource = v1.VolumeSource{ PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{ ClaimName: fakeRef, }, } } return vol } // injectFailedMountVolume adds a non-existent volume and mounts it to the target containers. // If withInitContainer is true, mounts to init containers; otherwise mounts to regular containers. func injectFailedMountVolume(podSpec *v1.PodSpec, annotations map[string]string, volumeType string, withInitContainer bool) (*volumeBackup, error) { volumeName := ChaosBladeFailedMountVolumeNamePrefix + generateRandomHash() mountedTo := "containers" if withInitContainer { if len(podSpec.InitContainers) == 0 { return nil, fmt.Errorf("the specified pod has no initContainers") } mountedTo = "initContainers" } vol := buildFailedMountVolume(volumeName, volumeType) podSpec.Volumes = append(podSpec.Volumes, vol) volumeMount := v1.VolumeMount{ Name: volumeName, MountPath: ChaosBladeFailedMountVolumeMountPath + "/" + volumeName, } if withInitContainer { for i := range podSpec.InitContainers { podSpec.InitContainers[i].VolumeMounts = append(podSpec.InitContainers[i].VolumeMounts, volumeMount) } } else { for i := range podSpec.Containers { podSpec.Containers[i].VolumeMounts = append(podSpec.Containers[i].VolumeMounts, volumeMount) } } backup := &volumeBackup{ VolumeName: volumeName, VolumeType: volumeType, MountedTo: mountedTo, } backupBytes, err := json.Marshal(backup) if err != nil { return nil, fmt.Errorf("marshal volume backup 
failed: %v", err) } annotations[ChaosBladeOriginalVolumesAnnotation] = string(backupBytes) return backup, nil } // removeInjectedVolume removes the injected volume and its mounts from the pod spec. // It also validates that the volume being removed matches the expected type from backup. func removeInjectedVolume(podSpec *v1.PodSpec, backup *volumeBackup) error { found := false newVolumes := make([]v1.Volume, 0, len(podSpec.Volumes)) for _, vol := range podSpec.Volumes { if vol.Name == backup.VolumeName { found = true if err := validateVolumeType(&vol, backup.VolumeType); err != nil { return fmt.Errorf("sanity check failed for volume %q: %w", backup.VolumeName, err) } continue } newVolumes = append(newVolumes, vol) } if !found { logrus.Warnf("injected volume %q not found in pod spec, it may have been removed externally", backup.VolumeName) } podSpec.Volumes = newVolumes if backup.MountedTo == "initContainers" { for i := range podSpec.InitContainers { mounts := make([]v1.VolumeMount, 0, len(podSpec.InitContainers[i].VolumeMounts)) for _, m := range podSpec.InitContainers[i].VolumeMounts { if m.Name != backup.VolumeName { mounts = append(mounts, m) } } podSpec.InitContainers[i].VolumeMounts = mounts } } else { for i := range podSpec.Containers { mounts := make([]v1.VolumeMount, 0, len(podSpec.Containers[i].VolumeMounts)) for _, m := range podSpec.Containers[i].VolumeMounts { if m.Name != backup.VolumeName { mounts = append(mounts, m) } } podSpec.Containers[i].VolumeMounts = mounts } } return nil } // validateVolumeType checks that the volume's actual source type matches the expected type from backup. 
// validateVolumeType verifies that vol's populated source field matches
// expectedType before the volume is removed on destroy.
func validateVolumeType(vol *v1.Volume, expectedType string) error {
	switch expectedType {
	case FailedMountVolumeTypeConfigMap:
		if vol.ConfigMap == nil {
			return fmt.Errorf("expected configmap volume but found different type")
		}
	case FailedMountVolumeTypeSecret:
		if vol.Secret == nil {
			return fmt.Errorf("expected secret volume but found different type")
		}
	case FailedMountVolumeTypePVC:
		if vol.PersistentVolumeClaim == nil {
			return fmt.Errorf("expected pvc volume but found different type")
		}
	default:
		return fmt.Errorf("unknown volume type %q in backup annotation", expectedType)
	}
	return nil
}

// restoreVolumeFromAnnotation parses the backup annotation and removes the injected volume.
func restoreVolumeFromAnnotation(podSpec *v1.PodSpec, annotations map[string]string) error {
	backupStr, ok := annotations[ChaosBladeOriginalVolumesAnnotation]
	if !ok || backupStr == "" {
		return fmt.Errorf("volume backup annotation not found")
	}
	var backup volumeBackup
	if err := json.Unmarshal([]byte(backupStr), &backup); err != nil {
		return fmt.Errorf("unmarshal volume backup failed: %v", err)
	}
	return removeInjectedVolume(podSpec, &backup)
}

// --- Deployment ---

// injectDeploymentFailedMount marks the deployment with experiment annotations,
// injects the non-existent volume into its pod template, and updates it.
// ensureNoConflictingExperiment and the ChaosBlade*Annotation keys are defined
// elsewhere in this package. Re-running with the same experimentId is a no-op
// (idempotent inject).
func (d *FailedMountActionExecutor) injectDeploymentFailedMount(ctx context.Context, deployment *appsv1.Deployment, volumeType string, withInitContainer bool, experimentId string) error {
	if deployment.Annotations == nil {
		deployment.Annotations = make(map[string]string)
	}
	if err := ensureNoConflictingExperiment(deployment.Annotations, experimentId); err != nil {
		return err
	}
	// Already injected by this experiment: nothing to do.
	if deployment.Annotations[ChaosBladeExperimentAnnotation] == experimentId {
		return nil
	}
	deployment.Annotations[ChaosBladeDeploymentAnnotation] = ChaosBladeFailedMountAction
	deployment.Annotations[ChaosBladeExperimentAnnotation] = experimentId
	if _, err := injectFailedMountVolume(&deployment.Spec.Template.Spec, deployment.Annotations, volumeType, withInitContainer); err != nil {
		return err
	}
	return d.client.Update(ctx, deployment)
}

// restoreDeploymentVolumes undoes injectDeploymentFailedMount: it requires the
// ownership annotation to match experimentId, strips the injected volume, and
// removes the bookkeeping annotations before updating.
func (d *FailedMountActionExecutor) restoreDeploymentVolumes(ctx context.Context, deployment *appsv1.Deployment, experimentId string) error {
	if deployment.Annotations[ChaosBladeExperimentAnnotation] != experimentId {
		return fmt.Errorf("deployment was not modified by experiment %s", experimentId)
	}
	if err := restoreVolumeFromAnnotation(&deployment.Spec.Template.Spec, deployment.Annotations); err != nil {
		return err
	}
	delete(deployment.Annotations, ChaosBladeDeploymentAnnotation)
	delete(deployment.Annotations, ChaosBladeExperimentAnnotation)
	delete(deployment.Annotations, ChaosBladeOriginalVolumesAnnotation)
	return d.client.Update(ctx, deployment)
}

// --- DaemonSet ---

// injectDaemonSetFailedMount: DaemonSet variant of the inject flow above.
func (d *FailedMountActionExecutor) injectDaemonSetFailedMount(ctx context.Context, daemonset *appsv1.DaemonSet, volumeType string, withInitContainer bool, experimentId string) error {
	if daemonset.Annotations == nil {
		daemonset.Annotations = make(map[string]string)
	}
	if err := ensureNoConflictingExperiment(daemonset.Annotations, experimentId); err != nil {
		return err
	}
	if daemonset.Annotations[ChaosBladeExperimentAnnotation] == experimentId {
		return nil
	}
	daemonset.Annotations[ChaosBladeDaemonSetAnnotation] = ChaosBladeFailedMountAction
	daemonset.Annotations[ChaosBladeExperimentAnnotation] = experimentId
	if _, err := injectFailedMountVolume(&daemonset.Spec.Template.Spec, daemonset.Annotations, volumeType, withInitContainer); err != nil {
		return err
	}
	return d.client.Update(ctx, daemonset)
}

// restoreDaemonSetVolumes: DaemonSet variant of the restore flow above.
func (d *FailedMountActionExecutor) restoreDaemonSetVolumes(ctx context.Context, daemonset *appsv1.DaemonSet, experimentId string) error {
	if daemonset.Annotations[ChaosBladeExperimentAnnotation] != experimentId {
		return fmt.Errorf("daemonset was not modified by experiment %s", experimentId)
	}
	if err := restoreVolumeFromAnnotation(&daemonset.Spec.Template.Spec, daemonset.Annotations); err != nil {
		return err
	}
	delete(daemonset.Annotations, ChaosBladeDaemonSetAnnotation)
	delete(daemonset.Annotations, ChaosBladeExperimentAnnotation)
	delete(daemonset.Annotations, ChaosBladeOriginalVolumesAnnotation)
	return d.client.Update(ctx, daemonset)
}

// --- StatefulSet ---

// injectStatefulSetFailedMount: StatefulSet variant of the inject flow above.
func (d *FailedMountActionExecutor) injectStatefulSetFailedMount(ctx context.Context, statefulset *appsv1.StatefulSet, volumeType string, withInitContainer bool, experimentId string) error {
	if statefulset.Annotations == nil {
		statefulset.Annotations = make(map[string]string)
	}
	if err := ensureNoConflictingExperiment(statefulset.Annotations, experimentId); err != nil {
		return err
	}
	if statefulset.Annotations[ChaosBladeExperimentAnnotation] == experimentId {
		return nil
	}
	statefulset.Annotations[ChaosBladeStatefulSetAnnotation] = ChaosBladeFailedMountAction
	statefulset.Annotations[ChaosBladeExperimentAnnotation] = experimentId
	if _, err := injectFailedMountVolume(&statefulset.Spec.Template.Spec, statefulset.Annotations, volumeType, withInitContainer); err != nil {
		return err
	}
	return d.client.Update(ctx, statefulset)
}

// restoreStatefulSetVolumes: StatefulSet variant of the restore flow above.
func (d *FailedMountActionExecutor) restoreStatefulSetVolumes(ctx context.Context, statefulset *appsv1.StatefulSet, experimentId string) error {
	if statefulset.Annotations[ChaosBladeExperimentAnnotation] != experimentId {
		return fmt.Errorf("statefulset was not modified by experiment %s", experimentId)
	}
	if err := restoreVolumeFromAnnotation(&statefulset.Spec.Template.Spec, statefulset.Annotations); err != nil {
		return err
	}
	delete(statefulset.Annotations, ChaosBladeStatefulSetAnnotation)
	delete(statefulset.Annotations, ChaosBladeExperimentAnnotation)
	delete(statefulset.Annotations, ChaosBladeOriginalVolumesAnnotation)
	return d.client.Update(ctx, statefulset)
}

================================================
FILE: exec/pod/failexp.go
================================================
/*
 * Copyright 2025 The ChaosBlade Authors
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
* You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package pod import ( "context" "fmt" "github.com/chaosblade-io/chaosblade-spec-go/spec" "github.com/chaosblade-io/chaosblade-spec-go/util" "github.com/sirupsen/logrus" v1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/types" "github.com/chaosblade-io/chaosblade-operator/channel" "github.com/chaosblade-io/chaosblade-operator/exec/model" "github.com/chaosblade-io/chaosblade-operator/pkg/apis/chaosblade/v1alpha1" ) type FailPodActionSpec struct { spec.BaseExpActionCommandSpec } func NewFailPodActionSpec(client *channel.Client) spec.ExpActionCommandSpec { return &FailPodActionSpec{ spec.BaseExpActionCommandSpec{ ActionMatchers: []spec.ExpFlagSpec{}, ActionFlags: []spec.ExpFlagSpec{ &spec.ExpFlag{}, }, ActionExecutor: &FailPodActionExecutor{client: client}, ActionExample: `# Specify POD exception blade create k8s pod-pod fail --labels "app=test" --namespace default `, ActionCategories: []string{model.CategorySystemContainer}, }, } } func (*FailPodActionSpec) Name() string { return "fail" } func (*FailPodActionSpec) Aliases() []string { return []string{} } func (*FailPodActionSpec) ShortDesc() string { return "Fail pods" } func (*FailPodActionSpec) LongDesc() string { return "Fail pods" } type FailPodActionExecutor struct { client *channel.Client } func (*FailPodActionExecutor) Name() string { return "fail" } func (*FailPodActionExecutor) SetChannel(channel spec.Channel) { } func (d *FailPodActionExecutor) Exec(uid string, ctx context.Context, model *spec.ExpModel) *spec.Response { if _, ok := spec.IsDestroy(ctx); ok { return d.destroy(ctx, model) } else { return 
d.create(ctx, model) } } func (d *FailPodActionExecutor) create(ctx context.Context, expModel *spec.ExpModel) *spec.Response { experimentId := model.GetExperimentIdFromContext(ctx) logrusField := logrus.WithField("experiment", experimentId) containerMatchedList, err := model.GetContainerObjectMetaListFromContext(ctx) if err != nil { util.Errorf(experimentId, util.GetRunFuncName(), err.Error()) return spec.ResponseFailWithResult(spec.ContainerInContextNotFound, v1alpha1.CreateFailExperimentStatus(spec.ContainerInContextNotFound.Msg, []v1alpha1.ResourceStatus{})) } statuses := make([]v1alpha1.ResourceStatus, 0) success := false for _, c := range containerMatchedList { status := v1alpha1.ResourceStatus{ Kind: v1alpha1.PodKind, Identifier: c.GetIdentifier(), } objectMeta := types.NamespacedName{Name: c.PodName, Namespace: c.Namespace} pod := &v1.Pod{} err := d.client.Get(context.TODO(), objectMeta, pod) if err != nil { logrusField.Errorf("get pod %s err, %v", c.PodName, err) status = status.CreateFailResourceStatus(spec.K8sExecFailed.Sprintf("get", err), spec.K8sExecFailed.Code) } if !isPodReady(pod) { logrusField.Infof("pod %s is not ready", c.PodName) statuses = append(statuses, status.CreateFailResourceStatus(spec.PodNotReady.Sprintf(c.PodName), spec.PodNotReady.Code)) continue } if err := d.failPod(ctx, pod); err != nil { logrusField.Warningf("fail pod %s err, %v", c.PodName, err) status = status.CreateFailResourceStatus(spec.K8sExecFailed.Sprintf("update", err), spec.K8sExecFailed.Code) } else { status = status.CreateSuccessResourceStatus() success = true } statuses = append(statuses, status) } var experimentStatus v1alpha1.ExperimentStatus if success { experimentStatus = v1alpha1.CreateSuccessExperimentStatus(statuses) } else { experimentStatus = v1alpha1.CreateFailExperimentStatus("see resStatuses for details", statuses) } return spec.ReturnResultIgnoreCode(experimentStatus) } func (d *FailPodActionExecutor) destroy(ctx context.Context, expModel *spec.ExpModel) 
*spec.Response { containerMatchedList, err := model.GetContainerObjectMetaListFromContext(ctx) experimentId := model.GetExperimentIdFromContext(ctx) if err != nil { util.Errorf(experimentId, util.GetRunFuncName(), err.Error()) return spec.ResponseFailWithResult(spec.ContainerInContextNotFound, v1alpha1.CreateFailExperimentStatus(spec.ContainerInContextNotFound.Msg, []v1alpha1.ResourceStatus{})) } logrusField := logrus.WithField("experiment", experimentId) experimentStatus := v1alpha1.CreateDestroyedExperimentStatus([]v1alpha1.ResourceStatus{}) statuses := experimentStatus.ResStatuses for _, c := range containerMatchedList { status := v1alpha1.ResourceStatus{ Kind: v1alpha1.PodKind, Identifier: c.GetIdentifier(), } objectMeta := types.NamespacedName{Name: c.PodName, Namespace: c.Namespace} pod := &v1.Pod{} err := d.client.Get(context.TODO(), objectMeta, pod) if err != nil { logrusField.Errorf("get pod %s err, %v", c.PodName, err) status = status.CreateFailResourceStatus(spec.K8sExecFailed.Sprintf("get", err), spec.K8sExecFailed.Code) continue } err = d.client.Delete(context.TODO(), pod) if err != nil { logrusField.Errorf("delete pod %s err, %v", c.PodName, err) status = status.CreateFailResourceStatus(spec.K8sExecFailed.Sprintf("delete", err), spec.K8sExecFailed.Code) continue } } experimentStatus.ResStatuses = statuses return spec.ReturnResultIgnoreCode(experimentStatus) } // failPod will exec failPod experiment func (d *FailPodActionExecutor) failPod(ctx context.Context, pod *v1.Pod) error { for i, container := range pod.Spec.Containers { key := fmt.Sprintf("%s-%s", "failPod", container.Name) if pod.Annotations == nil { pod.Annotations = make(map[string]string) } if isAnnotationExist(pod.Annotations, key) { continue } pod.Annotations[key] = container.Image pod.Spec.Containers[i].Image = fmt.Sprintf("%s-fault-injection", container.Image) } if err := d.client.Update(ctx, pod); err != nil { return err } return nil } // isAnnotationExist will check this pod has been 
tested func isAnnotationExist(annotation map[string]string, key string) bool { _, ok := annotation[key] if !ok { return false } return true } ================================================ FILE: exec/pod/fsexp.go ================================================ /* * Copyright 2025 The ChaosBlade Authors * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package pod import ( "context" "fmt" "strconv" "strings" "github.com/chaosblade-io/chaosblade-spec-go/spec" "github.com/chaosblade-io/chaosblade-spec-go/util" "github.com/sirupsen/logrus" v1 "k8s.io/api/core/v1" "sigs.k8s.io/controller-runtime/pkg/client" "github.com/chaosblade-io/chaosblade-operator/channel" "github.com/chaosblade-io/chaosblade-operator/exec/model" "github.com/chaosblade-io/chaosblade-operator/pkg/apis/chaosblade/v1alpha1" chaosfs "github.com/chaosblade-io/chaosblade-operator/pkg/hookfs" webhook "github.com/chaosblade-io/chaosblade-operator/pkg/webhook/pod" ) type PodIOActionSpec struct { spec.BaseExpActionCommandSpec } func NewPodIOActionSpec(client *channel.Client) spec.ExpActionCommandSpec { return &PodIOActionSpec{ spec.BaseExpActionCommandSpec{ ActionMatchers: []spec.ExpFlagSpec{ &spec.ExpFlag{ Name: "method", Desc: "inject methods, only support read and write", }, &spec.ExpFlag{ Name: "delay", Desc: "file io delay time, ms", }, }, ActionFlags: []spec.ExpFlagSpec{ &spec.ExpFlag{ Name: "path", Desc: "I/O exception path or file", }, &spec.ExpFlag{ Name: "random", Desc: "random inject I/O code", NoArgs: true, 
}, &spec.ExpFlag{ Name: "percent", Desc: "I/O error percent [0-100],", }, &spec.ExpFlag{ Name: "errno", Desc: "I/O error code", }, }, ActionExecutor: &PodIOActionExecutor{client: client}, ActionExample: `# Two types of exceptions were injected for the READ operation, with an exception rate of 60 percent blade create k8s pod-pod IO --method read --delay 1000 --path /home --percent 60 --errno 28 --labels "app=test" --namespace default`, ActionCategories: []string{model.CategorySystemContainer}, }, } } func (*PodIOActionSpec) Name() string { return "IO" } func (*PodIOActionSpec) Aliases() []string { return []string{} } func (*PodIOActionSpec) ShortDesc() string { return "Pod File System IO Exception" } func (*PodIOActionSpec) LongDesc() string { return "Pod File System IO Exception" } type PodIOActionExecutor struct { client *channel.Client } func (*PodIOActionExecutor) Name() string { return "IO" } func (*PodIOActionExecutor) SetChannel(channel spec.Channel) { } func (d *PodIOActionExecutor) Exec(uid string, ctx context.Context, model *spec.ExpModel) *spec.Response { if _, ok := spec.IsDestroy(ctx); ok { return d.destroy(ctx, model) } else { return d.create(ctx, model) } } func (d *PodIOActionExecutor) create(ctx context.Context, expModel *spec.ExpModel) *spec.Response { containerMatchedList, err := model.GetContainerObjectMetaListFromContext(ctx) experimentId := model.GetExperimentIdFromContext(ctx) if err != nil { util.Errorf(experimentId, util.GetRunFuncName(), err.Error()) return spec.ResponseFailWithFlags(spec.ContainerInContextNotFound, v1alpha1.CreateFailExperimentStatus(spec.ContainerInContextNotFound.Msg, []v1alpha1.ResourceStatus{})) } logrusField := logrus.WithField("experiment", experimentId) statuses := make([]v1alpha1.ResourceStatus, 0) success := false for _, c := range containerMatchedList { status := v1alpha1.ResourceStatus{ Kind: v1alpha1.PodKind, Identifier: c.GetIdentifier(), } pod := &v1.Pod{} err := d.client.Get(context.TODO(), 
client.ObjectKey{Namespace: c.Namespace, Name: c.PodName}, pod) if err != nil { logrusField.Errorf("get pod %s err, %v", c.PodName, err) statuses = append(statuses, status.CreateFailResourceStatus( spec.K8sExecFailed.Sprintf("get", err), spec.K8sExecFailed.Code, )) continue } if !isPodReady(pod) { logrusField.Infof("pod %s is not ready", c.PodName) statuses = append(statuses, status.CreateFailResourceStatus(spec.PodNotReady.Msg, spec.PodNotReady.Code)) continue } methods, ok := expModel.ActionFlags["method"] if !ok && len(methods) != 0 { logrusField.Error("method cannot be empty") statuses = append(statuses, status.CreateFailResourceStatus( spec.ParameterLess.Sprintf("method"), spec.ParameterLess.Code, )) continue } var delay, percent, errno int delayStr, ok := expModel.ActionFlags["delay"] if ok && len(delayStr) != 0 { delay, err = strconv.Atoi(delayStr) if err != nil { logrusField.Error("delay must be integer") statuses = append(statuses, status.CreateFailResourceStatus( spec.ParameterIllegal.Sprintf("delay", delayStr, err), spec.ParameterIllegal.Code, )) continue } } percentStr, ok := expModel.ActionFlags["percent"] if ok && len(percentStr) != 0 { if percent, err = strconv.Atoi(percentStr); err != nil { logrusField.Error("percent must be integer") statuses = append(statuses, status.CreateFailResourceStatus( spec.ParameterIllegal.Sprintf("percent", percentStr, err), spec.ParameterIllegal.Code, )) continue } } errnoStr, ok := expModel.ActionFlags["errno"] if ok && len(errnoStr) != 0 { if errno, err = strconv.Atoi(errnoStr); err != nil { logrusField.Error("errno must be integer") statuses = append(statuses, status.CreateFailResourceStatus( spec.ParameterIllegal.Sprintf("errno", errnoStr, err), spec.ParameterIllegal.Code, )) continue } } random := false randomStr, ok := expModel.ActionFlags["random"] if ok && randomStr == "true" { random = true } request := &chaosfs.InjectMessage{ Methods: strings.Split(methods, ","), Path: expModel.ActionFlags["path"], Delay: 
uint32(delay),
			Percent: uint32(percent),
			Random:  random,
			Errno:   uint32(errno),
		}
		// Talk to the chaosfs fuse server exposed by the target pod.
		chaosfsClient, err := getChaosfsClient(pod)
		if err != nil {
			logrusField.WithField("pod", c.PodName).WithField("request", request).
				Errorf("init chaosfs client failed: %v", err)
			statuses = append(statuses, status.CreateFailResourceStatus(
				spec.ChaosfsClientFailed.Sprintf(pod.Name, err), spec.ChaosfsClientFailed.Code,
			))
			continue
		}
		err = chaosfsClient.InjectFault(ctx, request)
		if err != nil {
			logrusField.Errorf("inject io exception in pod %s failed, request %v, err: %v",
				c.PodName, request, err)
			statuses = append(statuses, status.CreateFailResourceStatus(
				spec.ChaosfsInjectFailed.Sprintf(pod.Name, request, err), spec.ChaosfsInjectFailed.Code,
			))
			continue
		}
		statuses = append(statuses, status.CreateSuccessResourceStatus())
		success = true
	}
	// Overall success if at least one pod was injected.
	var experimentStatus v1alpha1.ExperimentStatus
	if success {
		experimentStatus = v1alpha1.CreateSuccessExperimentStatus(statuses)
	} else {
		experimentStatus = v1alpha1.CreateFailExperimentStatus("see resStatuses for details", statuses)
	}
	return spec.ReturnResultIgnoreCode(experimentStatus)
}

// destroy revokes the injected I/O exception on every matched pod by calling
// the chaosfs server's Revoke endpoint.
func (d *PodIOActionExecutor) destroy(ctx context.Context, expModel *spec.ExpModel) *spec.Response {
	containerMatchedList, err := model.GetContainerObjectMetaListFromContext(ctx)
	experimentId := model.GetExperimentIdFromContext(ctx)
	if err != nil {
		util.Errorf(experimentId, util.GetRunFuncName(), err.Error())
		return spec.ResponseFailWithResult(spec.ContainerInContextNotFound,
			v1alpha1.CreateFailExperimentStatus(spec.ContainerInContextNotFound.Msg, []v1alpha1.ResourceStatus{}))
	}
	logrusField := logrus.WithField("experiment", experimentId)
	experimentStatus := v1alpha1.CreateDestroyedExperimentStatus([]v1alpha1.ResourceStatus{})
	statuses := experimentStatus.ResStatuses
	for _, c := range containerMatchedList {
		status := v1alpha1.ResourceStatus{
			Kind:       v1alpha1.PodKind,
			Identifier: c.GetIdentifier(),
		}
		pod := &v1.Pod{}
		err := d.client.Get(context.TODO(),
client.ObjectKey{Namespace: c.Namespace, Name: c.PodName}, pod)
		if err != nil {
			logrusField.Errorf("get pod %s err, %v", c.PodName, err)
			// FIX: record the failure instead of silently skipping the pod,
			// matching the chaosfs error branches below.
			statuses = append(statuses, status.CreateFailResourceStatus(
				spec.K8sExecFailed.Sprintf("get", err), spec.K8sExecFailed.Code,
			))
			continue
		}
		if !isPodReady(pod) {
			// A pod that is not ready cannot serve the chaosfs Revoke call;
			// skip it (original behavior preserved).
			logrusField.Errorf("pod %s is not ready", c.PodName)
			continue
		}
		chaosfsClient, err := getChaosfsClient(pod)
		if err != nil {
			logrusField.Errorf("init chaosfs client failed in pod %v, err: %v", pod.Name, err)
			statuses = append(statuses, status.CreateFailResourceStatus(
				spec.ChaosfsClientFailed.Sprintf(pod.Name, err), spec.ChaosfsClientFailed.Code,
			))
			continue
		}
		err = chaosfsClient.Revoke(ctx)
		if err != nil {
			logrusField.Errorf("recover io exception failed in pod %v, err: %v", c.PodName, err)
			statuses = append(statuses, status.CreateFailResourceStatus(
				spec.ChaosfsRecoverFailed.Sprintf(pod.Name, err), spec.ChaosfsRecoverFailed.Code,
			))
			continue
		}
	}
	experimentStatus.ResStatuses = statuses
	return spec.ReturnResultIgnoreCode(experimentStatus)
}

// isPodReady reports whether the pod is not being deleted and has a
// PodReady condition with status True.
func isPodReady(pod *v1.Pod) bool {
	if pod.ObjectMeta.DeletionTimestamp != nil {
		return false
	}
	for _, condition := range pod.Status.Conditions {
		if condition.Type == v1.PodReady && condition.Status == v1.ConditionTrue {
			return true
		}
	}
	return false
}

// getChaosfsClient builds a chaosfs hook client addressed at the pod IP and
// the fuse-server container port injected by the webhook.
func getChaosfsClient(pod *v1.Pod) (*chaosfs.ChaosBladeHookClient, error) {
	port, err := getContainerPort(webhook.FuseServerPortName, pod)
	if err != nil {
		return nil, err
	}
	addr := fmt.Sprintf("%s:%d", pod.Status.PodIP, port)
	return chaosfs.NewChabladeHookClient(addr), nil
}

// getContainerPort returns the container port with the given name, scanning
// every container of the pod.
func getContainerPort(portName string, pod *v1.Pod) (int32, error) {
	for _, container := range pod.Spec.Containers {
		for _, port := range container.Ports {
			if port.Name == portName {
				return port.ContainerPort, nil
			}
		}
	}
	// FIX: grammar and trailing space in the error message
	// (was "can not found fuse-server container port ").
	return 0, fmt.Errorf("can not find fuse-server container port")
}

================================================
FILE: exec/pod/imageconfigexp.go
================================================
/*
 * Copyright 2025 The ChaosBlade Authors
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this
file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package pod

import (
	"context"
	"fmt"
	"strings"

	"github.com/chaosblade-io/chaosblade-spec-go/spec"
	"github.com/chaosblade-io/chaosblade-spec-go/util"
	"github.com/sirupsen/logrus"
	v1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/types"

	"github.com/chaosblade-io/chaosblade-operator/channel"
	"github.com/chaosblade-io/chaosblade-operator/exec/model"
	"github.com/chaosblade-io/chaosblade-operator/pkg/apis/chaosblade/v1alpha1"
)

// ImageConfigActionSpec declares the pod image-config fault action.
type ImageConfigActionSpec struct {
	spec.BaseExpActionCommandSpec
}

// Flag names for overriding the broken image name / tag.
const (
	ImageNameFlag = "image-name"
	ImageTagFlag  = "image-tag"
)

// NewImageConfigActionSpec builds the command spec; both flags are optional —
// with neither set the action appends a suffix to the original image (see
// buildNewImage).
func NewImageConfigActionSpec(client *channel.Client) spec.ExpActionCommandSpec {
	return &ImageConfigActionSpec{
		spec.BaseExpActionCommandSpec{
			ActionMatchers: []spec.ExpFlagSpec{},
			ActionFlags: []spec.ExpFlagSpec{
				&spec.ExpFlag{
					Name: ImageNameFlag,
					Desc: "The image name to replace the original image name, e.g. nginx-not-exist",
				},
				&spec.ExpFlag{
					Name: ImageTagFlag,
					Desc: "The image tag to replace the original image tag, e.g.
non-existent-tag",
				},
			},
			ActionExecutor: &ImageConfigActionExecutor{client: client},
			ActionExample: `# Inject image config error to pods with default behavior blade create k8s pod-pod imageconfig --labels "app=test" --namespace default # Inject image config error with custom image name blade create k8s pod-pod imageconfig --labels "app=test" --namespace default --image-name nginx-not-exist # Inject image config error with custom image tag blade create k8s pod-pod imageconfig --labels "app=test" --namespace default --image-tag non-existent-tag # Inject image config error with both custom image name and tag blade create k8s pod-pod imageconfig --labels "app=test" --namespace default --image-name nginx-not-exist --image-tag non-existent-tag `,
			ActionCategories: []string{model.CategorySystemContainer},
		},
	}
}

func (*ImageConfigActionSpec) Name() string {
	return "imageconfig"
}

func (*ImageConfigActionSpec) Aliases() []string {
	return []string{}
}

func (*ImageConfigActionSpec) ShortDesc() string {
	return "Inject image config error to pods"
}

func (*ImageConfigActionSpec) LongDesc() string {
	return "Modify pod container image to a non-existent image to simulate image config error"
}

// ImageConfigActionExecutor rewrites pod container images to broken
// references so the kubelet reports image pull errors.
type ImageConfigActionExecutor struct {
	client *channel.Client
}

func (*ImageConfigActionExecutor) Name() string {
	return "imageconfig"
}

func (*ImageConfigActionExecutor) SetChannel(channel spec.Channel) {}

// Exec dispatches to create or destroy based on the context destroy flag.
func (d *ImageConfigActionExecutor) Exec(uid string, ctx context.Context, expModel *spec.ExpModel) *spec.Response {
	if _, ok := spec.IsDestroy(ctx); ok {
		return d.destroy(ctx, expModel)
	}
	return d.create(ctx, expModel)
}

// create rewrites the images of every matched pod (see modifyPodImage) and
// reports a per-pod ResourceStatus list.
func (d *ImageConfigActionExecutor) create(ctx context.Context, expModel *spec.ExpModel) *spec.Response {
	imageName := expModel.ActionFlags[ImageNameFlag]
	imageTag := expModel.ActionFlags[ImageTagFlag]
	experimentId := model.GetExperimentIdFromContext(ctx)
	logrusField := logrus.WithField("experiment", experimentId)
	containerMatchedList, err :=
model.GetContainerObjectMetaListFromContext(ctx)
	if err != nil {
		util.Errorf(experimentId, util.GetRunFuncName(), err.Error())
		return spec.ResponseFailWithResult(spec.ContainerInContextNotFound,
			v1alpha1.CreateFailExperimentStatus(spec.ContainerInContextNotFound.Msg, []v1alpha1.ResourceStatus{}))
	}
	statuses := make([]v1alpha1.ResourceStatus, 0)
	success := false
	for _, c := range containerMatchedList {
		status := v1alpha1.ResourceStatus{
			Kind:       v1alpha1.PodKind,
			Identifier: c.GetIdentifier(),
		}
		objectMeta := types.NamespacedName{Name: c.PodName, Namespace: c.Namespace}
		pod := &v1.Pod{}
		err := d.client.Get(ctx, objectMeta, pod)
		if err != nil {
			logrusField.Errorf("get pod %s err, %v", c.PodName, err)
			status = status.CreateFailResourceStatus(spec.K8sExecFailed.Sprintf("get", err), spec.K8sExecFailed.Code)
			statuses = append(statuses, status)
			continue
		}
		if !isImageConfigPodReady(pod) {
			logrusField.Infof("pod %s is not ready", c.PodName)
			statuses = append(statuses, status.CreateFailResourceStatus(spec.PodNotReady.Sprintf(c.PodName), spec.PodNotReady.Code))
			continue
		}
		if err := d.modifyPodImage(ctx, pod, imageName, imageTag); err != nil {
			logrusField.Warningf("modify pod %s image err, %v", c.PodName, err)
			status = status.CreateFailResourceStatus(spec.K8sExecFailed.Sprintf("update", err), spec.K8sExecFailed.Code)
		} else {
			status = status.CreateSuccessResourceStatus()
			success = true
		}
		statuses = append(statuses, status)
	}
	// Overall success if at least one pod was modified.
	var experimentStatus v1alpha1.ExperimentStatus
	if success {
		experimentStatus = v1alpha1.CreateSuccessExperimentStatus(statuses)
	} else {
		experimentStatus = v1alpha1.CreateFailExperimentStatus("see resStatuses for details", statuses)
	}
	return spec.ReturnResultIgnoreCode(experimentStatus)
}

// destroy recovers the experiment by deleting each matched pod; its
// controller recreates it from the unmodified template.
func (d *ImageConfigActionExecutor) destroy(ctx context.Context, expModel *spec.ExpModel) *spec.Response {
	containerMatchedList, err := model.GetContainerObjectMetaListFromContext(ctx)
	experimentId := model.GetExperimentIdFromContext(ctx)
	if err != nil {
util.Errorf(experimentId, util.GetRunFuncName(), err.Error())
		return spec.ResponseFailWithResult(spec.ContainerInContextNotFound,
			v1alpha1.CreateFailExperimentStatus(spec.ContainerInContextNotFound.Msg, []v1alpha1.ResourceStatus{}))
	}
	logrusField := logrus.WithField("experiment", experimentId)
	experimentStatus := v1alpha1.CreateDestroyedExperimentStatus([]v1alpha1.ResourceStatus{})
	statuses := experimentStatus.ResStatuses
	for _, c := range containerMatchedList {
		status := v1alpha1.ResourceStatus{
			Kind:       v1alpha1.PodKind,
			Identifier: c.GetIdentifier(),
		}
		objectMeta := types.NamespacedName{Name: c.PodName, Namespace: c.Namespace}
		pod := &v1.Pod{}
		err := d.client.Get(ctx, objectMeta, pod)
		if err != nil {
			logrusField.Errorf("get pod %s err, %v", c.PodName, err)
			status = status.CreateFailResourceStatus(spec.K8sExecFailed.Sprintf("get", err), spec.K8sExecFailed.Code)
			statuses = append(statuses, status)
			continue
		}
		// Deleting the pod lets its controller recreate it with the original image.
		err = d.client.Delete(ctx, pod)
		if err != nil {
			logrusField.Errorf("delete pod %s err, %v", c.PodName, err)
			status = status.CreateFailResourceStatus(spec.K8sExecFailed.Sprintf("delete", err), spec.K8sExecFailed.Code)
			statuses = append(statuses, status)
			continue
		}
		status = status.CreateSuccessResourceStatus()
		statuses = append(statuses, status)
	}
	experimentStatus.ResStatuses = statuses
	return spec.ReturnResultIgnoreCode(experimentStatus)
}

// modifyPodImage modifies the pod container images to simulate image config error.
// If imageName and imageTag are both empty, it appends "-image-config-error" to the original image (default behavior).
// If imageName is provided, the image name portion is replaced.
// If imageTag is provided, the image tag portion is replaced.
func (d *ImageConfigActionExecutor) modifyPodImage(ctx context.Context, pod *v1.Pod, imageName, imageTag string) error { modified := false for i, container := range pod.Spec.Containers { key := fmt.Sprintf("%s-%s", "chaosblade.io/imageconfig", container.Name) if pod.Annotations == nil { pod.Annotations = make(map[string]string) } if isImageConfigAnnotationExist(pod.Annotations, key) { continue } pod.Annotations[key] = container.Image pod.Spec.Containers[i].Image = buildNewImage(container.Image, imageName, imageTag) modified = true } if !modified { return nil } return d.client.Update(ctx, pod) } // buildNewImage constructs the new image string based on provided imageName and imageTag. // If both are empty, returns "{original}-image-config-error" for backward compatibility. // The original image format can be: "name", "name:tag", "registry/name", "registry/name:tag". func buildNewImage(originalImage, imageName, imageTag string) string { if imageName == "" && imageTag == "" { return fmt.Sprintf("%s-image-config-error", originalImage) } // Parse the original image into name and tag parts origName, origTag := parseImage(originalImage) if imageName != "" { origName = imageName } if imageTag != "" { origTag = imageTag } if origTag == "" { return origName } return fmt.Sprintf("%s:%s", origName, origTag) } // parseImage splits an image reference into name and tag. // Handles formats like "nginx", "nginx:latest", "registry.example.com/nginx:v1.0". 
func parseImage(image string) (name, tag string) { // Find the last colon that is not part of a registry port (after the last /) slashIdx := strings.LastIndex(image, "/") colonIdx := strings.LastIndex(image, ":") // If colon exists and is after the last slash, it's a tag separator if colonIdx > slashIdx { return image[:colonIdx], image[colonIdx+1:] } return image, "" } // isImageConfigAnnotationExist checks if the annotation already exists func isImageConfigAnnotationExist(annotation map[string]string, key string) bool { _, ok := annotation[key] if !ok { return false } return true } // isImageConfigPodReady checks if the pod is ready func isImageConfigPodReady(pod *v1.Pod) bool { if pod.ObjectMeta.DeletionTimestamp != nil { return false } for _, condition := range pod.Status.Conditions { if condition.Type == v1.PodReady && condition.Status == v1.ConditionTrue { return true } } return false } ================================================ FILE: exec/pod/imageconfigexp_test.go ================================================ /* * Copyright 2025 The ChaosBlade Authors * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. 
*/

package pod

import (
	"testing"
)

// TestParseImage covers tag-less images, tagged images, and registries with
// ports (where the last colon is not a tag separator).
func TestParseImage(t *testing.T) {
	tests := []struct {
		input    string
		wantName string
		wantTag  string
	}{
		{"nginx", "nginx", ""},
		{"nginx:latest", "nginx", "latest"},
		{"nginx:1.19.0", "nginx", "1.19.0"},
		{"registry.example.com/nginx", "registry.example.com/nginx", ""},
		{"registry.example.com/nginx:v1.0", "registry.example.com/nginx", "v1.0"},
		{"registry.example.com:5000/nginx", "registry.example.com:5000/nginx", ""},
		{"registry.example.com:5000/nginx:v1.0", "registry.example.com:5000/nginx", "v1.0"},
		{"my-registry.io:5000/my-org/my-image:sha-abc123", "my-registry.io:5000/my-org/my-image", "sha-abc123"},
		{"localhost:5000/test", "localhost:5000/test", ""},
	}
	for _, tt := range tests {
		t.Run(tt.input, func(t *testing.T) {
			gotName, gotTag := parseImage(tt.input)
			if gotName != tt.wantName {
				t.Errorf("parseImage(%q) name = %q, want %q", tt.input, gotName, tt.wantName)
			}
			if gotTag != tt.wantTag {
				t.Errorf("parseImage(%q) tag = %q, want %q", tt.input, gotTag, tt.wantTag)
			}
		})
	}
}

// TestBuildNewImage covers the default suffix behavior plus every
// combination of imageName / imageTag overrides.
func TestBuildNewImage(t *testing.T) {
	tests := []struct {
		name          string
		originalImage string
		imageName     string
		imageTag      string
		want          string
	}{
		// Default behavior: both params empty -> append "-image-config-error"
		{
			name:          "default behavior - simple image",
			originalImage: "nginx",
			imageName:     "",
			imageTag:      "",
			want:          "nginx-image-config-error",
		},
		{
			name:          "default behavior - image with tag",
			originalImage: "nginx:latest",
			imageName:     "",
			imageTag:      "",
			want:          "nginx:latest-image-config-error",
		},
		{
			name:          "default behavior - full registry path",
			originalImage: "registry.example.com/nginx:v1.0",
			imageName:     "",
			imageTag:      "",
			want:          "registry.example.com/nginx:v1.0-image-config-error",
		},
		// Only imageName provided
		{
			name:          "replace image name only - simple image",
			originalImage: "nginx",
			imageName:     "nginx-not-exist",
			imageTag:      "",
			want:          "nginx-not-exist",
		},
		{
			name:          "replace image name only - image with tag",
			originalImage: "nginx:latest",
			imageName:     "nginx-not-exist",
			imageTag:
"",
			want: "nginx-not-exist:latest",
		},
		{
			name:          "replace image name only - full registry path",
			originalImage: "registry.example.com/nginx:v1.0",
			imageName:     "my-bad-image",
			imageTag:      "",
			want:          "my-bad-image:v1.0",
		},
		// Only imageTag provided
		{
			name:          "replace tag only - simple image without tag",
			originalImage: "nginx",
			imageName:     "",
			imageTag:      "non-existent-tag",
			want:          "nginx:non-existent-tag",
		},
		{
			name:          "replace tag only - image with tag",
			originalImage: "nginx:latest",
			imageName:     "",
			imageTag:      "non-existent-tag",
			want:          "nginx:non-existent-tag",
		},
		{
			name:          "replace tag only - full registry path",
			originalImage: "registry.example.com/nginx:v1.0",
			imageName:     "",
			imageTag:      "broken-tag",
			want:          "registry.example.com/nginx:broken-tag",
		},
		// Both imageName and imageTag provided
		{
			name:          "replace both name and tag",
			originalImage: "nginx:latest",
			imageName:     "bad-image",
			imageTag:      "bad-tag",
			want:          "bad-image:bad-tag",
		},
		{
			name:          "replace both - full registry path",
			originalImage: "registry.example.com/nginx:v1.0",
			imageName:     "totally-wrong",
			imageTag:      "no-such-tag",
			want:          "totally-wrong:no-such-tag",
		},
		// Edge cases with registry port
		{
			name:          "registry with port - replace tag only",
			originalImage: "registry.example.com:5000/nginx:v1.0",
			imageName:     "",
			imageTag:      "broken",
			want:          "registry.example.com:5000/nginx:broken",
		},
		{
			name:          "registry with port no tag - replace tag",
			originalImage: "registry.example.com:5000/nginx",
			imageName:     "",
			imageTag:      "broken",
			want:          "registry.example.com:5000/nginx:broken",
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			got := buildNewImage(tt.originalImage, tt.imageName, tt.imageTag)
			if got != tt.want {
				t.Errorf("buildNewImage(%q, %q, %q) = %q, want %q",
					tt.originalImage, tt.imageName, tt.imageTag, got, tt.want)
			}
		})
	}
}

// TestIsImageConfigAnnotationExist covers present, absent, and empty-map cases.
func TestIsImageConfigAnnotationExist(t *testing.T) {
	tests := []struct {
		name       string
		annotation map[string]string
		key        string
		want       bool
	}{
		{
			name: "annotation exists",
			annotation:
map[string]string{"imageConfig-nginx": "nginx:latest"},
			key:  "imageConfig-nginx",
			want: true,
		},
		{
			name:       "annotation does not exist",
			annotation: map[string]string{"imageConfig-nginx": "nginx:latest"},
			key:        "imageConfig-redis",
			want:       false,
		},
		{
			name:       "empty annotations",
			annotation: map[string]string{},
			key:        "imageConfig-nginx",
			want:       false,
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			got := isImageConfigAnnotationExist(tt.annotation, tt.key)
			if got != tt.want {
				t.Errorf("isImageConfigAnnotationExist() = %v, want %v", got, tt.want)
			}
		})
	}
}

================================================
FILE: exec/pod/imagepullsecretserror.go
================================================
/*
 * Copyright 2025 The ChaosBlade Authors
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
*/

package pod

import (
	"context"
	"crypto/sha256"
	"encoding/base64"
	"encoding/json"
	"fmt"
	"strings"

	"github.com/chaosblade-io/chaosblade-spec-go/spec"
	"github.com/chaosblade-io/chaosblade-spec-go/util"
	"github.com/sirupsen/logrus"
	v1 "k8s.io/api/core/v1"
	apierrors "k8s.io/apimachinery/pkg/api/errors"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/types"
	"sigs.k8s.io/controller-runtime/pkg/client"

	"github.com/chaosblade-io/chaosblade-operator/channel"
	"github.com/chaosblade-io/chaosblade-operator/exec/model"
	"github.com/chaosblade-io/chaosblade-operator/pkg/apis/chaosblade/v1alpha1"
)

// Annotation/label keys used to mark and find backup Secrets created by this action.
const (
	// ChaosBladeIPSBackupAnnotation marks a Secret as a backup created by this action
	ChaosBladeIPSBackupAnnotation = "chaosblade.io/ips-backup"
	// ChaosBladeIPSOriginalNameAnnotation stores the original Secret name
	ChaosBladeIPSOriginalNameAnnotation = "chaosblade.io/ips-original-name"
	// ChaosBladeIPSOriginalNamespaceAnnotation stores the original Secret namespace
	ChaosBladeIPSOriginalNamespaceAnnotation = "chaosblade.io/ips-original-namespace"
	// ChaosBladeIPSExperimentLabel is the label key for experiment ID on backup Secrets
	ChaosBladeIPSExperimentLabel = "chaosblade.io/experiment"
)

// ImagePullSecretsErrorActionSpec declares the imagePullSecrets corruption action.
type ImagePullSecretsErrorActionSpec struct {
	spec.BaseExpActionCommandSpec
}

// NewImagePullSecretsErrorActionSpec builds the command spec; the optional
// secret-name flag narrows corruption to one Secret.
func NewImagePullSecretsErrorActionSpec(client *channel.Client) spec.ExpActionCommandSpec {
	return &ImagePullSecretsErrorActionSpec{
		spec.BaseExpActionCommandSpec{
			ActionMatchers: []spec.ExpFlagSpec{},
			ActionFlags: []spec.ExpFlagSpec{
				&spec.ExpFlag{
					Name: "secret-name",
					Desc: "The name of the imagePullSecret to corrupt. 
If not specified, all imagePullSecrets of the target Pod will be corrupted",
				},
			},
			ActionExecutor: &ImagePullSecretsErrorActionExecutor{client: client},
			ActionExample: `# Simulate image pull authentication failure for a specific pod blade create k8s pod-pod imagepullsecretserror --names my-app-pod --namespace default --kubeconfig ~/.kube/config # Simulate image pull authentication failure for pods selected by labels blade create k8s pod-pod imagepullsecretserror --labels app=nginx --namespace default --kubeconfig ~/.kube/config # Corrupt only a specific imagePullSecret blade create k8s pod-pod imagepullsecretserror --names my-app-pod --namespace default --secret-name my-registry-secret --kubeconfig ~/.kube/config `,
			ActionCategories: []string{model.CategorySystemContainer},
		},
	}
}

func (*ImagePullSecretsErrorActionSpec) Name() string {
	return "imagepullsecretserror"
}

func (*ImagePullSecretsErrorActionSpec) Aliases() []string {
	return []string{}
}

func (*ImagePullSecretsErrorActionSpec) ShortDesc() string {
	return "Simulate image pull authentication failure by corrupting imagePullSecrets"
}

func (*ImagePullSecretsErrorActionSpec) LongDesc() string {
	return "Simulate the scenario where a Pod fails to pull images from a private registry due to " +
		"authentication failure. This fault is injected by corrupting the credentials in the Secret " +
		"referenced by the Pod's imagePullSecrets field. The original Secret data is backed up to a " +
		"separate Secret for recovery. After corruption, the Pod is deleted so the controller recreates " +
		"it, and the new Pod will fail to pull images with ErrImagePull/ImagePullBackOff status. " +
		"When the experiment is destroyed, the original Secret is restored and the Pod is deleted again " +
		"to trigger a successful image pull."
}

// ImagePullSecretsErrorActionExecutor corrupts the registry credentials
// referenced by target Pods and deletes the Pods to trigger pull failures.
type ImagePullSecretsErrorActionExecutor struct {
	client *channel.Client
}

func (*ImagePullSecretsErrorActionExecutor) Name() string {
	return "imagepullsecretserror"
}

func (*ImagePullSecretsErrorActionExecutor) SetChannel(channel spec.Channel) {}

// Exec dispatches to create or destroy based on the context destroy flag.
func (d *ImagePullSecretsErrorActionExecutor) Exec(uid string, ctx context.Context, expModel *spec.ExpModel) *spec.Response {
	if _, ok := spec.IsDestroy(ctx); ok {
		return d.destroy(uid, ctx, expModel)
	}
	return d.create(uid, ctx, expModel)
}

// create corrupts every targeted imagePullSecret of each matched Pod (all-or-
// nothing per Pod, with rollback on partial failure), then deletes the Pod so
// its controller recreates it and the image pull fails with the corrupted
// credentials.
func (d *ImagePullSecretsErrorActionExecutor) create(uid string, ctx context.Context, expModel *spec.ExpModel) *spec.Response {
	experimentId := model.GetExperimentIdFromContext(ctx)
	logrusField := logrus.WithField("experiment", experimentId)
	containerObjectMetaList, err := model.GetContainerObjectMetaListFromContext(ctx)
	if err != nil {
		util.Errorf(uid, util.GetRunFuncName(), err.Error())
		return spec.ResponseFailWithResult(spec.ContainerInContextNotFound,
			v1alpha1.CreateFailExperimentStatus(spec.ContainerInContextNotFound.Msg, []v1alpha1.ResourceStatus{}))
	}
	secretNameFilter := expModel.ActionFlags["secret-name"]
	// Track processed Secrets to avoid corrupting the same Secret multiple times
	// when multiple Pods reference the same Secret
	processedSecrets := make(map[string]bool)
	statuses := make([]v1alpha1.ResourceStatus, 0)
	success := false
	for _, meta := range containerObjectMetaList {
		status := v1alpha1.ResourceStatus{
			Kind:       v1alpha1.PodKind,
			Identifier: meta.GetIdentifier(),
		}
		// Get the Pod
		pod := &v1.Pod{}
		err := d.client.Get(ctx, types.NamespacedName{Name: meta.PodName, Namespace: meta.Namespace}, pod)
		if err != nil {
			logrusField.Warningf("get pod %s/%s failed: %v", meta.Namespace, meta.PodName, err)
			status = status.CreateFailResourceStatus(fmt.Sprintf("get pod failed: %v", err), spec.K8sExecFailed.Code)
			statuses = append(statuses, status)
			continue
		}
		// Check imagePullSecrets
		if len(pod.Spec.ImagePullSecrets) == 0 {
			logrusField.Warningf("pod %s/%s has no imagePullSecrets", meta.Namespace, meta.PodName)
			status = status.CreateFailResourceStatus("pod has no imagePullSecrets", spec.K8sExecFailed.Code)
			statuses = append(statuses, status)
			continue
		}
		// Filter by --secret-name if specified
		targetSecretRefs := pod.Spec.ImagePullSecrets
		if secretNameFilter != "" {
			targetSecretRefs = filterSecretRefs(targetSecretRefs, secretNameFilter)
			if len(targetSecretRefs) == 0 {
				logrusField.Warningf("pod %s/%s does not have imagePullSecret %s",
					meta.Namespace, meta.PodName, secretNameFilter)
				status = status.CreateFailResourceStatus(
					fmt.Sprintf("pod does not have imagePullSecret %s", secretNameFilter),
					spec.K8sExecFailed.Code,
				)
				statuses = append(statuses, status)
				continue
			}
		}
		// Process each target Secret for this Pod.
		// All Secrets must be corrupted successfully before deleting the Pod.
		// If any corruption fails, roll back the ones that succeeded for this Pod
		// to avoid partial corruption leading to unpredictable behavior.
		corruptedInThisRound := make([]string, 0, len(targetSecretRefs))
		allSecretsOk := true
		for _, secretRef := range targetSecretRefs {
			secretKey := fmt.Sprintf("%s/%s", meta.Namespace, secretRef.Name)
			if processedSecrets[secretKey] {
				// Already corrupted by a previous Pod in this experiment
				continue
			}
			if err := d.corruptSecret(ctx, logrusField, experimentId, meta.Namespace, secretRef.Name); err != nil {
				logrusField.Warningf("corrupt secret %s failed: %v", secretKey, err)
				allSecretsOk = false
				break
			}
			corruptedInThisRound = append(corruptedInThisRound, secretKey)
		}
		if !allSecretsOk {
			// Roll back Secrets corrupted in this round to avoid partial corruption.
			// Secrets corrupted by previous Pods (already in processedSecrets) are not
			// rolled back here because those Pods have already been deleted successfully.
			for _, secretKey := range corruptedInThisRound {
				if err := d.rollbackSecret(ctx, logrusField, experimentId, secretKey); err != nil {
					logrusField.Warningf("rollback secret %s failed: %v", secretKey, err)
				}
			}
			status = status.CreateFailResourceStatus("failed to corrupt all imagePullSecrets, rolled back", spec.K8sExecFailed.Code)
			statuses = append(statuses, status)
			continue
		}
		// Mark all newly corrupted Secrets as processed
		for _, secretKey := range corruptedInThisRound {
			processedSecrets[secretKey] = true
		}
		// Delete the Pod to trigger recreation with corrupted credentials
		if err := d.client.Delete(ctx, pod); err != nil {
			if !apierrors.IsNotFound(err) {
				logrusField.Warningf("delete pod %s/%s failed: %v, rolling back corrupted secrets",
					meta.Namespace, meta.PodName, err)
				// Roll back Secrets corrupted in this round since Pod won't be recreated
				for _, secretKey := range corruptedInThisRound {
					if rbErr := d.rollbackSecret(ctx, logrusField, experimentId, secretKey); rbErr != nil {
						logrusField.Warningf("rollback secret %s after pod delete failure: %v", secretKey, rbErr)
					}
					delete(processedSecrets, secretKey)
				}
				status = status.CreateFailResourceStatus(fmt.Sprintf("delete pod failed: %v", err), spec.K8sExecFailed.Code)
				statuses = append(statuses, status)
				continue
			}
		}
		logrusField.Infof("corrupted imagePullSecrets and deleted pod %s/%s", meta.Namespace, meta.PodName)
		status = status.CreateSuccessResourceStatus()
		statuses = append(statuses, status)
		success = true
	}
	// Overall success if at least one Pod was injected.
	var experimentStatus v1alpha1.ExperimentStatus
	if success {
		experimentStatus = v1alpha1.CreateSuccessExperimentStatus(statuses)
	} else {
		experimentStatus = v1alpha1.CreateFailExperimentStatus("see resStatuses for details", statuses)
	}
	return spec.ReturnResultIgnoreCode(experimentStatus)
}

// destroy restores all backed-up Secrets for this experiment and deletes the
// Pods again so they are recreated with working credentials.
func (d *ImagePullSecretsErrorActionExecutor) destroy(uid string, ctx context.Context, expModel *spec.ExpModel) *spec.Response {
	experimentId := model.GetExperimentIdFromContext(ctx)
	logrusField := logrus.WithField("experiment",
experimentId) containerObjectMetaList, err := model.GetContainerObjectMetaListFromContext(ctx) if err != nil { util.Errorf(uid, util.GetRunFuncName(), err.Error()) return spec.ResponseFailWithResult(spec.ContainerInContextNotFound, v1alpha1.CreateFailExperimentStatus(spec.ContainerInContextNotFound.Msg, []v1alpha1.ResourceStatus{})) } // Collect all unique namespaces involved namespaces := make(map[string]bool) for _, meta := range containerObjectMetaList { namespaces[meta.Namespace] = true } // Find and restore all backup Secrets for this experiment allSuccess := true for ns := range namespaces { if err := d.restoreSecretsInNamespace(ctx, logrusField, experimentId, ns); err != nil { logrusField.Warningf("restore secrets in namespace %s failed: %v", ns, err) allSuccess = false } } // Delete Pods to trigger recreation with restored credentials statuses := make([]v1alpha1.ResourceStatus, 0) for _, meta := range containerObjectMetaList { status := v1alpha1.ResourceStatus{ Kind: v1alpha1.PodKind, Identifier: meta.GetIdentifier(), } pod := &v1.Pod{ ObjectMeta: metav1.ObjectMeta{ Name: meta.PodName, Namespace: meta.Namespace, }, } if err := d.client.Delete(ctx, pod); err != nil { if apierrors.IsNotFound(err) { logrusField.Infof("pod %s/%s already deleted", meta.Namespace, meta.PodName) } else { logrusField.Warningf("delete pod %s/%s failed: %v", meta.Namespace, meta.PodName, err) status = status.CreateFailResourceStatus(fmt.Sprintf("delete pod failed: %v", err), spec.K8sExecFailed.Code) statuses = append(statuses, status) allSuccess = false continue } } status = status.CreateSuccessResourceStatus() status.State = v1alpha1.DestroyedState statuses = append(statuses, status) } if allSuccess { return spec.ReturnResultIgnoreCode(v1alpha1.CreateDestroyedExperimentStatus(statuses)) } return spec.ReturnResultIgnoreCode(v1alpha1.CreateFailExperimentStatus("see resStatuses for details", statuses)) } // corruptSecret backs up the original Secret data, then corrupts the credentials 
func (d *ImagePullSecretsErrorActionExecutor) corruptSecret(ctx context.Context, logrusField *logrus.Entry,
	experimentId, namespace, secretName string) error {
	// Get the Secret
	secret := &v1.Secret{}
	if err := d.client.Get(ctx, types.NamespacedName{Name: secretName, Namespace: namespace}, secret); err != nil {
		return fmt.Errorf("get secret %s/%s failed: %v", namespace, secretName, err)
	}
	// Validate Secret type: only docker registry credential Secrets are supported
	if secret.Type != v1.SecretTypeDockerConfigJson && secret.Type != v1.SecretTypeDockercfg {
		return fmt.Errorf("secret %s/%s type is %s, expected %s or %s", namespace, secretName, secret.Type,
			v1.SecretTypeDockerConfigJson, v1.SecretTypeDockercfg)
	}
	// Create backup Secret carrying the experiment label (for listing on destroy)
	// and annotations pointing back to the original Secret (for restore).
	backupName := generateBackupSecretName(experimentId, namespace, secretName)
	backupSecret := &v1.Secret{
		ObjectMeta: metav1.ObjectMeta{
			Name:      backupName,
			Namespace: namespace,
			Labels: map[string]string{
				ChaosBladeIPSExperimentLabel: experimentId,
			},
			Annotations: map[string]string{
				ChaosBladeIPSBackupAnnotation:            "true",
				ChaosBladeIPSOriginalNameAnnotation:      secretName,
				ChaosBladeIPSOriginalNamespaceAnnotation: namespace,
			},
		},
		Type: secret.Type,
		Data: copySecretData(secret.Data),
	}
	// createdBackup distinguishes "backup created by this call" from
	// "backup pre-existed (retry of the same experiment)": on later failures we
	// only delete a backup we created here, never a pre-existing one.
	createdBackup := false
	if err := d.client.Create(ctx, backupSecret); err != nil {
		if apierrors.IsAlreadyExists(err) {
			// Check if the existing backup belongs to this experiment
			existingBackup := &v1.Secret{}
			if getErr := d.client.Get(ctx, types.NamespacedName{Name: backupName, Namespace: namespace}, existingBackup); getErr != nil {
				return fmt.Errorf("backup secret %s/%s already exists and failed to verify owner: %v", namespace, backupName, getErr)
			}
			if existingBackup.Labels[ChaosBladeIPSExperimentLabel] != experimentId {
				return fmt.Errorf("secret %s/%s is already being used by another experiment %s", namespace, secretName,
					existingBackup.Labels[ChaosBladeIPSExperimentLabel])
			}
			logrusField.Infof("backup secret %s/%s already exists for this experiment, skip creation", namespace, backupName)
		} else {
			return fmt.Errorf("create backup secret %s/%s failed: %v", namespace, backupName, err)
		}
	} else {
		createdBackup = true
		logrusField.Infof("created backup secret %s/%s for original %s", namespace, backupName, secretName)
	}
	// Corrupt the credentials according to the Secret type's data key
	var corruptedData []byte
	var corruptErr error
	if secret.Type == v1.SecretTypeDockerConfigJson {
		dataKey := v1.DockerConfigJsonKey
		originalData, ok := secret.Data[dataKey]
		if !ok {
			return fmt.Errorf("secret %s/%s has no %s key", namespace, secretName, dataKey)
		}
		corruptedData, corruptErr = corruptDockerConfigJSON(originalData)
	} else {
		// kubernetes.io/dockercfg
		dataKey := v1.DockerConfigKey
		originalData, ok := secret.Data[dataKey]
		if !ok {
			return fmt.Errorf("secret %s/%s has no %s key", namespace, secretName, dataKey)
		}
		corruptedData, corruptErr = corruptDockerCfg(originalData)
	}
	if corruptErr != nil {
		// Rollback: only delete the backup if we created it in this call
		if createdBackup {
			if delErr := d.client.Delete(ctx, backupSecret); delErr != nil {
				logrusField.Warningf("rollback: delete backup secret %s/%s failed: %v", namespace, backupName, delErr)
			}
		}
		return fmt.Errorf("corrupt secret data failed: %v", corruptErr)
	}
	// Update the Secret with corrupted data
	if secret.Type == v1.SecretTypeDockerConfigJson {
		secret.Data[v1.DockerConfigJsonKey] = corruptedData
	} else {
		secret.Data[v1.DockerConfigKey] = corruptedData
	}
	if err := d.client.Update(ctx, secret); err != nil {
		// Rollback: only delete the backup if we created it in this call
		if createdBackup {
			if delErr := d.client.Delete(ctx, backupSecret); delErr != nil {
				logrusField.Warningf("rollback: delete backup secret %s/%s failed: %v", namespace, backupName, delErr)
			}
		}
		return fmt.Errorf("update secret %s/%s failed: %v", namespace, secretName, err)
	}
	logrusField.Infof("corrupted credentials in secret %s/%s", namespace, secretName)
	return nil
}

// rollbackSecret restores a single Secret from its backup and deletes the backup.
// secretKey is in the format "namespace/secretName".
func (d *ImagePullSecretsErrorActionExecutor) rollbackSecret(ctx context.Context, logrusField *logrus.Entry,
	experimentId, secretKey string) error {
	parts := strings.SplitN(secretKey, "/", 2)
	if len(parts) != 2 {
		return fmt.Errorf("invalid secret key format: %s", secretKey)
	}
	namespace, secretName := parts[0], parts[1]
	backupName := generateBackupSecretName(experimentId, namespace, secretName)
	// Get the backup Secret; a missing backup means there is nothing to undo
	backup := &v1.Secret{}
	if err := d.client.Get(ctx, types.NamespacedName{Name: backupName, Namespace: namespace}, backup); err != nil {
		if apierrors.IsNotFound(err) {
			logrusField.Infof("rollback: backup secret %s/%s not found, nothing to restore", namespace, backupName)
			return nil
		}
		return fmt.Errorf("rollback: get backup secret %s/%s failed: %v", namespace, backupName, err)
	}
	// Get the original Secret and restore its data
	originalSecret := &v1.Secret{}
	if err := d.client.Get(ctx, types.NamespacedName{Name: secretName, Namespace: namespace}, originalSecret); err != nil {
		if !apierrors.IsNotFound(err) {
			return fmt.Errorf("rollback: get original secret %s/%s failed: %v", namespace, secretName, err)
		}
		// Original was deleted externally; just clean up the backup
	} else {
		originalSecret.Data = copySecretData(backup.Data)
		originalSecret.Type = backup.Type
		if err := d.client.Update(ctx, originalSecret); err != nil {
			return fmt.Errorf("rollback: restore secret %s/%s failed: %v", namespace, secretName, err)
		}
		logrusField.Infof("rollback: restored secret %s/%s from backup", namespace, secretName)
	}
	// Delete the backup Secret; failure here is only logged since the
	// original has already been restored
	if err := d.client.Delete(ctx, backup); err != nil && !apierrors.IsNotFound(err) {
		logrusField.Warningf("rollback: delete backup secret %s/%s failed: %v", namespace, backupName, err)
	}
	return nil
}

// restoreSecretsInNamespace finds all backup Secrets in the given namespace for this experiment
// and restores the original Secrets from the backups
func (d *ImagePullSecretsErrorActionExecutor) restoreSecretsInNamespace(ctx context.Context,
	logrusField *logrus.Entry, experimentId, namespace string) error {
	// Backups are discovered by the experiment label set in corruptSecret
	backupList := &v1.SecretList{}
	listOpts := []client.ListOption{
		client.InNamespace(namespace),
		client.MatchingLabels{ChaosBladeIPSExperimentLabel: experimentId},
	}
	if err := d.client.List(ctx, backupList, listOpts...); err != nil {
		return fmt.Errorf("list backup secrets in namespace %s failed: %v", namespace, err)
	}
	// Errors are accumulated so one bad backup does not block restoring the rest
	var errs []string
	for i := range backupList.Items {
		backup := &backupList.Items[i]
		// Only process Secrets explicitly marked as IPS backups
		if backup.Annotations[ChaosBladeIPSBackupAnnotation] != "true" {
			continue
		}
		originalName := backup.Annotations[ChaosBladeIPSOriginalNameAnnotation]
		originalNamespace := backup.Annotations[ChaosBladeIPSOriginalNamespaceAnnotation]
		if originalName == "" || originalNamespace == "" {
			logrusField.Warningf("backup secret %s/%s missing original name/namespace annotations, skip", namespace, backup.Name)
			errs = append(errs, fmt.Sprintf("backup %s/%s missing annotations", namespace, backup.Name))
			continue
		}
		// Get the original Secret
		originalSecret := &v1.Secret{}
		err := d.client.Get(ctx, types.NamespacedName{Name: originalName, Namespace: originalNamespace}, originalSecret)
		if err != nil {
			if apierrors.IsNotFound(err) {
				// Original gone: nothing to restore, fall through to backup cleanup
				logrusField.Warningf("original secret %s/%s not found, deleting backup", originalNamespace, originalName)
			} else {
				logrusField.Warningf("get original secret %s/%s failed: %v", originalNamespace, originalName, err)
				errs = append(errs, fmt.Sprintf("get secret %s/%s failed: %v", originalNamespace, originalName, err))
				continue
			}
		} else {
			// Restore the original data and type
			originalSecret.Data = copySecretData(backup.Data)
			originalSecret.Type = backup.Type
			if err := d.client.Update(ctx, originalSecret); err != nil {
				logrusField.Warningf("restore secret %s/%s failed: %v", originalNamespace, originalName, err)
				errs = append(errs, fmt.Sprintf("restore secret %s/%s failed: %v", originalNamespace, originalName, err))
				continue
			}
			logrusField.Infof("restored secret %s/%s from backup", originalNamespace, originalName)
		}
		// Delete the backup Secret
		if err := d.client.Delete(ctx, backup); err != nil {
			if !apierrors.IsNotFound(err) {
				logrusField.Warningf("delete backup secret %s/%s failed: %v", namespace, backup.Name, err)
				errs = append(errs, fmt.Sprintf("delete backup %s/%s failed: %v", namespace, backup.Name, err))
			}
		} else {
			logrusField.Infof("deleted backup secret %s/%s", namespace, backup.Name)
		}
	}
	if len(errs) > 0 {
		return fmt.Errorf("restore secrets in namespace %s had %d error(s): %s", namespace, len(errs), strings.Join(errs, "; "))
	}
	return nil
}

// generateBackupSecretName creates a deterministic backup Secret name.
// Format: chaosblade-ips-<first 8 chars of experimentId>-<8 hex chars of sha256(namespace/secretName)>
func generateBackupSecretName(experimentId, namespace, secretName string) string {
	hash := sha256.Sum256([]byte(fmt.Sprintf("%s/%s", namespace, secretName)))
	// First 4 hash bytes rendered as 8 hex characters
	hashStr := fmt.Sprintf("%x", hash[:4])
	expIdPrefix := experimentId
	if len(expIdPrefix) > 8 {
		expIdPrefix = expIdPrefix[:8]
	}
	return fmt.Sprintf("chaosblade-ips-%s-%s", expIdPrefix, hashStr)
}

// corruptDockerConfigJSON corrupts the auth credentials in a .dockerconfigjson format Secret
// The JSON structure is preserved but all credentials are replaced with invalid values
func corruptDockerConfigJSON(data []byte) ([]byte, error) {
	var config map[string]interface{}
	if err := json.Unmarshal(data, &config); err != nil {
		return nil, fmt.Errorf("unmarshal dockerconfigjson failed: %v", err)
	}
	auths, ok := config["auths"]
	if !ok {
		return nil, fmt.Errorf("dockerconfigjson has no auths key")
	}
	authsMap, ok := auths.(map[string]interface{})
	if !ok {
		return nil, fmt.Errorf("auths field is not a map")
	}
	if len(authsMap) == 0 {
		return nil, fmt.Errorf("dockerconfigjson auths is empty, no credentials to corrupt")
	}
	// Corrupt each registry's credentials
	if n := corruptRegistryCredentials(authsMap); n == 0 {
		return nil, fmt.Errorf("dockerconfigjson: no valid registry credentials found to corrupt")
	}
	config["auths"] = authsMap
	result, err := json.Marshal(config)
	if err != nil {
		return nil, fmt.Errorf("marshal corrupted dockerconfigjson failed: %v", err)
	}
	return result, nil
}

// corruptDockerCfg corrupts the auth credentials in a .dockercfg format Secret
// The .dockercfg format is: {"registry": {"username": "...", "password": "...", "auth": "..."}}
func corruptDockerCfg(data []byte) ([]byte, error) {
	var config map[string]interface{}
	if err := json.Unmarshal(data, &config); err != nil {
		return nil, fmt.Errorf("unmarshal dockercfg failed: %v", err)
	}
	if len(config) == 0 {
		return nil, fmt.Errorf("dockercfg is empty, no credentials to corrupt")
	}
	// Top-level keys are registries in this format
	if n := corruptRegistryCredentials(config); n == 0 {
		return nil, fmt.Errorf("dockercfg: no valid registry credentials found to corrupt")
	}
	result, err := json.Marshal(config)
	if err != nil {
		return nil, fmt.Errorf("marshal corrupted dockercfg failed: %v", err)
	}
	return result, nil
}

// corruptRegistryCredentials replaces the auth credentials in each registry entry with invalid values.
// Returns the number of registry entries that were actually corrupted.
func corruptRegistryCredentials(registries map[string]interface{}) int {
	// "auth" is base64("username:password") per the docker config format
	invalidAuth := base64.StdEncoding.EncodeToString([]byte("chaosblade-invalid-user:chaosblade-invalid-pass"))
	corrupted := 0
	for registry, creds := range registries {
		credsMap, ok := creds.(map[string]interface{})
		if !ok {
			// Not a credential object; leave untouched and do not count it
			continue
		}
		credsMap["username"] = "chaosblade-invalid-user"
		credsMap["password"] = "chaosblade-invalid-pass"
		credsMap["auth"] = invalidAuth
		// Token-based auth would bypass the invalid username/password, so drop it
		delete(credsMap, "identitytoken")
		delete(credsMap, "registrytoken")
		registries[registry] = credsMap
		corrupted++
	}
	return corrupted
}

// filterSecretRefs filters the imagePullSecrets list by a specific Secret name
func filterSecretRefs(refs []v1.LocalObjectReference, name string) []v1.LocalObjectReference {
	filtered := make([]v1.LocalObjectReference, 0)
	for _, ref := range refs {
		if ref.Name == name {
			filtered = append(filtered, ref)
		}
	}
	return filtered
}

// copySecretData creates a deep copy of Secret data
func copySecretData(data map[string][]byte) map[string][]byte {
	if data == nil {
		return nil
	}
	result := make(map[string][]byte, len(data))
	for k, v := range data {
		copied := make([]byte, len(v))
		copy(copied, v)
		result[k] = copied
	}
	return result
}

================================================
FILE: exec/pod/imagepullsecretserror_test.go
================================================
/*
 * Copyright 2025 The ChaosBlade Authors
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
*/

package pod

import (
	"encoding/base64"
	"encoding/json"
	"testing"

	v1 "k8s.io/api/core/v1"
)

// TestCorruptDockerConfigJSON verifies that every registry entry in a valid
// .dockerconfigjson payload gets invalid credentials and loses its tokens.
func TestCorruptDockerConfigJSON(t *testing.T) {
	// Valid dockerconfigjson with multiple registries
	input := map[string]interface{}{
		"auths": map[string]interface{}{
			"registry.example.com": map[string]interface{}{
				"username": "real-user",
				"password": "real-pass",
				"auth":     base64.StdEncoding.EncodeToString([]byte("real-user:real-pass")),
			},
			"docker.io": map[string]interface{}{
				"username":      "docker-user",
				"password":      "docker-pass",
				"auth":          base64.StdEncoding.EncodeToString([]byte("docker-user:docker-pass")),
				"identitytoken": "some-token",
			},
		},
	}
	inputBytes, err := json.Marshal(input)
	if err != nil {
		t.Fatalf("failed to marshal input: %v", err)
	}
	result, err := corruptDockerConfigJSON(inputBytes)
	if err != nil {
		t.Fatalf("corruptDockerConfigJSON failed: %v", err)
	}
	// Parse result
	var output map[string]interface{}
	if err := json.Unmarshal(result, &output); err != nil {
		t.Fatalf("failed to unmarshal result: %v", err)
	}
	auths, ok := output["auths"].(map[string]interface{})
	if !ok {
		t.Fatal("result has no 'auths' map")
	}
	// Verify both registries are corrupted
	expectedAuth := base64.StdEncoding.EncodeToString([]byte("chaosblade-invalid-user:chaosblade-invalid-pass"))
	for registry, creds := range auths {
		credsMap, ok := creds.(map[string]interface{})
		if !ok {
			t.Fatalf("credentials for %s is not a map", registry)
		}
		if credsMap["username"] != "chaosblade-invalid-user" {
			t.Errorf("registry %s: expected username 'chaosblade-invalid-user', got '%v'", registry, credsMap["username"])
		}
		if credsMap["password"] != "chaosblade-invalid-pass" {
			t.Errorf("registry %s: expected password 'chaosblade-invalid-pass', got '%v'", registry, credsMap["password"])
		}
		if credsMap["auth"] != expectedAuth {
			t.Errorf("registry %s: expected auth '%s', got '%v'", registry, expectedAuth, credsMap["auth"])
		}
		if _, exists := credsMap["identitytoken"]; exists {
			t.Errorf("registry %s: identitytoken should be removed", registry)
		}
		if _, exists := credsMap["registrytoken"]; exists {
			t.Errorf("registry %s: registrytoken should be removed", registry)
		}
	}
}

// An empty auths map must be rejected — there is nothing to corrupt.
func TestCorruptDockerConfigJSON_EmptyAuths(t *testing.T) {
	input := map[string]interface{}{
		"auths": map[string]interface{}{},
	}
	inputBytes, err := json.Marshal(input)
	if err != nil {
		t.Fatalf("failed to marshal input: %v", err)
	}
	_, err = corruptDockerConfigJSON(inputBytes)
	if err == nil {
		t.Fatal("expected error for empty auths, got nil")
	}
}

// A payload without the "auths" key is not a docker config and must error.
func TestCorruptDockerConfigJSON_NoAuthsKey(t *testing.T) {
	input := map[string]interface{}{
		"someOtherKey": "value",
	}
	inputBytes, err := json.Marshal(input)
	if err != nil {
		t.Fatalf("failed to marshal input: %v", err)
	}
	_, err = corruptDockerConfigJSON(inputBytes)
	if err == nil {
		t.Fatal("expected error for missing auths key, got nil")
	}
}

// Malformed JSON must surface as an error, not a panic.
func TestCorruptDockerConfigJSON_InvalidJSON(t *testing.T) {
	_, err := corruptDockerConfigJSON([]byte("not valid json"))
	if err == nil {
		t.Fatal("expected error for invalid JSON, got nil")
	}
}

// TestCorruptDockerCfg covers the legacy .dockercfg layout, where registries
// are top-level keys; non-credential fields like email must survive.
func TestCorruptDockerCfg(t *testing.T) {
	// .dockercfg format: top-level keys are registries
	input := map[string]interface{}{
		"https://index.docker.io/v1/": map[string]interface{}{
			"username": "myuser",
			"password": "mypass",
			"email":    "user@example.com",
			"auth":     base64.StdEncoding.EncodeToString([]byte("myuser:mypass")),
		},
	}
	inputBytes, err := json.Marshal(input)
	if err != nil {
		t.Fatalf("failed to marshal input: %v", err)
	}
	result, err := corruptDockerCfg(inputBytes)
	if err != nil {
		t.Fatalf("corruptDockerCfg failed: %v", err)
	}
	var output map[string]interface{}
	if err := json.Unmarshal(result, &output); err != nil {
		t.Fatalf("failed to unmarshal result: %v", err)
	}
	expectedAuth := base64.StdEncoding.EncodeToString([]byte("chaosblade-invalid-user:chaosblade-invalid-pass"))
	for registry, creds := range output {
		credsMap, ok := creds.(map[string]interface{})
		if !ok {
			t.Fatalf("credentials for %s is not a map", registry)
		}
		if credsMap["username"] != "chaosblade-invalid-user" {
			t.Errorf("registry %s: expected username 'chaosblade-invalid-user', got '%v'", registry, credsMap["username"])
		}
		if credsMap["password"] != "chaosblade-invalid-pass" {
			t.Errorf("registry %s: expected password 'chaosblade-invalid-pass', got '%v'", registry, credsMap["password"])
		}
		if credsMap["auth"] != expectedAuth {
			t.Errorf("registry %s: expected auth '%s', got '%v'", registry, expectedAuth, credsMap["auth"])
		}
		// email field should be preserved
		if credsMap["email"] != "user@example.com" {
			t.Errorf("registry %s: email field should be preserved, got '%v'", registry, credsMap["email"])
		}
	}
}

// An empty .dockercfg must be rejected.
func TestCorruptDockerCfg_Empty(t *testing.T) {
	input := map[string]interface{}{}
	inputBytes, err := json.Marshal(input)
	if err != nil {
		t.Fatalf("failed to marshal input: %v", err)
	}
	_, err = corruptDockerCfg(inputBytes)
	if err == nil {
		t.Fatal("expected error for empty dockercfg, got nil")
	}
}

// TestGenerateBackupSecretName checks the backup name is deterministic,
// input-sensitive, and short enough for a Kubernetes object name.
func TestGenerateBackupSecretName(t *testing.T) {
	tests := []struct {
		name         string
		experimentId string
		namespace    string
		secretName   string
	}{
		{
			name:         "normal case",
			experimentId: "abc12345def67890",
			namespace:    "default",
			secretName:   "my-registry-secret",
		},
		{
			name:         "short experiment id",
			experimentId: "short",
			namespace:    "kube-system",
			secretName:   "docker-secret",
		},
		{
			name:         "long experiment id",
			experimentId: "very-long-experiment-id-that-exceeds-eight-characters",
			namespace:    "production",
			secretName:   "registry-credentials",
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			result := generateBackupSecretName(tt.experimentId, tt.namespace, tt.secretName)
			// Should start with prefix
			if len(result) < len("chaosblade-ips-") {
				t.Errorf("result too short: %s", result)
			}
			// Should be deterministic
			result2 := generateBackupSecretName(tt.experimentId, tt.namespace, tt.secretName)
			if result != result2 {
				t.Errorf("not deterministic: %s != %s", result, result2)
			}
			// Should be a valid DNS subdomain (max 253 chars, lowercase, alphanumeric/dash)
			if len(result) > 253 {
				t.Errorf("name too long: %d characters", len(result))
			}
			// Different inputs should produce different names
			different := generateBackupSecretName(tt.experimentId, tt.namespace, "other-secret")
			if result == different {
				t.Errorf("different inputs produced same name: %s", result)
			}
		})
	}
}

func TestGenerateBackupSecretName_Deterministic(t *testing.T) {
	// Same inputs should always produce the same output
	name1 := generateBackupSecretName("exp123", "default", "my-secret")
	name2 := generateBackupSecretName("exp123", "default", "my-secret")
	if name1 != name2 {
		t.Errorf("expected deterministic result, got %s and %s", name1, name2)
	}
}

func TestImagePullSecretsErrorActionSpec_Name(t *testing.T) {
	spec := &ImagePullSecretsErrorActionSpec{}
	if spec.Name() != "imagepullsecretserror" {
		t.Errorf("expected name 'imagepullsecretserror', got '%s'", spec.Name())
	}
}

func TestImagePullSecretsErrorActionSpec_Aliases(t *testing.T) {
	spec := &ImagePullSecretsErrorActionSpec{}
	aliases := spec.Aliases()
	if len(aliases) != 0 {
		t.Errorf("expected no aliases, got %v", aliases)
	}
}

func TestImagePullSecretsErrorActionExecutor_Name(t *testing.T) {
	executor := &ImagePullSecretsErrorActionExecutor{}
	if executor.Name() != "imagepullsecretserror" {
		t.Errorf("expected name 'imagepullsecretserror', got '%s'", executor.Name())
	}
}

// TestFilterSecretRefs covers exact-name matching against a reference list.
func TestFilterSecretRefs(t *testing.T) {
	refs := []v1.LocalObjectReference{
		{Name: "secret-a"},
		{Name: "secret-b"},
		{Name: "secret-c"},
	}
	tests := []struct {
		name     string
		filter   string
		expected int
	}{
		{
			name:     "match single",
			filter:   "secret-a",
			expected: 1,
		},
		{
			name:     "no match",
			filter:   "nonexistent",
			expected: 0,
		},
		{
			name:     "match last",
			filter:   "secret-c",
			expected: 1,
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			result := filterSecretRefs(refs, tt.filter)
			if len(result) != tt.expected {
				t.Errorf("expected %d results, got %d", tt.expected, len(result))
			}
			if tt.expected > 0 && result[0].Name != tt.filter {
				t.Errorf("expected name %s, got %s", tt.filter, result[0].Name)
			}
		})
	}
}

func TestFilterSecretRefs_EmptyInput(t *testing.T) {
	result := filterSecretRefs(nil, "any")
	if len(result) != 0 {
		t.Errorf("expected 0 results for nil input, got %d", len(result))
	}
	result = filterSecretRefs([]v1.LocalObjectReference{}, "any")
	if len(result) != 0 {
		t.Errorf("expected 0 results for empty input, got %d", len(result))
	}
}

func TestFilterSecretRefs_DuplicateNames(t *testing.T) {
	refs := []v1.LocalObjectReference{
		{Name: "my-secret"},
		{Name: "other-secret"},
		{Name: "my-secret"},
	}
	result := filterSecretRefs(refs, "my-secret")
	if len(result) != 2 {
		t.Errorf("expected 2 results for duplicate names, got %d", len(result))
	}
}

// TestCopySecretData verifies the copy is deep: equal content, independent bytes.
func TestCopySecretData(t *testing.T) {
	original := map[string][]byte{
		".dockerconfigjson": []byte(`{"auths":{"registry.io":{"auth":"dXNlcjpwYXNz"}}}`),
	}
	copied := copySecretData(original)
	// Verify content is the same
	if string(copied[".dockerconfigjson"]) != string(original[".dockerconfigjson"]) {
		t.Error("copied content should match original")
	}
	// Verify modifying copy doesn't affect original
	copied[".dockerconfigjson"][0] = 'X'
	if original[".dockerconfigjson"][0] == 'X' {
		t.Error("modifying copy should not affect original")
	}
}

func TestCopySecretData_Nil(t *testing.T) {
	result := copySecretData(nil)
	if result != nil {
		t.Errorf("expected nil for nil input, got %v", result)
	}
}

================================================
FILE: exec/pod/pod.go
================================================
/*
 * Copyright 2025 The ChaosBlade Authors
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and * limitations under the License. */ package pod import ( "fmt" "strings" "github.com/chaosblade-io/chaosblade-exec-os/exec/cpu" "github.com/chaosblade-io/chaosblade-exec-os/exec/disk" "github.com/chaosblade-io/chaosblade-exec-os/exec/file" "github.com/chaosblade-io/chaosblade-exec-os/exec/mem" "github.com/chaosblade-io/chaosblade-exec-os/exec/network" "github.com/chaosblade-io/chaosblade-exec-os/exec/network/tc" "github.com/chaosblade-io/chaosblade-exec-os/exec/script" "github.com/chaosblade-io/chaosblade-spec-go/spec" "github.com/chaosblade-io/chaosblade-operator/channel" "github.com/chaosblade-io/chaosblade-operator/exec/model" ) type ResourceModelSpec struct { model.BaseResourceExpModelSpec } func NewResourceModelSpec(client *channel.Client) model.ResourceExpModelSpec { modelSpec := &ResourceModelSpec{ model.NewBaseResourceExpModelSpec("pod", client), } // os experiment models osExpModels := model.NewOSSubResourceModelSpec().ExpModels() spec.AddExecutorToModelSpec(&model.CommonExecutor{Client: client}, osExpModels...) // pod-self experiment models expModels := append(osExpModels, NewSelfExpModelCommandSpec(client)) spec.AddFlagsToModelSpec(getResourceFlags, expModels...) modelSpec.RegisterExpModels(expModels...) 
addActionExamples(modelSpec) return modelSpec } func addActionExamples(modelSpec *ResourceModelSpec) { for _, expModelSpec := range modelSpec.ExpModelSpecs { for _, action := range expModelSpec.Actions() { v := interface{}(action) switch v.(type) { case *disk.FillActionSpec: action.SetLongDesc("The disk fill scenario experiment in the pod") action.SetExample( ` # Fill the /home directory with 40G of disk space in the pod blade create k8s pod-disk fill --path /home --size 40000 --names nginx-app --kubeconfig ~/.kube/config --namespace default # Fill the /home directory with 80% of the disk space in the pod and retains the file handle that populates the disk blade create k8s pod-disk fill --path /home --percent 80 --retain-handle --names nginx-app --kubeconfig ~/.kube/config --namespace default # Perform a fixed-size experimental scenario in the pod blade c k8s pod-disk fill --path /home --reserve 1024 --names nginx-app --kubeconfig ~/.kube/config --namespace default `, ) case *disk.BurnActionSpec: action.SetLongDesc("Disk read and write IO load experiment in the pod") action.SetExample( `# The data of rkB/s, wkB/s and % Util were mainly observed. Perform disk read IO high-load scenarios blade create k8s pod-disk burn --read --path /home --names nginx-app --kubeconfig ~/.kube/config --namespace default # Perform disk write IO high-load scenarios blade create k8s pod-disk burn --write --path /home --names nginx-app --kubeconfig ~/.kube/config --namespace default8 # Read and write IO load scenarios are performed at the same time. Path is not specified. 
The default is / blade create k8s pod-disk burn --read --write --names nginx-app --kubeconfig ~/.kube/config --namespace default`, ) case *mem.MemLoadActionCommand: action.SetLongDesc("The memory fill experiment scenario in the pod") action.SetExample( `# The execution memory footprint is 50% blade create k8s pod-mem load --mode ram --mem-percent 50 --names nginx-app --kubeconfig ~/.kube/config --namespace default # The execution memory footprint is 50%, cache model blade create k8s pod-mem load --mode cache --mem-percent 50 --names nginx-app --kubeconfig ~/.kube/config --namespace default # The execution memory footprint is 50%, usage contains buffer/cache blade create k8s pod-mem load --mode ram --mem-percent 50 --include-buffer-cache --names nginx-app --kubeconfig ~/.kube/config --namespace default # The execution memory footprint is 50% for 200 seconds blade create k8s pod-mem load --mode ram --mem-percent 50 --timeout 200 --names nginx-app --kubeconfig ~/.kube/config --namespace default # 200M memory is reserved blade create k8s pod-mem load --mode ram --reserve 200 --rate 100 --names nginx-app --kubeconfig ~/.kube/config --namespace default`, ) case *file.FileAppendActionSpec: action.SetLongDesc("The file append experiment scenario in the pod") action.SetExample( `# Appends the content "HELLO WORLD" to the /home/logs/nginx.log file blade create k8s pod-file append --filepath=/home/logs/nginx.log --content="HELL WORLD" --names nginx-app --kubeconfig ~/.kube/config --namespace default # Appends the content "HELLO WORLD" to the /home/logs/nginx.log file, interval 10 seconds blade create k8s pod-file append --filepath=/home/logs/nginx.log --content="HELL WORLD" --interval 10 --names nginx-app --kubeconfig ~/.kube/config --namespace default # Appends the content "HELLO WORLD" to the /home/logs/nginx.log file, enable base64 encoding blade create k8s pod-file append --filepath=/home/logs/nginx.log --content=SEVMTE8gV09STEQ= --names nginx-app --kubeconfig 
~/.kube/config --namespace default # mock interface timeout exception blade create k8s pod-file append --filepath=/home/logs/nginx.log --content="@{DATE:+%Y-%m-%d %H:%M:%S} ERROR invoke getUser timeout [@{RANDOM:100-200}]ms abc mock exception" --names nginx-app --kubeconfig ~/.kube/config --namespace default `, ) case *file.FileAddActionSpec: action.SetLongDesc("The file add experiment scenario in the pod") action.SetExample( `# Create a file named nginx.log in the /home directory blade create k8s pod-file add --filepath /home/nginx.log --names nginx-app --kubeconfig ~/.kube/config --namespace default # Create a file named nginx.log in the /home directory with the contents of HELLO WORLD blade create k8s pod-file add --filepath /home/nginx.log --content "HELLO WORLD" --names nginx-app --kubeconfig ~/.kube/config --namespace default # Create a file named nginx.log in the /temp directory and automatically create directories that don't exist blade create k8s pod-file add --filepath /temp/nginx.log --auto-create-dir --names nginx-app --kubeconfig ~/.kube/config --namespace default # Create a directory named /nginx in the /temp directory and automatically create directories that don't exist blade create k8s pod-file add --directory --filepath /temp/nginx --auto-create-dir --names nginx-app --kubeconfig ~/.kube/config --namespace default `, ) case *file.FileChmodActionSpec: action.SetLongDesc("The file permission modification scenario in the pod") action.SetExample(`# Modify /home/logs/nginx.log file permissions to 777 blade create k8s pod-file chmod --filepath /home/logs/nginx.log --mark=777 --names nginx-app --kubeconfig ~/.kube/config --namespace default `) case *file.FileDeleteActionSpec: action.SetLongDesc("The file delete scenario in the pod") action.SetExample( `# Delete the file /home/logs/nginx.log blade create k8s pod-file delete --filepath /home/logs/nginx.log --names nginx-app --kubeconfig ~/.kube/config --namespace default # Force delete the file 
/home/logs/nginx.log unrecoverable blade create k8s pod-file delete --filepath /home/logs/nginx.log --force --names nginx-app --kubeconfig ~/.kube/config --namespace default `, ) case *file.FileMoveActionSpec: action.SetExample("The file move scenario in the pod") action.SetExample(`# Move the file /home/logs/nginx.log to /tmp blade create k8s pod-file move --filepath /home/logs/nginx.log --target /tmp --names nginx-app --kubeconfig ~/.kube/config --namespace default # Force Move the file /home/logs/nginx.log to /temp blade create k8s pod-file move --filepath /home/logs/nginx.log --target /tmp --force --names nginx-app --kubeconfig ~/.kube/config --namespace default # Move the file /home/logs/nginx.log to /temp/ and automatically create directories that don't exist blade create k8s pod-file move --filepath /home/logs/nginx.log --target /temp --auto-create-dir --names nginx-app --kubeconfig ~/.kube/config --namespace default `) case *tc.DelayActionSpec: action.SetExample( `# Access to native 8080 and 8081 ports is delayed by 3 seconds, and the delay time fluctuates by 1 second blade create k8s pod-network delay --time 3000 --offset 1000 --interface eth0 --local-port 8080,8081 --names nginx-app --kubeconfig ~/.kube/config --namespace default # Local access to external 14.215.177.39 machine (ping www.baidu.com obtained IP) port 80 delay of 3 seconds blade create k8s pod-network delay --time 3000 --interface eth0 --remote-port 80 --destination-ip 14.215.177.39 --names nginx-app --kubeconfig ~/.kube/config --namespace default # Do a 5 second delay for the entire network card eth0, excluding ports 22 and 8000 to 8080 blade create k8s pod-network delay --time 5000 --interface eth0 --exclude-port 22,8000-8080 --names nginx-app --kubeconfig ~/.kube/config --namespace default`, ) case *network.DropActionSpec: action.SetExample( `# Experimental scenario of network shielding blade create k8s pod-network drop --names nginx-app --kubeconfig ~/.kube/config --namespace default`, ) 
case *network.DnsActionSpec: action.SetExample( `# The domain name www.baidu.com is not accessible blade create k8s pod-network dns --domain www.baidu.com --ip 10.0.0.0 --names nginx-app --kubeconfig ~/.kube/config --namespace default`, ) case *tc.LossActionSpec: action.SetExample(`# Access to native 8080 and 8081 ports lost 70% of packets blade create k8s pod-network loss --percent 70 --interface eth0 --local-port 8080,8081 --names nginx-app --kubeconfig ~/.kube/config --namespace default # The machine accesses external 14.215.177.39 machine (ping www.baidu.com) 80 port packet loss rate 100% blade create k8s pod-network loss --percent 100 --interface eth0 --remote-port 80 --destination-ip 14.215.177.39 --names nginx-app --kubeconfig ~/.kube/config --namespace default # Do 60% packet loss for the entire network card Eth0, excluding ports 22 and 8000 to 8080 blade create k8s pod-network loss --percent 60 --interface eth0 --exclude-port 22,8000-8080 --names nginx-app --kubeconfig ~/.kube/config --namespace default # Realize the whole network card is not accessible, not accessible time 20 seconds. After executing the following command, the current network is disconnected and restored in 20 seconds. Remember!! 
Don't forget -timeout parameter blade create k8s pod-network loss --percent 100 --interface eth0 --timeout 20 --names nginx-app --kubeconfig ~/.kube/config --namespace default`) case *tc.DuplicateActionSpec: action.SetExample(`# Specify the network card eth0 and repeat the packet by 10% blade create k8s pod-network duplicate --percent=10 --interface=eth0 --names nginx-app --kubeconfig ~/.kube/config --namespace default`) case *tc.CorruptActionSpec: action.SetExample(`# Access to the specified IP request packet is corrupted, 80% of the time blade create k8s pod-network corrupt --percent 80 --destination-ip 180.101.49.12 --interface eth0 --names nginx-app --kubeconfig ~/.kube/config --namespace default`) case *tc.ReorderActionSpec: action.SetExample(`# Access the specified IP request packet disorder blade create k8s pod-network reorder --correlation 80 --percent 50 --gap 2 --time 500 --interface eth0 --destination-ip 180.101.49.12 --names nginx-app --kubeconfig ~/.kube/config --namespace default`) case *network.OccupyActionSpec: action.SetExample(`#Specify port 8080 occupancy blade create k8s pod-network occupy --port 8080 --force --names nginx-app --kubeconfig ~/.kube/config --namespace default # The machine accesses external 14.215.177.39 machine (ping www.baidu.com) 80 port packet loss rate 100% blade create k8s pod-network loss --percent 100 --interface eth0 --remote-port 80 --destination-ip 14.215.177.39 --names nginx-app --kubeconfig ~/.kube/config --namespace default`) case *cpu.FullLoadActionCommand: action.SetExample(` # Create a CPU full load experiment blade create k8s pod-cpu load --names nginx-app --kubeconfig ~/.kube/config --namespace default #Specifies two random kernel's full load blade create k8s pod-cpu load --names nginx-app --kubeconfig ~/.kube/config --namespace default --cpu-percent 60 --cpu-count 2 # Specifies that the kernel is full load with index 0, 3, and that the kernel's index starts at 0 blade create k8s pod-cpu load --names nginx-app 
--kubeconfig ~/.kube/config --namespace default --cpu-list 0,3 # Specify the kernel full load of indexes 1-3 blade create k8s pod-cpu load --names nginx-app --kubeconfig ~/.kube/config --namespace default --cpu-list 1-3 # Specified percentage load blade create k8s pod-cpu load --names nginx-app --kubeconfig ~/.kube/config --namespace default --cpu-percent 60`) case *script.ScriptDelayActionCommand: action.SetExample(` # Add commands to the script "start0() { sleep 10.000000 ...}" blade create k8s pod-script delay --time 10000 --file test.sh --function-name start0 --names nginx-app --kubeconfig ~/.kube/config --namespace default`) case *script.ScriptExitActionCommand: action.SetExample(` # Add commands to the script "start0() { echo this-is-error-message; exit 1; ... }" blade create k8s pod-script exit --exit-code 1 --exit-message this-is-error-message --file test.sh --function-name start0 --names nginx-app --kubeconfig ~/.kube/config --namespace default`) case *PodContainerCreatingActionSpec: action.SetLongDesc("Make pod stuck in ContainerCreating state by creating a PV with an unreachable NFS server, a PVC bound to it, and a Pod that mounts the PVC. Since the NFS server is unreachable, the volume mount fails and the Pod remains stuck in ContainerCreating state.") action.SetExample( `# Create a pod stuck in ContainerCreating state in the default namespace blade create k8s pod-pod containercreating --namespace default --kubeconfig ~/.kube/config # Create a pod stuck in ContainerCreating state with custom volume mount path blade create k8s pod-pod containercreating --namespace default --volume-mount-path /data --kubeconfig ~/.kube/config`, ) case *PodSchedulingFailureActionSpec: action.SetLongDesc("Make pod scheduling fail by injecting unreachable affinity rules to the target workload (Deployment/DaemonSet/StatefulSet). 
The scheduler will not find any node matching the rules, causing the Pod to remain in Pending state.") action.SetExample( `# Inject scheduling failure to a deployment by node affinity blade create k8s pod-pod schedulingfailure --namespace default --workload-type deployment --workload-name nginx-deployment --kubeconfig ~/.kube/config # Inject scheduling failure using node-selector blade create k8s pod-pod schedulingfailure --namespace default --workload-type deployment --workload-name nginx-deployment --affinity-type node-selector --kubeconfig ~/.kube/config`, ) case *ImagePullSecretsErrorActionSpec: action.SetLongDesc("Simulate image pull authentication failure by corrupting the credentials in the Secret referenced by the Pod's imagePullSecrets. The original Secret is backed up and restored when the experiment is destroyed.") action.SetExample( `# Simulate image pull authentication failure for a specific pod blade create k8s pod-pod imagepullsecretserror --names my-app-pod --namespace default --kubeconfig ~/.kube/config # Simulate image pull authentication failure for pods selected by labels blade create k8s pod-pod imagepullsecretserror --labels app=nginx --namespace default --kubeconfig ~/.kube/config # Corrupt only a specific imagePullSecret blade create k8s pod-pod imagepullsecretserror --names my-app-pod --namespace default --secret-name my-registry-secret --kubeconfig ~/.kube/config`, ) case *PodTaintNodeActionSpec: action.SetLongDesc("Make pod scheduling fail by adding unreachable taint to nodes. 
The scheduler will not schedule Pods without matching tolerations to the tainted nodes, causing the Pods to remain in Pending state.") action.SetExample( `# Add unreachable taint to nodes to prevent pod scheduling blade create k8s pod-pod taintnode --nodes node1,node2 --kubeconfig ~/.kube/config # Add taint with NoExecute effect (will evict running pods) blade create k8s pod-pod taintnode --nodes node1 --taint-effect NoExecute --kubeconfig ~/.kube/config`, ) case *ConfigMapDeleteActionSpec: action.SetLongDesc("Delete a ConfigMap that a Pod depends on, then restart the Pod to simulate startup failure caused by missing ConfigMap. The original ConfigMap is backed up and restored when the experiment is destroyed.") action.SetExample( `# Delete the auto-selected required ConfigMap for pods matching labels blade create k8s pod-pod configmapdelete --labels "app=test" --namespace default --kubeconfig ~/.kube/config # Delete a specific ConfigMap blade create k8s pod-pod configmapdelete --labels "app=test" --namespace default --configmap-name my-config --kubeconfig ~/.kube/config`, ) case *BadResourceSizeActionSpec: action.SetLongDesc("Modify the CPU/Memory resource limits of a workload (Deployment/DaemonSet/StatefulSet) to simulate incorrect resource sizing. 
The original resource configuration is backed up and restored when the experiment is destroyed.") action.SetExample( `# Set CPU resource limit for a deployment blade create k8s pod-pod badresourcesize --namespace default --workload-type deployment --workload-name nginx-app --cpu 1m --kubeconfig ~/.kube/config # Set memory resource limit for a deployment blade create k8s pod-pod badresourcesize --namespace default --workload-type deployment --workload-name nginx-app --mem 128m --kubeconfig ~/.kube/config # Set both CPU and memory resource limits for a deployment blade create k8s pod-pod badresourcesize --namespace default --workload-type deployment --workload-name nginx-app --cpu 1m --mem 128m --kubeconfig ~/.kube/config # Set CPU resource limit for a daemonset blade create k8s pod-pod badresourcesize --namespace default --workload-type daemonset --workload-name nginx-ds --cpu 1m --kubeconfig ~/.kube/config # Set memory resource limit for a statefulset blade create k8s pod-pod badresourcesize --namespace default --workload-type statefulset --workload-name nginx-sts --mem 128m --kubeconfig ~/.kube/config`, ) case *FailedMountActionSpec: action.SetLongDesc("Mount a non-existent ConfigMap/Secret/PVC volume to the target workload (Deployment/DaemonSet/StatefulSet) to simulate volume mount failure. 
The injected volume configuration is tracked and removed when the experiment is destroyed.") action.SetExample( `# Mount a non-existent configmap volume to a deployment blade create k8s pod-pod failedmount --namespace default --workload-type deployment --workload-name nginx-app --volume-type configmap --kubeconfig ~/.kube/config # Mount a non-existent secret volume to init containers of a deployment blade create k8s pod-pod failedmount --namespace default --workload-type deployment --workload-name nginx-app --volume-type secret --with-initcontainer true --kubeconfig ~/.kube/config # Mount a non-existent pvc volume to a statefulset blade create k8s pod-pod failedmount --namespace default --workload-type statefulset --workload-name redis-app --volume-type pvc --kubeconfig ~/.kube/config`, ) default: action.SetExample(strings.Replace( action.Example(), fmt.Sprintf("blade create %s %s", expModelSpec.Name(), action.Name()), fmt.Sprintf("blade create k8s pod-%s %s --names nginx-app --kubeconfig ~/.kube/config --namespace default", expModelSpec.Name(), action.Name()), -1, )) action.SetExample(strings.Replace( action.Example(), fmt.Sprintf("blade c %s %s", expModelSpec.Name(), action.Name()), fmt.Sprintf("blade c k8s pod-%s %s --names nginx-app --kubeconfig ~/.kube/config --namespace default", expModelSpec.Name(), action.Name()), -1, )) } } } } func getResourceFlags() []spec.ExpFlagSpec { coverageFlags := model.GetResourceCoverageFlags() commonFlags := model.GetResourceCommonFlags() chaosbladeFlags := model.GetChaosBladeFlags() return append(append(coverageFlags, commonFlags...), chaosbladeFlags...) 
}

// SelfExpModelCommandSpec is the "pod" experiment model. It aggregates all
// pod-level chaos actions implemented directly by this operator (delete, IO,
// fail, terminating, containercreating, scheduling failure, image-pull-secret
// corruption, configmap delete, taint-node, bad resource size, failed mount).
type SelfExpModelCommandSpec struct {
	spec.BaseExpModelCommandSpec
}

// NewSelfExpModelCommandSpec builds the "pod" model spec, wiring the shared
// Kubernetes client into every action's executor.
func NewSelfExpModelCommandSpec(client *channel.Client) spec.ExpModelCommandSpec {
	return &SelfExpModelCommandSpec{
		spec.BaseExpModelCommandSpec{
			ExpFlags: []spec.ExpFlagSpec{},
			ExpActions: []spec.ExpActionCommandSpec{
				NewDeletePodActionSpec(client),
				NewPodIOActionSpec(client),
				NewFailPodActionSpec(client),
				NewPodTerminatingActionSpec(client),
				NewPodContainerCreatingActionSpec(client),
				NewPodContainerCreatingDiskActionSpec(client),
				NewPodSchedulingFailureActionSpec(client),
				NewImageConfigActionSpec(client),
				NewImagePullSecretsErrorActionSpec(client),
				NewConfigMapDeleteActionSpec(client),
				NewPodTaintNodeActionSpec(client),
				NewBadResourceSizeActionSpec(client),
				NewFailedMountActionSpec(client),
			},
		},
	}
}

// Name returns the model name; the CLI exposes these actions as "k8s pod-pod <action>".
func (*SelfExpModelCommandSpec) Name() string {
	return "pod"
}

// ShortDesc returns the one-line description of the model.
func (*SelfExpModelCommandSpec) ShortDesc() string {
	return "Pod experiments"
}

// LongDesc returns the long description of the model.
func (*SelfExpModelCommandSpec) LongDesc() string {
	return "Pod experiments"
}

// Example returns a representative CLI invocation for this model.
func (*SelfExpModelCommandSpec) Example() string {
	return "blade c k8s pod-pod delete --names redis-slave-674d68586-n5s4q --namespace default --kubeconfig ~/.kube/config"
}

================================================
FILE: exec/pod/schedulingfailure.go
================================================
/*
 * Copyright 2025 The ChaosBlade Authors
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
*/

package pod

import (
	"context"
	"encoding/json"
	"fmt"
	"strings"

	"github.com/chaosblade-io/chaosblade-spec-go/spec"
	"github.com/chaosblade-io/chaosblade-spec-go/util"
	"github.com/sirupsen/logrus"
	appsv1 "k8s.io/api/apps/v1"
	v1 "k8s.io/api/core/v1"
	apierrors "k8s.io/apimachinery/pkg/api/errors"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/types"

	"github.com/chaosblade-io/chaosblade-operator/channel"
	"github.com/chaosblade-io/chaosblade-operator/exec/model"
	"github.com/chaosblade-io/chaosblade-operator/pkg/apis/chaosblade/v1alpha1"
)

const (
	// ChaosBladeDeploymentAnnotation indicates the deployment resource is modified
	ChaosBladeDeploymentAnnotation = "chaosblade.io/deployment"
	// ChaosBladeDaemonSetAnnotation indicates the daemonset resource is modified
	ChaosBladeDaemonSetAnnotation = "chaosblade.io/daemonset"
	// ChaosBladeStatefulSetAnnotation indicates the statefulset resource is modified
	ChaosBladeStatefulSetAnnotation = "chaosblade.io/statefulset"
	// ChaosBladeModifyAction indicates modify action
	ChaosBladeModifyAction = "modify"
	// ChaosBladeOriginalNodeAffinityAnnotation stores the original node affinity configuration
	ChaosBladeOriginalNodeAffinityAnnotation = "chaosblade.io/original-node-affinity"
	// ChaosBladeOriginalPodAffinityAnnotation stores the original pod affinity configuration
	ChaosBladeOriginalPodAffinityAnnotation = "chaosblade.io/original-pod-affinity"
	// ChaosBladeOriginalPodAntiAffinityAnnotation stores the original pod anti-affinity configuration
	ChaosBladeOriginalPodAntiAffinityAnnotation = "chaosblade.io/original-pod-anti-affinity"
	// ChaosBladeOriginalNodeSelectorAnnotation stores the original node selector configuration
	ChaosBladeOriginalNodeSelectorAnnotation = "chaosblade.io/original-nodeselector"
	// ChaosBladeAffinityTypeAnnotation records which affinity type was injected
	ChaosBladeAffinityTypeAnnotation = "chaosblade.io/affinity-type"
	// ChaosBladeSchedulingFailureAction indicates scheduling failure action
	ChaosBladeSchedulingFailureAction = "schedulingfailure"
	// UnreachableNodeLabelKey is a node label key that no real node is expected
	// to carry; matching on it makes the injected affinity rules unsatisfiable.
	UnreachableNodeLabelKey = "chaosblade.io/unreachable"
	// UnreachableNodeLabelValue is the value paired with UnreachableNodeLabelKey.
	UnreachableNodeLabelValue = "true"
)

// PodSchedulingFailureActionSpec declares the "schedulingfailure" action:
// flags, examples, and the executor that injects unreachable affinity rules
// into a workload's Pod template.
type PodSchedulingFailureActionSpec struct {
	spec.BaseExpActionCommandSpec
	client *channel.Client
}

// NewPodSchedulingFailureActionSpec builds the action spec. workload-name is
// the only required flag; workload-type defaults to "deployment" and
// affinity-type to "node-affinity".
func NewPodSchedulingFailureActionSpec(client *channel.Client) spec.ExpActionCommandSpec {
	return &PodSchedulingFailureActionSpec{
		BaseExpActionCommandSpec: spec.BaseExpActionCommandSpec{
			ActionMatchers: []spec.ExpFlagSpec{},
			ActionFlags: []spec.ExpFlagSpec{
				&spec.ExpFlag{
					Name:     "workload-type",
					Desc:     "Workload type: deployment, daemonset, statefulset. Default: deployment",
					Required: false,
					Default:  "deployment",
				},
				&spec.ExpFlag{
					Name:     "workload-name",
					Desc:     "Workload name to inject scheduling failure",
					Required: true,
				},
				&spec.ExpFlag{
					Name:     "affinity-type",
					Desc:     "Affinity type to inject: node-affinity, node-selector, pod-affinity, pod-anti-affinity. Default: node-affinity",
					Required: false,
					Default:  "node-affinity",
				},
			},
			ActionExecutor: &PodSchedulingFailureActionExecutor{client: client},
			ActionExample: `# Inject scheduling failure to a deployment by node affinity
blade create k8s pod-pod schedulingfailure --namespace default --workload-type deployment --workload-name nginx-deployment --kubeconfig ~/.kube/config

# Inject scheduling failure using node-selector
blade create k8s pod-pod schedulingfailure --namespace default --workload-type deployment --workload-name nginx-deployment --affinity-type node-selector --kubeconfig ~/.kube/config
`,
			ActionCategories: []string{model.CategorySystemContainer},
		},
		client: client,
	}
}

// Name returns the action name used on the CLI.
func (*PodSchedulingFailureActionSpec) Name() string {
	return "schedulingfailure"
}

// Aliases returns alternative action names (none).
func (*PodSchedulingFailureActionSpec) Aliases() []string {
	return []string{}
}

// ShortDesc returns the one-line description of the action.
func (*PodSchedulingFailureActionSpec) ShortDesc() string {
	return "Make pod scheduling fail by injecting unreachable affinity rules"
}

// LongDesc returns the full description of the action.
func (*PodSchedulingFailureActionSpec) LongDesc() string {
	return "Simulate the scenario where a Pod cannot be scheduled due to affinity configuration issues. " +
		"This fault is injected by modifying the target workload's (Deployment/DaemonSet/StatefulSet) Pod template " +
		"to add an unreachable node affinity or node selector. The scheduler will not find any node matching the rules, " +
		"causing the Pod to remain in Pending state. When the experiment is destroyed, the original affinity " +
		"configuration will be restored."
}

// PodSchedulingFailureActionExecutor performs the injection (create) and the
// restoration (destroy) of the scheduling-failure fault.
type PodSchedulingFailureActionExecutor struct {
	client *channel.Client
}

// Name returns the executor name.
func (*PodSchedulingFailureActionExecutor) Name() string {
	return "schedulingfailure"
}

// SetChannel is a no-op; this executor talks to the cluster through its own client.
func (*PodSchedulingFailureActionExecutor) SetChannel(channel spec.Channel) {}

// Exec dispatches to destroy when the context carries a destroy marker,
// otherwise to create.
func (d *PodSchedulingFailureActionExecutor) Exec(uid string, ctx context.Context, expModel *spec.ExpModel) *spec.Response {
	if _, ok := spec.IsDestroy(ctx); ok {
		return d.destroy(uid, ctx, expModel)
	}
	return d.create(uid, ctx, expModel)
}

// create reads the target workload, injects the unreachable affinity rules
// into its Pod template via the inject* helpers, and reports a per-resource
// status. Any failure (missing workload, API error, conflicting experiment)
// is returned as a failed experiment status rather than an error.
func (d *PodSchedulingFailureActionExecutor) create(uid string, ctx context.Context, expModel *spec.ExpModel) *spec.Response {
	experimentId := model.GetExperimentIdFromContext(ctx)
	logrusField := logrus.WithField("experiment", experimentId)

	// Parse flags; workload-type and affinity-type fall back to their
	// declared defaults when the flags arrive empty.
	namespace := expModel.ActionFlags[model.ResourceNamespaceFlag.Name]
	workloadType := expModel.ActionFlags["workload-type"]
	if workloadType == "" {
		workloadType = "deployment"
	}
	workloadName := expModel.ActionFlags["workload-name"]
	affinityType := expModel.ActionFlags["affinity-type"]
	if affinityType == "" {
		affinityType = "node-affinity"
	}

	// Validate required flags
	if namespace == "" {
		util.Errorf(uid, util.GetRunFuncName(), "namespace is required")
		return spec.ResponseFailWithFlags(spec.ParameterLess, model.ResourceNamespaceFlag.Name)
	}
	if workloadName == "" {
		util.Errorf(uid, util.GetRunFuncName(), "workload-name is required")
		return spec.ResponseFailWithFlags(spec.ParameterLess, "workload-name")
	}

	// The identifier encodes namespace//type//name so destroy can address the
	// same workload.
	status := v1alpha1.ResourceStatus{
		Kind:       v1alpha1.PodKind,
		Identifier: fmt.Sprintf("%s//%s//%s", namespace, workloadType, workloadName),
	}

	// Get and modify the workload. The three cases are structurally identical
	// but operate on distinct typed objects required by the client API.
	switch workloadType {
	case "deployment":
		deployment := &appsv1.Deployment{}
		err := d.client.Get(ctx, types.NamespacedName{Namespace: namespace, Name: workloadName}, deployment)
		if err != nil {
			if apierrors.IsNotFound(err) {
				logrusField.Warningf("deployment %s/%s not found", namespace, workloadName)
				status = status.CreateFailResourceStatus(fmt.Sprintf("deployment not found: %v", err), spec.K8sExecFailed.Code)
			} else {
				logrusField.Warningf("get deployment %s/%s failed: %v", namespace, workloadName, err)
				status = status.CreateFailResourceStatus(fmt.Sprintf("get deployment failed: %v", err), spec.K8sExecFailed.Code)
			}
			return spec.ReturnResultIgnoreCode(v1alpha1.CreateFailExperimentStatus(status.Error, []v1alpha1.ResourceStatus{status}))
		}
		// Inject scheduling failure
		if err := d.injectDeploymentSchedulingFailure(ctx, deployment, affinityType, experimentId); err != nil {
			logrusField.Warningf("inject scheduling failure to deployment %s/%s failed: %v", namespace, workloadName, err)
			status = status.CreateFailResourceStatus(fmt.Sprintf("inject scheduling failure failed: %v", err), spec.K8sExecFailed.Code)
			return spec.ReturnResultIgnoreCode(v1alpha1.CreateFailExperimentStatus(status.Error, []v1alpha1.ResourceStatus{status}))
		}
		logrusField.Infof("injected scheduling failure to deployment %s/%s with affinity type %s", namespace, workloadName, affinityType)
	case "daemonset":
		daemonset := &appsv1.DaemonSet{}
		err := d.client.Get(ctx, types.NamespacedName{Namespace: namespace, Name: workloadName}, daemonset)
		if err != nil {
			if apierrors.IsNotFound(err) {
				logrusField.Warningf("daemonset %s/%s not found", namespace, workloadName)
				status = status.CreateFailResourceStatus(fmt.Sprintf("daemonset not found: %v", err), spec.K8sExecFailed.Code)
			} else {
				logrusField.Warningf("get daemonset %s/%s failed: %v", namespace, workloadName, err)
				status = status.CreateFailResourceStatus(fmt.Sprintf("get daemonset failed: %v", err), spec.K8sExecFailed.Code)
			}
			return spec.ReturnResultIgnoreCode(v1alpha1.CreateFailExperimentStatus(status.Error, []v1alpha1.ResourceStatus{status}))
		}
		if err := d.injectDaemonSetSchedulingFailure(ctx, daemonset, affinityType, experimentId); err != nil {
			logrusField.Warningf("inject scheduling failure to daemonset %s/%s failed: %v", namespace, workloadName, err)
			status = status.CreateFailResourceStatus(fmt.Sprintf("inject scheduling failure failed: %v", err), spec.K8sExecFailed.Code)
			return spec.ReturnResultIgnoreCode(v1alpha1.CreateFailExperimentStatus(status.Error, []v1alpha1.ResourceStatus{status}))
		}
		logrusField.Infof("injected scheduling failure to daemonset %s/%s with affinity type %s", namespace, workloadName, affinityType)
	case "statefulset":
		statefulset := &appsv1.StatefulSet{}
		err := d.client.Get(ctx, types.NamespacedName{Namespace: namespace, Name: workloadName}, statefulset)
		if err != nil {
			if apierrors.IsNotFound(err) {
				logrusField.Warningf("statefulset %s/%s not found", namespace, workloadName)
				status = status.CreateFailResourceStatus(fmt.Sprintf("statefulset not found: %v", err), spec.K8sExecFailed.Code)
			} else {
				logrusField.Warningf("get statefulset %s/%s failed: %v", namespace, workloadName, err)
				status = status.CreateFailResourceStatus(fmt.Sprintf("get statefulset failed: %v", err), spec.K8sExecFailed.Code)
			}
			return spec.ReturnResultIgnoreCode(v1alpha1.CreateFailExperimentStatus(status.Error, []v1alpha1.ResourceStatus{status}))
		}
		if err := d.injectStatefulSetSchedulingFailure(ctx, statefulset, affinityType, experimentId); err != nil {
			logrusField.Warningf("inject scheduling failure to statefulset %s/%s failed: %v", namespace, workloadName, err)
			status = status.CreateFailResourceStatus(fmt.Sprintf("inject scheduling failure failed: %v", err), spec.K8sExecFailed.Code)
			return spec.ReturnResultIgnoreCode(v1alpha1.CreateFailExperimentStatus(status.Error, []v1alpha1.ResourceStatus{status}))
		}
		logrusField.Infof("injected scheduling failure to statefulset %s/%s with affinity type %s", namespace, workloadName, affinityType)
	default:
		status = status.CreateFailResourceStatus(fmt.Sprintf("unsupported workload type: %s", workloadType), spec.ParameterIllegal.Code)
		return spec.ReturnResultIgnoreCode(v1alpha1.CreateFailExperimentStatus(status.Error, []v1alpha1.ResourceStatus{status}))
	}

	status = status.CreateSuccessResourceStatus()
	return spec.ReturnResultIgnoreCode(v1alpha1.CreateSuccessExperimentStatus([]v1alpha1.ResourceStatus{status}))
}

// handleGetError handles errors from client.Get operations in destroy.
// Returns true if the error was handled (NotFound or other error), false otherwise.
// When returning true, the response pointer is set with the appropriate status.
// NotFound is treated as already-destroyed (success) because the workload no
// longer exists and nothing needs restoring.
func handleGetError(err error, namespace, workloadType, workloadName string, status *v1alpha1.ResourceStatus, logrusField *logrus.Entry) (*spec.Response, bool) {
	if err == nil {
		return nil, false
	}
	if apierrors.IsNotFound(err) {
		logrusField.Infof("%s %s/%s already deleted", workloadType, namespace, workloadName)
		*status = status.CreateSuccessResourceStatus()
		status.State = v1alpha1.DestroyedState
		return spec.ReturnResultIgnoreCode(v1alpha1.CreateDestroyedExperimentStatus([]v1alpha1.ResourceStatus{*status})), true
	}
	logrusField.Warningf("get %s %s/%s failed: %v", workloadType, namespace, workloadName, err)
	*status = status.CreateFailResourceStatus(fmt.Sprintf("get %s failed: %v", workloadType, err), spec.K8sExecFailed.Code)
	return spec.ReturnResultIgnoreCode(v1alpha1.CreateFailExperimentStatus(status.Error, []v1alpha1.ResourceStatus{*status})), true
}

// destroy reads the target workload and restores its original affinity
// configuration via the restore* helpers (defined elsewhere in this file).
// A workload that no longer exists counts as successfully destroyed.
// NOTE(review): unlike create, destroy does not validate namespace or
// workload-name; empty values fall through to the client Get — presumably the
// flags are always carried over from create. TODO confirm.
func (d *PodSchedulingFailureActionExecutor) destroy(uid string, ctx context.Context, expModel *spec.ExpModel) *spec.Response {
	experimentId := model.GetExperimentIdFromContext(ctx)
	logrusField := logrus.WithField("experiment", experimentId)

	// Parse flags
	namespace := expModel.ActionFlags[model.ResourceNamespaceFlag.Name]
	workloadType := expModel.ActionFlags["workload-type"]
	if workloadType == "" {
		workloadType = "deployment"
	}
	workloadName := expModel.ActionFlags["workload-name"]

	status := v1alpha1.ResourceStatus{
		Kind:       v1alpha1.PodKind,
		Identifier: fmt.Sprintf("%s//%s//%s", namespace, workloadType, workloadName),
	}

	// Restore the workload
	switch workloadType {
	case "deployment":
		deployment := &appsv1.Deployment{}
		err := d.client.Get(ctx, types.NamespacedName{Namespace: namespace, Name: workloadName}, deployment)
		if resp, handled := handleGetError(err, namespace, workloadType, workloadName, &status, logrusField); handled {
			return resp
		}
		if err := d.restoreDeployment(ctx, deployment, experimentId); err != nil {
			logrusField.Warningf("restore deployment %s/%s failed: %v", namespace, workloadName, err)
			status = status.CreateFailResourceStatus(fmt.Sprintf("restore deployment failed: %v", err), spec.K8sExecFailed.Code)
			return spec.ReturnResultIgnoreCode(v1alpha1.CreateFailExperimentStatus(status.Error, []v1alpha1.ResourceStatus{status}))
		}
		logrusField.Infof("restored deployment %s/%s", namespace, workloadName)
	case "daemonset":
		daemonset := &appsv1.DaemonSet{}
		err := d.client.Get(ctx, types.NamespacedName{Namespace: namespace, Name: workloadName}, daemonset)
		if resp, handled := handleGetError(err, namespace, workloadType, workloadName, &status, logrusField); handled {
			return resp
		}
		if err := d.restoreDaemonSet(ctx, daemonset, experimentId); err != nil {
			logrusField.Warningf("restore daemonset %s/%s failed: %v", namespace, workloadName, err)
			status = status.CreateFailResourceStatus(fmt.Sprintf("restore daemonset failed: %v", err), spec.K8sExecFailed.Code)
			return spec.ReturnResultIgnoreCode(v1alpha1.CreateFailExperimentStatus(status.Error, []v1alpha1.ResourceStatus{status}))
		}
		logrusField.Infof("restored daemonset %s/%s", namespace, workloadName)
	case "statefulset":
		statefulset := &appsv1.StatefulSet{}
		err := d.client.Get(ctx, types.NamespacedName{Namespace: namespace, Name: workloadName}, statefulset)
		if resp, handled := handleGetError(err, namespace, workloadType, workloadName, &status, logrusField); handled {
			return resp
		}
		if err := d.restoreStatefulSet(ctx, statefulset, experimentId); err != nil {
			logrusField.Warningf("restore statefulset %s/%s failed: %v", namespace, workloadName, err)
			status = status.CreateFailResourceStatus(fmt.Sprintf("restore statefulset failed: %v", err), spec.K8sExecFailed.Code)
			return spec.ReturnResultIgnoreCode(v1alpha1.CreateFailExperimentStatus(status.Error, []v1alpha1.ResourceStatus{status}))
		}
		logrusField.Infof("restored statefulset %s/%s", namespace, workloadName)
	default:
		status = status.CreateFailResourceStatus(fmt.Sprintf("unsupported workload type: %s", workloadType), spec.ParameterIllegal.Code)
		return spec.ReturnResultIgnoreCode(v1alpha1.CreateFailExperimentStatus(status.Error, []v1alpha1.ResourceStatus{status}))
	}

	status = status.CreateSuccessResourceStatus()
	status.State = v1alpha1.DestroyedState
	return spec.ReturnResultIgnoreCode(v1alpha1.CreateDestroyedExperimentStatus([]v1alpha1.ResourceStatus{status}))
}

// ensureNoConflictingExperiment checks whether the workload is already modified by
// another chaosblade experiment. This prevents overwriting backup data from a
// running experiment, which would make the workload unrecoverable on destroy.
func ensureNoConflictingExperiment(annotations map[string]string, experimentId string) error { if existingId, ok := annotations[ChaosBladeExperimentAnnotation]; ok && existingId != "" && existingId != experimentId { return fmt.Errorf("workload is already modified by another chaosblade experiment: %s", existingId) } return nil } // injectDeploymentSchedulingFailure injects scheduling failure to a Deployment func (d *PodSchedulingFailureActionExecutor) injectDeploymentSchedulingFailure(ctx context.Context, deployment *appsv1.Deployment, affinityType, experimentId string) error { if deployment.Annotations == nil { deployment.Annotations = make(map[string]string) } if err := ensureNoConflictingExperiment(deployment.Annotations, experimentId); err != nil { return err } // Idempotent: if already modified by the same experiment, skip re-injection // to avoid overwriting the saved original affinity backup. if deployment.Annotations[ChaosBladeExperimentAnnotation] == experimentId { return nil } deployment.Annotations[ChaosBladeDeploymentAnnotation] = ChaosBladeModifyAction deployment.Annotations[ChaosBladeExperimentAnnotation] = experimentId // Backup and inject affinity if err := d.backupAndInjectAffinity(&deployment.Spec.Template.Spec, deployment.Annotations, affinityType, deployment.Spec.Template.Labels); err != nil { return err } return d.client.Update(ctx, deployment) } // injectDaemonSetSchedulingFailure injects scheduling failure to a DaemonSet func (d *PodSchedulingFailureActionExecutor) injectDaemonSetSchedulingFailure(ctx context.Context, daemonset *appsv1.DaemonSet, affinityType, experimentId string) error { if daemonset.Annotations == nil { daemonset.Annotations = make(map[string]string) } if err := ensureNoConflictingExperiment(daemonset.Annotations, experimentId); err != nil { return err } if daemonset.Annotations[ChaosBladeExperimentAnnotation] == experimentId { return nil } daemonset.Annotations[ChaosBladeDaemonSetAnnotation] = ChaosBladeModifyAction 
daemonset.Annotations[ChaosBladeExperimentAnnotation] = experimentId if err := d.backupAndInjectAffinity(&daemonset.Spec.Template.Spec, daemonset.Annotations, affinityType, daemonset.Spec.Template.Labels); err != nil { return err } return d.client.Update(ctx, daemonset) } // injectStatefulSetSchedulingFailure injects scheduling failure to a StatefulSet func (d *PodSchedulingFailureActionExecutor) injectStatefulSetSchedulingFailure(ctx context.Context, statefulset *appsv1.StatefulSet, affinityType, experimentId string) error { if statefulset.Annotations == nil { statefulset.Annotations = make(map[string]string) } if err := ensureNoConflictingExperiment(statefulset.Annotations, experimentId); err != nil { return err } if statefulset.Annotations[ChaosBladeExperimentAnnotation] == experimentId { return nil } statefulset.Annotations[ChaosBladeStatefulSetAnnotation] = ChaosBladeModifyAction statefulset.Annotations[ChaosBladeExperimentAnnotation] = experimentId if err := d.backupAndInjectAffinity(&statefulset.Spec.Template.Spec, statefulset.Annotations, affinityType, statefulset.Spec.Template.Labels); err != nil { return err } return d.client.Update(ctx, statefulset) } // backupAndInjectAffinity backs up original affinity and injects unreachable affinity rules // podLabels is used by pod-anti-affinity to target the workload's own labels func (d *PodSchedulingFailureActionExecutor) backupAndInjectAffinity(podSpec *v1.PodSpec, annotations map[string]string, affinityType string, podLabels map[string]string) error { annotations[ChaosBladeAffinityTypeAnnotation] = affinityType switch affinityType { case "node-affinity": // Backup original affinity if podSpec.Affinity != nil && podSpec.Affinity.NodeAffinity != nil { originalBytes, err := json.Marshal(podSpec.Affinity.NodeAffinity) if err != nil { return fmt.Errorf("marshal original node affinity failed: %v", err) } annotations[ChaosBladeOriginalNodeAffinityAnnotation] = string(originalBytes) } // Inject unreachable node 
affinity unreachableAffinity := &v1.NodeAffinity{ RequiredDuringSchedulingIgnoredDuringExecution: &v1.NodeSelector{ NodeSelectorTerms: []v1.NodeSelectorTerm{ { MatchExpressions: []v1.NodeSelectorRequirement{ { Key: UnreachableNodeLabelKey, Operator: v1.NodeSelectorOpIn, Values: []string{UnreachableNodeLabelValue}, }, }, }, }, }, } if podSpec.Affinity == nil { podSpec.Affinity = &v1.Affinity{} } podSpec.Affinity.NodeAffinity = unreachableAffinity case "node-selector": // Backup original node selector if len(podSpec.NodeSelector) > 0 { originalBytes, err := json.Marshal(podSpec.NodeSelector) if err != nil { return fmt.Errorf("marshal original node selector failed: %v", err) } annotations[ChaosBladeOriginalNodeSelectorAnnotation] = string(originalBytes) } // Inject unreachable node selector podSpec.NodeSelector = map[string]string{ UnreachableNodeLabelKey: UnreachableNodeLabelValue, } case "pod-affinity": // Backup original pod affinity if podSpec.Affinity != nil && podSpec.Affinity.PodAffinity != nil { originalBytes, err := json.Marshal(podSpec.Affinity.PodAffinity) if err != nil { return fmt.Errorf("marshal original pod affinity failed: %v", err) } annotations[ChaosBladeOriginalPodAffinityAnnotation] = string(originalBytes) } // Inject unreachable pod affinity unreachablePodAffinity := &v1.PodAffinity{ RequiredDuringSchedulingIgnoredDuringExecution: []v1.PodAffinityTerm{ { LabelSelector: &metav1.LabelSelector{ MatchExpressions: []metav1.LabelSelectorRequirement{ { Key: UnreachableNodeLabelKey, Operator: metav1.LabelSelectorOpIn, Values: []string{UnreachableNodeLabelValue}, }, }, }, TopologyKey: "kubernetes.io/hostname", }, }, } if podSpec.Affinity == nil { podSpec.Affinity = &v1.Affinity{} } podSpec.Affinity.PodAffinity = unreachablePodAffinity case "pod-anti-affinity": // Backup original pod anti-affinity if podSpec.Affinity != nil && podSpec.Affinity.PodAntiAffinity != nil { originalBytes, err := json.Marshal(podSpec.Affinity.PodAntiAffinity) if err != nil { 
return fmt.Errorf("marshal original pod anti-affinity failed: %v", err) } annotations[ChaosBladeOriginalPodAntiAffinityAnnotation] = string(originalBytes) } // Inject pod anti-affinity against the workload's own labels // This creates a "one pod per node" constraint: new pods can't be scheduled // on nodes that already have a pod with the same labels. // With enough replicas (> number of available nodes), pods will be Pending. var matchExpressions []metav1.LabelSelectorRequirement for key, value := range podLabels { matchExpressions = append(matchExpressions, metav1.LabelSelectorRequirement{ Key: key, Operator: metav1.LabelSelectorOpIn, Values: []string{value}, }) } if len(matchExpressions) == 0 { return fmt.Errorf("pod template has no labels, cannot inject pod-anti-affinity") } unreachablePodAntiAffinity := &v1.PodAntiAffinity{ RequiredDuringSchedulingIgnoredDuringExecution: []v1.PodAffinityTerm{ { LabelSelector: &metav1.LabelSelector{ MatchExpressions: matchExpressions, }, TopologyKey: "kubernetes.io/hostname", }, }, } if podSpec.Affinity == nil { podSpec.Affinity = &v1.Affinity{} } podSpec.Affinity.PodAntiAffinity = unreachablePodAntiAffinity default: return fmt.Errorf("unsupported affinity type: %s", affinityType) } return nil } // PreCreate implements model.ActionPreProcessor interface. // It validates the required flags and prepares the context for schedulingfailure action. 
func (a *PodSchedulingFailureActionSpec) PreCreate(ctx context.Context, expModel *spec.ExpModel, client *channel.Client) (context.Context, *spec.Response) { // Validate required flags namespace := expModel.ActionFlags[model.ResourceNamespaceFlag.Name] workloadType := expModel.ActionFlags["workload-type"] if workloadType == "" { workloadType = "deployment" } workloadName := expModel.ActionFlags["workload-name"] if namespace == "" { return ctx, spec.ResponseFailWithFlags(spec.ParameterLess, model.ResourceNamespaceFlag.Name) } if strings.Contains(namespace, ",") { return ctx, spec.ResponseFailWithFlags(spec.ParameterInvalidNSNotOne, model.ResourceNamespaceFlag.Name) } if workloadName == "" { return ctx, spec.ResponseFailWithFlags(spec.ParameterLess, "workload-name") } containerObjectMetaList := model.ContainerMatchedList{ model.ContainerObjectMeta{ Namespace: namespace, PodName: fmt.Sprintf("chaosblade-sf-%s-%s", workloadType, workloadName), }, } ctx = model.SetContainerObjectMetaListToContext(ctx, containerObjectMetaList) return ctx, nil } // PreDestroy implements model.ActionPreProcessor interface. // It prepares the context for schedulingfailure destroy flow. // Unlike the default destroy path, it always attempts to restore the workload // regardless of the old experiment status. 
func (a *PodSchedulingFailureActionSpec) PreDestroy(ctx context.Context, expModel *spec.ExpModel, client *channel.Client, oldExpStatus v1alpha1.ExperimentStatus) (context.Context, *spec.Response) { namespace := expModel.ActionFlags[model.ResourceNamespaceFlag.Name] workloadType := expModel.ActionFlags["workload-type"] if workloadType == "" { workloadType = "deployment" } workloadName := expModel.ActionFlags["workload-name"] if namespace == "" { return ctx, spec.ResponseFailWithFlags(spec.ParameterLess, model.ResourceNamespaceFlag.Name) } if strings.Contains(namespace, ",") { return ctx, spec.ResponseFailWithFlags(spec.ParameterInvalidNSNotOne, model.ResourceNamespaceFlag.Name) } if workloadName == "" { return ctx, spec.ResponseFailWithFlags(spec.ParameterLess, "workload-name") } containerObjectMetaList := model.ContainerMatchedList{ model.ContainerObjectMeta{ Namespace: namespace, PodName: fmt.Sprintf("chaosblade-sf-%s-%s", workloadType, workloadName), }, } ctx = model.SetContainerObjectMetaListToContext(ctx, containerObjectMetaList) return ctx, nil } // restoreDeployment restores a Deployment's original affinity configuration func (d *PodSchedulingFailureActionExecutor) restoreDeployment(ctx context.Context, deployment *appsv1.Deployment, experimentId string) error { // Verify this deployment was modified by the same experiment if deployment.Annotations[ChaosBladeExperimentAnnotation] != experimentId { return fmt.Errorf("deployment was not modified by experiment %s", experimentId) } if err := d.restoreAffinity(&deployment.Spec.Template.Spec, deployment.Annotations); err != nil { return err } // Clean up annotations delete(deployment.Annotations, ChaosBladeDeploymentAnnotation) delete(deployment.Annotations, ChaosBladeExperimentAnnotation) delete(deployment.Annotations, ChaosBladeAffinityTypeAnnotation) delete(deployment.Annotations, ChaosBladeOriginalNodeAffinityAnnotation) delete(deployment.Annotations, ChaosBladeOriginalPodAffinityAnnotation) 
delete(deployment.Annotations, ChaosBladeOriginalPodAntiAffinityAnnotation) delete(deployment.Annotations, ChaosBladeOriginalNodeSelectorAnnotation) return d.client.Update(ctx, deployment) } // restoreDaemonSet restores a DaemonSet's original affinity configuration func (d *PodSchedulingFailureActionExecutor) restoreDaemonSet(ctx context.Context, daemonset *appsv1.DaemonSet, experimentId string) error { if daemonset.Annotations[ChaosBladeExperimentAnnotation] != experimentId { return fmt.Errorf("daemonset was not modified by experiment %s", experimentId) } if err := d.restoreAffinity(&daemonset.Spec.Template.Spec, daemonset.Annotations); err != nil { return err } delete(daemonset.Annotations, ChaosBladeDaemonSetAnnotation) delete(daemonset.Annotations, ChaosBladeExperimentAnnotation) delete(daemonset.Annotations, ChaosBladeAffinityTypeAnnotation) delete(daemonset.Annotations, ChaosBladeOriginalNodeAffinityAnnotation) delete(daemonset.Annotations, ChaosBladeOriginalPodAffinityAnnotation) delete(daemonset.Annotations, ChaosBladeOriginalPodAntiAffinityAnnotation) delete(daemonset.Annotations, ChaosBladeOriginalNodeSelectorAnnotation) return d.client.Update(ctx, daemonset) } // restoreStatefulSet restores a StatefulSet's original affinity configuration func (d *PodSchedulingFailureActionExecutor) restoreStatefulSet(ctx context.Context, statefulset *appsv1.StatefulSet, experimentId string) error { if statefulset.Annotations[ChaosBladeExperimentAnnotation] != experimentId { return fmt.Errorf("statefulset was not modified by experiment %s", experimentId) } if err := d.restoreAffinity(&statefulset.Spec.Template.Spec, statefulset.Annotations); err != nil { return err } delete(statefulset.Annotations, ChaosBladeStatefulSetAnnotation) delete(statefulset.Annotations, ChaosBladeExperimentAnnotation) delete(statefulset.Annotations, ChaosBladeAffinityTypeAnnotation) delete(statefulset.Annotations, ChaosBladeOriginalNodeAffinityAnnotation) delete(statefulset.Annotations, 
ChaosBladeOriginalPodAffinityAnnotation) delete(statefulset.Annotations, ChaosBladeOriginalPodAntiAffinityAnnotation) delete(statefulset.Annotations, ChaosBladeOriginalNodeSelectorAnnotation) return d.client.Update(ctx, statefulset) } // restoreAffinity restores only the affinity field that was modified during injection. // It uses the ChaosBladeAffinityTypeAnnotation to determine which field to restore, // avoiding unintentional clearing of pre-existing affinity/selector settings. func (d *PodSchedulingFailureActionExecutor) restoreAffinity(podSpec *v1.PodSpec, annotations map[string]string) error { affinityType := annotations[ChaosBladeAffinityTypeAnnotation] if affinityType == "" { return fmt.Errorf("affinity type annotation not found, cannot determine which field to restore") } switch affinityType { case "node-affinity": if originalNodeAffinityStr, ok := annotations[ChaosBladeOriginalNodeAffinityAnnotation]; ok { var nodeAffinity v1.NodeAffinity if err := json.Unmarshal([]byte(originalNodeAffinityStr), &nodeAffinity); err != nil { return fmt.Errorf("unmarshal original node affinity failed: %v", err) } if podSpec.Affinity == nil { podSpec.Affinity = &v1.Affinity{} } podSpec.Affinity.NodeAffinity = &nodeAffinity } else { if podSpec.Affinity != nil { podSpec.Affinity.NodeAffinity = nil } } case "pod-affinity": if originalPodAffinityStr, ok := annotations[ChaosBladeOriginalPodAffinityAnnotation]; ok { var podAffinity v1.PodAffinity if err := json.Unmarshal([]byte(originalPodAffinityStr), &podAffinity); err != nil { return fmt.Errorf("unmarshal original pod affinity failed: %v", err) } if podSpec.Affinity == nil { podSpec.Affinity = &v1.Affinity{} } podSpec.Affinity.PodAffinity = &podAffinity } else { if podSpec.Affinity != nil { podSpec.Affinity.PodAffinity = nil } } case "pod-anti-affinity": if originalPodAntiAffinityStr, ok := annotations[ChaosBladeOriginalPodAntiAffinityAnnotation]; ok { var podAntiAffinity v1.PodAntiAffinity if err := 
json.Unmarshal([]byte(originalPodAntiAffinityStr), &podAntiAffinity); err != nil { return fmt.Errorf("unmarshal original pod anti-affinity failed: %v", err) } if podSpec.Affinity == nil { podSpec.Affinity = &v1.Affinity{} } podSpec.Affinity.PodAntiAffinity = &podAntiAffinity } else { if podSpec.Affinity != nil { podSpec.Affinity.PodAntiAffinity = nil } } case "node-selector": if originalNodeSelectorStr, ok := annotations[ChaosBladeOriginalNodeSelectorAnnotation]; ok { var originalNodeSelector map[string]string if err := json.Unmarshal([]byte(originalNodeSelectorStr), &originalNodeSelector); err != nil { return fmt.Errorf("unmarshal original node selector failed: %v", err) } podSpec.NodeSelector = originalNodeSelector } else { podSpec.NodeSelector = nil } default: return fmt.Errorf("unknown affinity type in annotation: %s", affinityType) } // Clean up empty Affinity struct if podSpec.Affinity != nil && podSpec.Affinity.NodeAffinity == nil && podSpec.Affinity.PodAffinity == nil && podSpec.Affinity.PodAntiAffinity == nil { podSpec.Affinity = nil } return nil } ================================================ FILE: exec/pod/taintnode.go ================================================ /* * Copyright 2025 The ChaosBlade Authors * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. 
*/ package pod import ( "context" "encoding/json" "fmt" "strings" "github.com/chaosblade-io/chaosblade-spec-go/spec" "github.com/chaosblade-io/chaosblade-spec-go/util" "github.com/sirupsen/logrus" v1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/types" "k8s.io/client-go/util/retry" "github.com/chaosblade-io/chaosblade-operator/channel" "github.com/chaosblade-io/chaosblade-operator/exec/model" "github.com/chaosblade-io/chaosblade-operator/pkg/apis/chaosblade/v1alpha1" ) const ( // ChaosBladeTaintAnnotation indicates the node taint was modified by chaosblade ChaosBladeTaintAnnotation = "chaosblade.io/taint" // ChaosBladeOriginalTaintsAnnotation stores the original node taints ChaosBladeOriginalTaintsAnnotation = "chaosblade.io/original-taints" // ChaosBladeInjectedTaintAnnotation stores the taint injected by this experiment ChaosBladeInjectedTaintAnnotation = "chaosblade.io/injected-taint" // DefaultTaintKey is the default taint key for injection DefaultTaintKey = "chaosblade.io/unreachable" // DefaultTaintValue is the default taint value for injection DefaultTaintValue = "true" // DefaultTaintEffect is the default taint effect DefaultTaintEffect = "NoSchedule" ) // chaosBladeTaintAnnotations returns the taintnode-owned annotation keys for cleanup. 
func chaosBladeTaintAnnotations() []string {
	return []string{
		ChaosBladeOriginalTaintsAnnotation,
		ChaosBladeInjectedTaintAnnotation,
		ChaosBladeExperimentAnnotation,
	}
}

// PodTaintNodeActionSpec describes the "taintnode" action: making pod
// scheduling fail by adding a taint to target nodes.
type PodTaintNodeActionSpec struct {
	spec.BaseExpActionCommandSpec
	// client is the Kubernetes API client used by the pre-processors.
	client *channel.Client
}

// NewPodTaintNodeActionSpec builds the action spec, wiring flags, examples,
// and the executor that performs the node updates.
func NewPodTaintNodeActionSpec(client *channel.Client) spec.ExpActionCommandSpec {
	return &PodTaintNodeActionSpec{
		BaseExpActionCommandSpec: spec.BaseExpActionCommandSpec{
			ActionMatchers: []spec.ExpFlagSpec{},
			ActionFlags: []spec.ExpFlagSpec{
				&spec.ExpFlag{
					Name:     "nodes",
					Desc:     "Node names to inject taint, multiple values separated by commas",
					Required: true,
				},
				&spec.ExpFlag{
					Name:    "taint-effect",
					Desc:    "Taint effect: NoSchedule (default), NoExecute, PreferNoSchedule. WARNING: NoExecute will evict running pods without matching tolerations",
					NoArgs:  false,
					Default: DefaultTaintEffect,
				},
				&spec.ExpFlag{
					Name:    "taint-key",
					Desc:    "Custom taint key. Default: chaosblade.io/unreachable",
					NoArgs:  false,
					Default: DefaultTaintKey,
				},
				&spec.ExpFlag{
					Name:    "taint-value",
					Desc:    "Custom taint value. Default: true",
					NoArgs:  false,
					Default: DefaultTaintValue,
				},
			},
			ActionExecutor: &PodTaintNodeActionExecutor{client: client},
			ActionExample: `# Add unreachable taint to nodes to prevent pod scheduling
blade create k8s pod-pod taintnode --nodes node1,node2 --kubeconfig ~/.kube/config

# Add taint with NoExecute effect (will evict running pods)
blade create k8s pod-pod taintnode --nodes node1 --taint-effect NoExecute --kubeconfig ~/.kube/config

# Add custom taint
blade create k8s pod-pod taintnode --nodes node1 --taint-key dedicated --taint-value gpu --taint-effect NoSchedule --kubeconfig ~/.kube/config
`,
			ActionCategories: []string{model.CategorySystemContainer},
		},
		client: client,
	}
}

// Name returns the action name used on the command line.
func (*PodTaintNodeActionSpec) Name() string {
	return "taintnode"
}

// Aliases returns alternative action names (none for this action).
func (*PodTaintNodeActionSpec) Aliases() []string {
	return []string{}
}

// ShortDesc returns the one-line description shown in CLI help.
func (*PodTaintNodeActionSpec) ShortDesc() string {
	return "Make pod scheduling fail by adding unreachable taint to nodes"
}

// LongDesc returns the full description shown in CLI help.
func (*PodTaintNodeActionSpec) LongDesc() string {
	return "Simulate the scenario where a Pod cannot be scheduled due to Taint/Toleration mismatch. " +
		"This fault is injected by adding an unreachable taint to the target nodes. " +
		"Pods without matching tolerations will not be scheduled to these nodes. " +
		"When the experiment is destroyed, the original taints will be restored. " +
		"WARNING: Using NoExecute effect will evict running pods that do not have matching tolerations."
}

// PodTaintNodeActionExecutor performs the actual taint injection/restoration
// against the Kubernetes API.
type PodTaintNodeActionExecutor struct {
	client *channel.Client
}

// Name returns the executor name (matches the action name).
func (*PodTaintNodeActionExecutor) Name() string {
	return "taintnode"
}

// SetChannel is a no-op; this executor talks to the API server directly.
func (*PodTaintNodeActionExecutor) SetChannel(channel spec.Channel) {}

// Exec dispatches to create or destroy depending on the context flag.
func (d *PodTaintNodeActionExecutor) Exec(uid string, ctx context.Context, expModel *spec.ExpModel) *spec.Response {
	if _, ok := spec.IsDestroy(ctx); ok {
		return d.destroy(uid, ctx, expModel)
	}
	return d.create(uid, ctx, expModel)
}

// parseTaintNodeFlags parses and validates flags for taintnode action.
func parseTaintNodeFlags(expModel *spec.ExpModel) (nodeNames []string, taintKey, taintValue, taintEffect string, err error) { nodesFlag := expModel.ActionFlags["nodes"] if nodesFlag == "" { return nil, "", "", "", fmt.Errorf("nodes flag is required") } nodeNames, err = parseNodeNames(nodesFlag) if err != nil { return nil, "", "", "", err } taintKey = expModel.ActionFlags["taint-key"] if taintKey == "" { taintKey = DefaultTaintKey } taintValue = expModel.ActionFlags["taint-value"] if taintValue == "" { taintValue = DefaultTaintValue } taintEffect = expModel.ActionFlags["taint-effect"] if taintEffect == "" { taintEffect = DefaultTaintEffect } // Validate taint effect switch taintEffect { case string(v1.TaintEffectNoSchedule), string(v1.TaintEffectNoExecute), string(v1.TaintEffectPreferNoSchedule): default: return nil, "", "", "", fmt.Errorf("unsupported taint effect: %s, supported values: NoSchedule, NoExecute, PreferNoSchedule", taintEffect) } return nodeNames, taintKey, taintValue, taintEffect, nil } func (d *PodTaintNodeActionExecutor) create(uid string, ctx context.Context, expModel *spec.ExpModel) *spec.Response { experimentId := model.GetExperimentIdFromContext(ctx) logrusField := logrus.WithField("experiment", experimentId) nodeNames, taintKey, taintValue, taintEffect, err := parseTaintNodeFlags(expModel) if err != nil { util.Errorf(uid, util.GetRunFuncName(), err.Error()) return spec.ResponseFailWithFlags(spec.ParameterIllegal, err.Error()) } var resourceStatuses []v1alpha1.ResourceStatus for _, nodeName := range nodeNames { status := v1alpha1.ResourceStatus{ Kind: v1alpha1.NodeKind, Identifier: nodeName, } if err := retry.RetryOnConflict(retry.DefaultBackoff, func() error { // Re-get latest node to avoid conflict latest := &v1.Node{} if err := d.client.Get(ctx, types.NamespacedName{Name: nodeName}, latest); err != nil { return err } return d.injectTaintToNode(ctx, latest, taintKey, taintValue, taintEffect, experimentId) }); err != nil { 
logrusField.Warningf("inject taint to node %s failed: %v", nodeName, err) status = status.CreateFailResourceStatus(fmt.Sprintf("inject taint to node %s failed: %v", nodeName, err), spec.K8sExecFailed.Code) resourceStatuses = append(resourceStatuses, status) continue } logrusField.Infof("injected taint %s=%s:%s to node %s", taintKey, taintValue, taintEffect, nodeName) status = status.CreateSuccessResourceStatus() resourceStatuses = append(resourceStatuses, status) } // Check if all nodes failed allFailed := true for _, s := range resourceStatuses { if s.Success { allFailed = false break } } if allFailed { return spec.ReturnResultIgnoreCode(v1alpha1.CreateFailExperimentStatus("all nodes failed", resourceStatuses)) } return spec.ReturnResultIgnoreCode(v1alpha1.CreateSuccessExperimentStatus(resourceStatuses)) } func (d *PodTaintNodeActionExecutor) destroy(uid string, ctx context.Context, expModel *spec.ExpModel) *spec.Response { experimentId := model.GetExperimentIdFromContext(ctx) logrusField := logrus.WithField("experiment", experimentId) nodesFlag := expModel.ActionFlags["nodes"] nodeNames, err := parseNodeNames(nodesFlag) if err != nil { util.Errorf(uid, util.GetRunFuncName(), err.Error()) return spec.ResponseFailWithFlags(spec.ParameterIllegal, err.Error()) } var resourceStatuses []v1alpha1.ResourceStatus for _, nodeName := range nodeNames { status := v1alpha1.ResourceStatus{ Kind: v1alpha1.NodeKind, Identifier: nodeName, } if err := retry.RetryOnConflict(retry.DefaultBackoff, func() error { // Re-get latest node to avoid conflict latest := &v1.Node{} if err := d.client.Get(ctx, types.NamespacedName{Name: nodeName}, latest); err != nil { return err } return d.restoreNodeTaints(ctx, latest, experimentId) }); err != nil { logrusField.Warningf("restore node %s taints failed: %v", nodeName, err) status = status.CreateFailResourceStatus(fmt.Sprintf("restore node %s taints failed: %v", nodeName, err), spec.K8sExecFailed.Code) resourceStatuses = append(resourceStatuses, 
status) continue } logrusField.Infof("restored node %s taints", nodeName) status = status.CreateSuccessResourceStatus() status.State = v1alpha1.DestroyedState resourceStatuses = append(resourceStatuses, status) } allFailed := true for _, s := range resourceStatuses { if s.Success { allFailed = false break } } if allFailed { return spec.ReturnResultIgnoreCode(v1alpha1.CreateFailExperimentStatus("all nodes restore failed", resourceStatuses)) } return spec.ReturnResultIgnoreCode(v1alpha1.CreateDestroyedExperimentStatus(resourceStatuses)) } // injectTaintToNode adds an unreachable taint to a node. func (d *PodTaintNodeActionExecutor) injectTaintToNode(ctx context.Context, node *v1.Node, taintKey, taintValue, taintEffect, experimentId string) error { if node.Annotations == nil { node.Annotations = make(map[string]string) } // Check for conflicting experiment if existingId, ok := node.Annotations[ChaosBladeExperimentAnnotation]; ok && existingId != "" && existingId != experimentId { return fmt.Errorf("node is already modified by another chaosblade experiment: %s", existingId) } // Idempotent: if already modified by the same experiment, skip re-injection if node.Annotations[ChaosBladeExperimentAnnotation] == experimentId { return nil } // Backup original taints if len(node.Spec.Taints) > 0 { originalBytes, err := json.Marshal(node.Spec.Taints) if err != nil { return fmt.Errorf("marshal original taints failed: %v", err) } node.Annotations[ChaosBladeOriginalTaintsAnnotation] = string(originalBytes) } // Add the unreachable taint; refuse to overwrite an existing taint with the same key+effect newTaint := v1.Taint{ Key: taintKey, Value: taintValue, Effect: v1.TaintEffect(taintEffect), } if idx := findTaintIndex(node.Spec.Taints, newTaint.Key, newTaint.Effect); idx >= 0 { existing := node.Spec.Taints[idx] return fmt.Errorf("node already has taint with key %q and effect %q (value %q); refusing to overwrite existing taint", existing.Key, existing.Effect, existing.Value) } 
node.Spec.Taints = append(node.Spec.Taints, newTaint) // Record injected taint for surgical removal during restore injectedBytes, _ := json.Marshal(newTaint) node.Annotations[ChaosBladeInjectedTaintAnnotation] = string(injectedBytes) // Set annotations node.Annotations[ChaosBladeTaintAnnotation] = ChaosBladeModifyAction node.Annotations[ChaosBladeExperimentAnnotation] = experimentId return d.client.Update(ctx, node) } // restoreNodeTaints removes only the taint injected by this experiment, // preserving any taints added by other controllers during the experiment. func (d *PodTaintNodeActionExecutor) restoreNodeTaints(ctx context.Context, node *v1.Node, experimentId string) error { if node.Annotations == nil { return nil } // If this experiment did not modify the node, nothing to restore if node.Annotations[ChaosBladeExperimentAnnotation] != experimentId { return nil } // Remove only the injected taint, preserving taints added by other controllers if injectedStr, ok := node.Annotations[ChaosBladeInjectedTaintAnnotation]; ok { var injected v1.Taint if err := json.Unmarshal([]byte(injectedStr), &injected); err != nil { return fmt.Errorf("unmarshal injected taint failed: %v", err) } node.Spec.Taints = removeTaintByKeyEffect(node.Spec.Taints, injected.Key, injected.Effect) } else { // Fallback: no injected-taint annotation, use original snapshot if originalTaintsStr, ok := node.Annotations[ChaosBladeOriginalTaintsAnnotation]; ok { var originalTaints []v1.Taint if err := json.Unmarshal([]byte(originalTaintsStr), &originalTaints); err != nil { return fmt.Errorf("unmarshal original taints failed: %v", err) } node.Spec.Taints = originalTaints } else { node.Spec.Taints = nil } } // Clean up annotations delete(node.Annotations, ChaosBladeTaintAnnotation) for _, key := range chaosBladeTaintAnnotations() { delete(node.Annotations, key) } return d.client.Update(ctx, node) } // findTaintIndex returns the index of the first taint matching key+effect, or -1. 
func findTaintIndex(taints []v1.Taint, key string, effect v1.TaintEffect) int { for i, t := range taints { if t.Key == key && t.Effect == effect { return i } } return -1 } // removeTaintByKeyEffect removes the first taint matching key+effect from the list. // Kubernetes guarantees key+effect uniqueness per node, so this is sufficient for removal. func removeTaintByKeyEffect(taints []v1.Taint, key string, effect v1.TaintEffect) []v1.Taint { for i, t := range taints { if t.Key == key && t.Effect == effect { return append(taints[:i], taints[i+1:]...) } } return taints } // parseNodeNames splits a comma-separated nodes flag, trims whitespace, and rejects empty entries. func parseNodeNames(nodesFlag string) ([]string, error) { var result []string for _, n := range strings.Split(nodesFlag, ",") { n = strings.TrimSpace(n) if n == "" { return nil, fmt.Errorf("nodes flag contains empty node name") } result = append(result, n) } return result, nil } // validateTaintNodeFlags validates the nodes flag. func validateTaintNodeFlags(nodes string) *spec.Response { if nodes == "" { return spec.ResponseFailWithFlags(spec.ParameterLess, "nodes") } return nil } // PreCreate implements model.ActionPreProcessor interface. 
func (a *PodTaintNodeActionSpec) PreCreate(ctx context.Context, expModel *spec.ExpModel, client *channel.Client) (context.Context, *spec.Response) { nodes := expModel.ActionFlags["nodes"] if resp := validateTaintNodeFlags(nodes); resp != nil { return ctx, resp } nodeNames, err := parseNodeNames(nodes) if err != nil { return ctx, spec.ResponseFailWithFlags(spec.ParameterIllegal, err.Error()) } taintEffect := expModel.ActionFlags["taint-effect"] if taintEffect == "" { taintEffect = DefaultTaintEffect } // Validate taint effect in PreCreate to fail fast switch taintEffect { case string(v1.TaintEffectNoSchedule), string(v1.TaintEffectNoExecute), string(v1.TaintEffectPreferNoSchedule): default: return ctx, spec.ResponseFailWithFlags(spec.ParameterIllegal, fmt.Sprintf("unsupported taint effect: %s, supported values: NoSchedule, NoExecute, PreferNoSchedule", taintEffect)) } containerObjectMetaList := model.ContainerMatchedList{} for _, nodeName := range nodeNames { containerObjectMetaList = append(containerObjectMetaList, model.ContainerObjectMeta{ Namespace: "", PodName: fmt.Sprintf("chaosblade-tn-%s", nodeName), NodeName: nodeName, }) } ctx = model.SetContainerObjectMetaListToContext(ctx, containerObjectMetaList) return ctx, nil } // PreDestroy implements model.ActionPreProcessor interface. 
func (a *PodTaintNodeActionSpec) PreDestroy(ctx context.Context, expModel *spec.ExpModel, client *channel.Client, oldExpStatus v1alpha1.ExperimentStatus) (context.Context, *spec.Response) {
	// Re-parse the nodes flag so destroy can locate the same targets as create.
	// Unlike PreCreate, taint-effect is not re-validated here: only the node
	// names are needed to build the target list.
	nodes := expModel.ActionFlags["nodes"]
	if resp := validateTaintNodeFlags(nodes); resp != nil {
		return ctx, resp
	}
	nodeNames, err := parseNodeNames(nodes)
	if err != nil {
		return ctx, spec.ResponseFailWithFlags(spec.ParameterIllegal, err.Error())
	}
	// Same synthetic naming scheme as PreCreate ("chaosblade-tn-<node>"), so the
	// downstream executor resolves identical identifiers for create and destroy.
	containerObjectMetaList := model.ContainerMatchedList{}
	for _, nodeName := range nodeNames {
		containerObjectMetaList = append(containerObjectMetaList, model.ContainerObjectMeta{
			Namespace: "",
			PodName:   fmt.Sprintf("chaosblade-tn-%s", nodeName),
			NodeName:  nodeName,
		})
	}
	ctx = model.SetContainerObjectMetaListToContext(ctx, containerObjectMetaList)
	return ctx, nil
}


================================================
FILE: exec/pod/taintnode_test.go
================================================
/*
 * Copyright 2025 The ChaosBlade Authors
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
*/

// Unit tests for the taint-node action: flag parsing, conflict detection,
// backup/restore of node taints, and the removal helper.
package pod

import (
	"encoding/json"
	"strings"
	"testing"

	"github.com/chaosblade-io/chaosblade-spec-go/spec"
	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// TestParseTaintNodeFlags is a table-driven test over the combined flag parser:
// default values, multi-node lists, custom taint params, and rejection cases.
func TestParseTaintNodeFlags(t *testing.T) {
	tests := []struct {
		name        string
		actionFlags map[string]string
		wantNodes   []string
		wantKey     string
		wantValue   string
		wantEffect  string
		wantErr     bool
		errContains string
	}{
		{
			name:        "default values",
			actionFlags: map[string]string{"nodes": "node1"},
			wantNodes:   []string{"node1"},
			wantKey:     DefaultTaintKey,
			wantValue:   DefaultTaintValue,
			wantEffect:  DefaultTaintEffect,
		},
		{
			name:        "multiple nodes",
			actionFlags: map[string]string{"nodes": "node1,node2,node3"},
			wantNodes:   []string{"node1", "node2", "node3"},
			wantKey:     DefaultTaintKey,
			wantValue:   DefaultTaintValue,
			wantEffect:  DefaultTaintEffect,
		},
		{
			name: "custom taint params",
			actionFlags: map[string]string{
				"nodes":        "node1",
				"taint-key":    "dedicated",
				"taint-value":  "gpu",
				"taint-effect": "NoExecute",
			},
			wantNodes:  []string{"node1"},
			wantKey:    "dedicated",
			wantValue:  "gpu",
			wantEffect: "NoExecute",
		},
		{
			name:        "missing nodes flag",
			actionFlags: map[string]string{},
			wantErr:     true,
			errContains: "nodes flag is required",
		},
		{
			name: "unsupported taint effect",
			actionFlags: map[string]string{
				"nodes":        "node1",
				"taint-effect": "InvalidEffect",
			},
			wantErr:     true,
			errContains: "unsupported taint effect",
		},
		{
			name:        "PreferNoSchedule is valid",
			actionFlags: map[string]string{"nodes": "node1", "taint-effect": "PreferNoSchedule"},
			wantNodes:   []string{"node1"},
			wantKey:     DefaultTaintKey,
			wantValue:   DefaultTaintValue,
			wantEffect:  "PreferNoSchedule",
		},
		{
			name:        "whitespace trimmed",
			actionFlags: map[string]string{"nodes": " node1 , node2 "},
			wantNodes:   []string{"node1", "node2"},
			wantKey:     DefaultTaintKey,
			wantValue:   DefaultTaintValue,
			wantEffect:  DefaultTaintEffect,
		},
		{
			name:        "trailing comma rejected",
			actionFlags: map[string]string{"nodes": "node1,"},
			wantErr:     true,
			errContains: "empty node name",
		},
		{
			name:        "double comma rejected",
			actionFlags: map[string]string{"nodes": "node1,,node2"},
			wantErr:     true,
			errContains: "empty node name",
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			expModel := &spec.ExpModel{ActionFlags: tt.actionFlags}
			nodes, key, value, effect, err := parseTaintNodeFlags(expModel)
			if tt.wantErr {
				if err == nil {
					t.Errorf("parseTaintNodeFlags() expected error containing %q, got nil", tt.errContains)
				} else if tt.errContains != "" && !containsString(err.Error(), tt.errContains) {
					t.Errorf("parseTaintNodeFlags() error = %q, want containing %q", err.Error(), tt.errContains)
				}
				return
			}
			if err != nil {
				t.Errorf("parseTaintNodeFlags() unexpected error: %v", err)
				return
			}
			if len(nodes) != len(tt.wantNodes) {
				t.Errorf("parseTaintNodeFlags() nodes = %v, want %v", nodes, tt.wantNodes)
			}
			for i, n := range nodes {
				if n != tt.wantNodes[i] {
					t.Errorf("parseTaintNodeFlags() nodes[%d] = %q, want %q", i, n, tt.wantNodes[i])
				}
			}
			if key != tt.wantKey {
				t.Errorf("parseTaintNodeFlags() key = %q, want %q", key, tt.wantKey)
			}
			if value != tt.wantValue {
				t.Errorf("parseTaintNodeFlags() value = %q, want %q", value, tt.wantValue)
			}
			if effect != tt.wantEffect {
				t.Errorf("parseTaintNodeFlags() effect = %q, want %q", effect, tt.wantEffect)
			}
		})
	}
}

// TestValidateTaintNodeFlags checks that only an empty nodes flag is rejected.
func TestValidateTaintNodeFlags(t *testing.T) {
	tests := []struct {
		name     string
		nodes    string
		wantFail bool
	}{
		{"valid nodes", "node1", false},
		{"empty nodes", "", true},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			resp := validateTaintNodeFlags(tt.nodes)
			if tt.wantFail && resp == nil {
				t.Error("validateTaintNodeFlags() expected failure, got nil")
			}
			if !tt.wantFail && resp != nil {
				t.Errorf("validateTaintNodeFlags() unexpected failure: %v", resp)
			}
		})
	}
}

// TestInjectTaintToNode_ConflictDetection verifies that injecting into a node
// annotated by a different experiment is refused before any update happens.
func TestInjectTaintToNode_ConflictDetection(t *testing.T) {
	node := &v1.Node{
		ObjectMeta: metav1.ObjectMeta{
			Name: "test-node",
			Annotations: map[string]string{
				ChaosBladeExperimentAnnotation: "other-experiment",
			},
		},
	}
	executor := &PodTaintNodeActionExecutor{}
	// injectTaintToNode is a method, so we test the conflict logic directly
	err := executor.injectTaintToNode(nil, node, DefaultTaintKey, DefaultTaintValue, DefaultTaintEffect, "my-experiment")
	if err == nil {
		t.Error("expected conflict error when node is already modified by another experiment")
	}
	if !containsString(err.Error(), "already modified by another chaosblade experiment") {
		t.Errorf("unexpected error message: %v", err)
	}
}

// TestInjectTaintToNode_Idempotent verifies re-injection by the same
// experiment ID is a no-op rather than an error.
func TestInjectTaintToNode_Idempotent(t *testing.T) {
	node := &v1.Node{
		ObjectMeta: metav1.ObjectMeta{
			Name: "test-node",
			Annotations: map[string]string{
				ChaosBladeExperimentAnnotation: "my-experiment",
			},
		},
	}
	executor := &PodTaintNodeActionExecutor{}
	// Same experiment ID should skip injection (idempotent)
	err := executor.injectTaintToNode(nil, node, DefaultTaintKey, DefaultTaintValue, DefaultTaintEffect, "my-experiment")
	if err != nil {
		t.Errorf("expected nil for idempotent injection, got: %v", err)
	}
}

// TestInjectTaintToNode_BackupAndInject replays the injection bookkeeping
// in-memory and checks the backup annotation, the appended taint, and the
// marker annotations.
func TestInjectTaintToNode_BackupAndInject(t *testing.T) {
	originalTaints := []v1.Taint{
		{Key: "existing-key", Value: "existing-value", Effect: v1.TaintEffectNoSchedule},
	}
	node := &v1.Node{
		ObjectMeta: metav1.ObjectMeta{
			Name:        "test-node",
			Annotations: map[string]string{},
		},
		Spec: v1.NodeSpec{
			Taints: originalTaints,
		},
	}
	// Simulate the injection logic without calling injectTaintToNode
	// (which requires a real client for Update)
	backupBytes, _ := json.Marshal(originalTaints)
	node.Annotations[ChaosBladeOriginalTaintsAnnotation] = string(backupBytes)
	node.Spec.Taints = append(node.Spec.Taints, v1.Taint{
		Key:    DefaultTaintKey,
		Value:  DefaultTaintValue,
		Effect: v1.TaintEffect(DefaultTaintEffect),
	})
	node.Annotations[ChaosBladeTaintAnnotation] = ChaosBladeModifyAction
	node.Annotations[ChaosBladeExperimentAnnotation] = "my-experiment"
	// Verify original taints are backed up
	backupStr := node.Annotations[ChaosBladeOriginalTaintsAnnotation]
	if backupStr == "" {
		t.Error("expected original taints to be backed up in annotations")
	}
	var backedUp []v1.Taint
	if err := json.Unmarshal([]byte(backupStr), &backedUp); err != nil {
		t.Fatalf("failed to unmarshal backup: %v", err)
	}
	if len(backedUp) != 1 || backedUp[0].Key != "existing-key" {
		t.Errorf("backup = %v, want [{existing-key existing-value NoSchedule}]", backedUp)
	}
	// Verify new taint was appended
	found := false
	for _, tt := range node.Spec.Taints {
		if tt.Key == DefaultTaintKey {
			found = true
			if tt.Value != DefaultTaintValue || tt.Effect != v1.TaintEffectNoSchedule {
				t.Errorf("injected taint = %v, want key=%s value=%s effect=NoSchedule", tt, DefaultTaintKey, DefaultTaintValue)
			}
		}
	}
	if !found {
		t.Error("expected chaosblade taint to be added to node spec")
	}
	// Verify annotations
	if node.Annotations[ChaosBladeTaintAnnotation] != ChaosBladeModifyAction {
		t.Errorf("chaosblade.io/taint annotation = %q, want %q", node.Annotations[ChaosBladeTaintAnnotation], ChaosBladeModifyAction)
	}
	if node.Annotations[ChaosBladeExperimentAnnotation] != "my-experiment" {
		t.Errorf("chaosblade.io/experiment annotation = %q, want %q", node.Annotations[ChaosBladeExperimentAnnotation], "my-experiment")
	}
}

// TestInjectTaintToNode_DuplicateKeyEffect verifies injection refuses to
// overwrite a pre-existing taint with the same key+effect and leaves the
// node untouched.
func TestInjectTaintToNode_DuplicateKeyEffect(t *testing.T) {
	// Node already has a taint with the same key+effect as the one we want to inject
	node := &v1.Node{
		ObjectMeta: metav1.ObjectMeta{
			Name:        "test-node",
			Annotations: map[string]string{},
		},
		Spec: v1.NodeSpec{
			Taints: []v1.Taint{
				{Key: DefaultTaintKey, Value: "existing-value", Effect: v1.TaintEffectNoSchedule},
			},
		},
	}
	executor := &PodTaintNodeActionExecutor{}
	err := executor.injectTaintToNode(nil, node, DefaultTaintKey, DefaultTaintValue, DefaultTaintEffect, "my-experiment")
	if err == nil {
		t.Error("expected error when node already has taint with same key+effect")
	}
	if !strings.Contains(err.Error(), "refusing to overwrite") {
		t.Errorf("unexpected error message: %v", err)
	}
	// Verify node taints were NOT modified
	if len(node.Spec.Taints) != 1 {
		t.Fatalf("expected 1 taint (unchanged), got %d", len(node.Spec.Taints))
	}
	if node.Spec.Taints[0].Value != "existing-value" {
		t.Errorf("existing taint value should be unchanged, got %q", node.Spec.Taints[0].Value)
	}
}

// TestRestoreNodeTaints_NotModifiedByExperiment: restore is a no-op (no error)
// when the node was never marked, or was marked by a different experiment.
func TestRestoreNodeTaints_NotModifiedByExperiment(t *testing.T) {
	tests := []struct {
		name        string
		annotations map[string]string
		experiment  string
		wantErr     bool
	}{
		{
			name:        "nil annotations - no error",
			annotations: nil,
			experiment:  "my-experiment",
			wantErr:     false,
		},
		{
			name:        "different experiment - skip restore",
			annotations: map[string]string{ChaosBladeExperimentAnnotation: "other-experiment"},
			experiment:  "my-experiment",
			wantErr:     false,
		},
		{
			name:        "empty annotations - skip restore",
			annotations: map[string]string{},
			experiment:  "my-experiment",
			wantErr:     false,
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			node := &v1.Node{
				ObjectMeta: metav1.ObjectMeta{
					Name:        "test-node",
					Annotations: tt.annotations,
				},
			}
			executor := &PodTaintNodeActionExecutor{}
			err := executor.restoreNodeTaints(nil, node, tt.experiment)
			if tt.wantErr && err == nil {
				t.Error("expected error, got nil")
			}
			if !tt.wantErr && err != nil {
				t.Errorf("unexpected error: %v", err)
			}
		})
	}
}

// TestRestoreNodeTaints_SuccessfulRestore replays the restore bookkeeping:
// only the injected taint is removed and all marker annotations are cleared.
func TestRestoreNodeTaints_SuccessfulRestore(t *testing.T) {
	originalTaints := []v1.Taint{
		{Key: "sigma.ali/resource-pool", Value: "ackee_pool", Effect: v1.TaintEffectNoSchedule},
	}
	backupBytes, _ := json.Marshal(originalTaints)
	injectedTaint := v1.Taint{Key: DefaultTaintKey, Value: DefaultTaintValue, Effect: v1.TaintEffectNoSchedule}
	injectedBytes, _ := json.Marshal(injectedTaint)
	node := &v1.Node{
		ObjectMeta: metav1.ObjectMeta{
			Name: "test-node",
			Annotations: map[string]string{
				ChaosBladeExperimentAnnotation:     "my-experiment",
				ChaosBladeOriginalTaintsAnnotation: string(backupBytes),
				ChaosBladeInjectedTaintAnnotation:  string(injectedBytes),
				ChaosBladeTaintAnnotation:          ChaosBladeModifyAction,
			},
		},
		Spec: v1.NodeSpec{
			Taints: []v1.Taint{
				{Key: "sigma.ali/resource-pool", Value: "ackee_pool", Effect: v1.TaintEffectNoSchedule},
				{Key: DefaultTaintKey, Value: DefaultTaintValue, Effect: v1.TaintEffectNoSchedule},
			},
		},
	}
	// Simulate restore logic: remove only the injected taint
	if injectedStr, ok := node.Annotations[ChaosBladeInjectedTaintAnnotation]; ok {
		var injected v1.Taint
		if err := json.Unmarshal([]byte(injectedStr), &injected); err != nil {
			t.Fatalf("failed to unmarshal injected taint: %v", err)
		}
		node.Spec.Taints = removeTaintByKeyEffect(node.Spec.Taints, injected.Key, injected.Effect)
	}
	delete(node.Annotations, ChaosBladeTaintAnnotation)
	for _, key := range chaosBladeTaintAnnotations() {
		delete(node.Annotations, key)
	}
	// Verify injected taint removed, original preserved
	if len(node.Spec.Taints) != 1 {
		t.Fatalf("expected 1 taint after restore, got %d", len(node.Spec.Taints))
	}
	if node.Spec.Taints[0].Key != "sigma.ali/resource-pool" {
		t.Errorf("remaining taint key = %q, want sigma.ali/resource-pool", node.Spec.Taints[0].Key)
	}
	// Verify annotations cleaned up
	for _, key := range chaosBladeTaintAnnotations() {
		if _, ok := node.Annotations[key]; ok {
			t.Errorf("annotation %q should be cleaned up", key)
		}
	}
	if _, ok := node.Annotations[ChaosBladeTaintAnnotation]; ok {
		t.Error("chaosblade.io/taint annotation should be cleaned up")
	}
}

// TestRestoreNodeTaints_PreservesNewTaints: taints added by other controllers
// during the experiment must survive the restore.
func TestRestoreNodeTaints_PreservesNewTaints(t *testing.T) {
	// Simulate: another controller added a taint while experiment was running
	originalTaints := []v1.Taint{
		{Key: "sigma.ali/resource-pool", Value: "ackee_pool", Effect: v1.TaintEffectNoSchedule},
	}
	backupBytes, _ := json.Marshal(originalTaints)
	injectedTaint := v1.Taint{Key: DefaultTaintKey, Value: DefaultTaintValue, Effect: v1.TaintEffectNoSchedule}
	injectedBytes, _ := json.Marshal(injectedTaint)
	node := &v1.Node{
		ObjectMeta: metav1.ObjectMeta{
			Name: "test-node",
			Annotations: map[string]string{
				ChaosBladeExperimentAnnotation:     "my-experiment",
				ChaosBladeOriginalTaintsAnnotation: string(backupBytes),
				ChaosBladeInjectedTaintAnnotation:  string(injectedBytes),
				ChaosBladeTaintAnnotation:          ChaosBladeModifyAction,
			},
		},
		Spec: v1.NodeSpec{
			Taints: []v1.Taint{
				{Key: "sigma.ali/resource-pool", Value: "ackee_pool", Effect: v1.TaintEffectNoSchedule},
				{Key: "other-controller/key", Value: "val", Effect: v1.TaintEffectNoSchedule},
				{Key: DefaultTaintKey, Value: DefaultTaintValue, Effect: v1.TaintEffectNoSchedule},
			},
		},
	}
	// Simulate restore logic
	if injectedStr, ok := node.Annotations[ChaosBladeInjectedTaintAnnotation]; ok {
		var injected v1.Taint
		if err := json.Unmarshal([]byte(injectedStr), &injected); err != nil {
			t.Fatalf("failed to unmarshal injected taint: %v", err)
		}
		node.Spec.Taints = removeTaintByKeyEffect(node.Spec.Taints, injected.Key, injected.Effect)
	}
	delete(node.Annotations, ChaosBladeTaintAnnotation)
	for _, key := range chaosBladeTaintAnnotations() {
		delete(node.Annotations, key)
	}
	// Verify: injected taint removed, other-controller taint preserved
	if len(node.Spec.Taints) != 2 {
		t.Fatalf("expected 2 taints after restore, got %d: %v", len(node.Spec.Taints), node.Spec.Taints)
	}
	found := false
	// NOTE: the loop variable shadows the *testing.T `t` inside the loop body.
	for _, t := range node.Spec.Taints {
		if t.Key == "other-controller/key" {
			found = true
		}
	}
	if !found {
		t.Error("expected other-controller taint to be preserved after restore")
	}
}

// TestRestoreNodeTaints_NoBackup: a node that had no taints before injection
// ends up with an empty taint list after restore.
func TestRestoreNodeTaints_NoBackup(t *testing.T) {
	// Node had no taints before injection, so no backup exists
	injectedTaint := v1.Taint{Key: DefaultTaintKey, Value: DefaultTaintValue, Effect: v1.TaintEffectNoSchedule}
	injectedBytes, _ := json.Marshal(injectedTaint)
	node := &v1.Node{
		ObjectMeta: metav1.ObjectMeta{
			Name: "test-node",
			Annotations: map[string]string{
				ChaosBladeExperimentAnnotation:    "my-experiment",
				ChaosBladeInjectedTaintAnnotation: string(injectedBytes),
				ChaosBladeTaintAnnotation:         ChaosBladeModifyAction,
			},
		},
		Spec: v1.NodeSpec{
			Taints: []v1.Taint{
				{Key: DefaultTaintKey, Value: DefaultTaintValue, Effect: v1.TaintEffectNoSchedule},
			},
		},
	}
	// Simulate restore logic: remove only the injected taint
	if injectedStr, ok := node.Annotations[ChaosBladeInjectedTaintAnnotation]; ok {
		var injected v1.Taint
		if err := json.Unmarshal([]byte(injectedStr), &injected); err != nil {
			t.Fatalf("failed to unmarshal injected taint: %v", err)
		}
		node.Spec.Taints = removeTaintByKeyEffect(node.Spec.Taints, injected.Key, injected.Effect)
	}
	delete(node.Annotations, ChaosBladeTaintAnnotation)
	for _, key := range chaosBladeTaintAnnotations() {
		delete(node.Annotations, key)
	}
	// Should have no taints after restore
	if len(node.Spec.Taints) != 0 {
		t.Errorf("expected 0 taints after restore (node had no original taints), got %d: %v", len(node.Spec.Taints), node.Spec.Taints)
	}
}

// containsString is a thin alias over strings.Contains used by the tests above.
func containsString(s, substr string) bool {
	return strings.Contains(s, substr)
}


================================================
FILE: exec/pod/terminating.go
================================================
/*
 * Copyright 2025 The ChaosBlade Authors
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
*/

package pod

import (
	"context"
	"fmt"

	"github.com/chaosblade-io/chaosblade-spec-go/spec"
	"github.com/chaosblade-io/chaosblade-spec-go/util"
	"github.com/sirupsen/logrus"
	v1 "k8s.io/api/core/v1"
	apierrors "k8s.io/apimachinery/pkg/api/errors"
	"k8s.io/apimachinery/pkg/types"

	"github.com/chaosblade-io/chaosblade-operator/channel"
	"github.com/chaosblade-io/chaosblade-operator/exec/model"
	"github.com/chaosblade-io/chaosblade-operator/pkg/apis/chaosblade/v1alpha1"
)

const (
	// PodTerminatingFinalizer is the finalizer added to the target pod to block its deletion
	PodTerminatingFinalizer = "chaosblade.io/pod-terminating"
)

// PodTerminatingActionSpec declares the "terminating" action: make a pod hang
// in the Terminating state by adding a finalizer before deleting it.
type PodTerminatingActionSpec struct {
	spec.BaseExpActionCommandSpec
}

// NewPodTerminatingActionSpec wires the executor and CLI examples for the action.
func NewPodTerminatingActionSpec(client *channel.Client) spec.ExpActionCommandSpec {
	return &PodTerminatingActionSpec{
		spec.BaseExpActionCommandSpec{
			ActionMatchers: []spec.ExpFlagSpec{},
			ActionFlags:    []spec.ExpFlagSpec{},
			ActionExecutor: &PodTerminatingActionExecutor{client: client},
			ActionExample: `# Make the pod stuck in Terminating state in the default namespace
blade create k8s pod-pod terminating --names nginx-app --namespace default --kubeconfig ~/.kube/config
# Make pods stuck in Terminating state by labels
blade create k8s pod-pod terminating --labels app=guestbook --namespace default --evict-count 2 --kubeconfig ~/.kube/config
`,
			ActionCategories: []string{model.CategorySystemContainer},
		},
	}
}

func (*PodTerminatingActionSpec) Name() string {
	return "terminating"
}

func (*PodTerminatingActionSpec) Aliases() []string {
	return []string{}
}

func (*PodTerminatingActionSpec) ShortDesc() string {
	return "Make pod stuck in Terminating state by adding a finalizer"
}

func (*PodTerminatingActionSpec) LongDesc() string {
	return "Simulate the scenario where a Pod is stuck in Terminating state due to uncleaned finalizers. " +
		"This fault injects by adding a custom finalizer to the target Pod and then deleting it, " +
		"which causes the Pod to remain in Terminating state because the finalizer prevents garbage collection. " +
		"When the experiment is destroyed, the finalizer will be removed so the Pod can be fully deleted."
}

// PodTerminatingActionExecutor performs the inject (create) and recover
// (destroy) operations for the terminating action.
type PodTerminatingActionExecutor struct {
	client *channel.Client
}

func (*PodTerminatingActionExecutor) Name() string {
	return "terminating"
}

func (*PodTerminatingActionExecutor) SetChannel(channel spec.Channel) {}

// Exec dispatches to destroy when the context carries the destroy flag,
// otherwise to create.
func (d *PodTerminatingActionExecutor) Exec(uid string, ctx context.Context, expModel *spec.ExpModel) *spec.Response {
	if _, ok := spec.IsDestroy(ctx); ok {
		return d.destroy(uid, ctx, expModel)
	}
	return d.create(uid, ctx, expModel)
}

// create injects the fault for every matched pod: add the finalizer, then
// delete the pod so it hangs in Terminating. Per-pod failures are recorded in
// resource statuses; the experiment succeeds if at least one pod was injected.
func (d *PodTerminatingActionExecutor) create(uid string, ctx context.Context, expModel *spec.ExpModel) *spec.Response {
	experimentId := model.GetExperimentIdFromContext(ctx)
	logrusField := logrus.WithField("experiment", experimentId)
	containerObjectMetaList, err := model.GetContainerObjectMetaListFromContext(ctx)
	if err != nil {
		util.Errorf(uid, util.GetRunFuncName(), err.Error())
		return spec.ResponseFailWithResult(spec.ContainerInContextNotFound,
			v1alpha1.CreateFailExperimentStatus(spec.ContainerInContextNotFound.Msg, []v1alpha1.ResourceStatus{}))
	}
	statuses := make([]v1alpha1.ResourceStatus, 0)
	success := false
	for _, meta := range containerObjectMetaList {
		status := v1alpha1.ResourceStatus{
			Kind:       v1alpha1.PodKind,
			Identifier: meta.GetIdentifier(),
		}
		pod := &v1.Pod{}
		err := d.client.Get(ctx, types.NamespacedName{Name: meta.PodName, Namespace: meta.Namespace}, pod)
		if err != nil {
			logrusField.Warningf("get pod %s/%s err, %v", meta.Namespace, meta.PodName, err)
			status = status.CreateFailResourceStatus(err.Error(), spec.K8sExecFailed.Code)
			statuses = append(statuses, status)
			continue
		}
		// Skip if pod is already being deleted
		if pod.DeletionTimestamp != nil {
			logrusField.Warningf("pod %s/%s is already terminating, cannot inject fault", meta.Namespace, meta.PodName)
			status = status.CreateFailResourceStatus("pod is already in Terminating state, no fault injected", spec.K8sExecFailed.Code)
			statuses = append(statuses, status)
			continue
		}
		// Step 1: Add the finalizer to the pod
		if err := d.addFinalizer(ctx, pod); err != nil {
			logrusField.Warningf("add finalizer to pod %s/%s err, %v", meta.Namespace, meta.PodName, err)
			status = status.CreateFailResourceStatus(fmt.Sprintf("add finalizer failed: %v", err), spec.K8sExecFailed.Code)
			statuses = append(statuses, status)
			continue
		}
		// Step 2: Delete the pod, it will be stuck in Terminating because of the finalizer
		if err := d.client.Delete(ctx, pod); err != nil {
			logrusField.Warningf("delete pod %s/%s err, %v", meta.Namespace, meta.PodName, err)
			// Best-effort rollback: remove the finalizer we just added
			if rbErr := d.removeFinalizer(ctx, pod); rbErr != nil {
				logrusField.Warningf("rollback finalizer for pod %s/%s failed: %v", meta.Namespace, meta.PodName, rbErr)
			}
			status = status.CreateFailResourceStatus(fmt.Sprintf("delete pod failed: %v", err), spec.K8sExecFailed.Code)
			statuses = append(statuses, status)
			continue
		}
		logrusField.Infof("pod %s/%s is now stuck in Terminating state with finalizer %s", meta.Namespace, meta.PodName, PodTerminatingFinalizer)
		status = status.CreateSuccessResourceStatus()
		statuses = append(statuses, status)
		success = true
	}
	var experimentStatus v1alpha1.ExperimentStatus
	if success {
		experimentStatus = v1alpha1.CreateSuccessExperimentStatus(statuses)
	} else {
		experimentStatus = v1alpha1.CreateFailExperimentStatus("see resStatuses for details", statuses)
	}
	return spec.ReturnResultIgnoreCode(experimentStatus)
}

// destroy removes the finalizer from every matched pod so deletion can
// complete. A NotFound pod counts as already destroyed; any other error marks
// the whole destroy as failed so it can be retried.
func (d *PodTerminatingActionExecutor) destroy(uid string, ctx context.Context, expModel *spec.ExpModel) *spec.Response {
	experimentId := model.GetExperimentIdFromContext(ctx)
	logrusField := logrus.WithField("experiment", experimentId)
	containerObjectMetaList, err := model.GetContainerObjectMetaListFromContext(ctx)
	if err != nil {
		util.Errorf(uid, util.GetRunFuncName(), err.Error())
		return spec.ResponseFailWithResult(spec.ContainerInContextNotFound,
			v1alpha1.CreateFailExperimentStatus(spec.ContainerInContextNotFound.Msg, []v1alpha1.ResourceStatus{}))
	}
	statuses := make([]v1alpha1.ResourceStatus, 0)
	allSuccess := true
	for _, meta := range containerObjectMetaList {
		status := v1alpha1.ResourceStatus{
			Kind:       v1alpha1.PodKind,
			Identifier: meta.GetIdentifier(),
		}
		pod := &v1.Pod{}
		err := d.client.Get(ctx, types.NamespacedName{Name: meta.PodName, Namespace: meta.Namespace}, pod)
		if err != nil {
			// Distinguish between NotFound and other errors (RBAC, API server unreachable, etc.)
			if apierrors.IsNotFound(err) {
				// Pod is already fully deleted, treat as success
				logrusField.Infof("pod %s/%s already deleted", meta.Namespace, meta.PodName)
				status = status.CreateSuccessResourceStatus()
				status.State = v1alpha1.DestroyedState
			} else {
				// Other errors mean the finalizer may still be present
				logrusField.Warningf("get pod %s/%s err, %v", meta.Namespace, meta.PodName, err)
				status = status.CreateFailResourceStatus(fmt.Sprintf("get pod failed: %v", err), spec.K8sExecFailed.Code)
				allSuccess = false
			}
			statuses = append(statuses, status)
			continue
		}
		// Remove the finalizer to allow the pod to be fully deleted
		if err := d.removeFinalizer(ctx, pod); err != nil {
			logrusField.Warningf("remove finalizer from pod %s/%s err, %v", meta.Namespace, meta.PodName, err)
			status = status.CreateFailResourceStatus(fmt.Sprintf("remove finalizer failed: %v", err), spec.K8sExecFailed.Code)
			statuses = append(statuses, status)
			allSuccess = false
			continue
		}
		logrusField.Infof("removed finalizer from pod %s/%s, pod will be fully deleted", meta.Namespace, meta.PodName)
		status = status.CreateSuccessResourceStatus()
		status.State = v1alpha1.DestroyedState
		statuses = append(statuses, status)
	}
	if allSuccess {
		return spec.ReturnResultIgnoreCode(v1alpha1.CreateDestroyedExperimentStatus(statuses))
	}
	return spec.ReturnResultIgnoreCode(v1alpha1.CreateFailExperimentStatus("see resStatuses for details", statuses))
}

// addFinalizer adds the chaosblade pod-terminating finalizer to the pod
func (d *PodTerminatingActionExecutor) addFinalizer(ctx context.Context, pod *v1.Pod) error {
	finalizers := pod.GetFinalizers()
	for _, f := range finalizers {
		if f == PodTerminatingFinalizer {
			// Finalizer already exists
			return nil
		}
	}
	pod.SetFinalizers(append(finalizers, PodTerminatingFinalizer))
	return d.client.Update(ctx, pod)
}

// removeFinalizer removes the chaosblade pod-terminating finalizer from the pod
func (d *PodTerminatingActionExecutor) removeFinalizer(ctx context.Context, pod *v1.Pod) error {
	finalizers := pod.GetFinalizers()
	newFinalizers := make([]string, 0, len(finalizers))
	found := false
	for _, f := range finalizers {
		if f == PodTerminatingFinalizer {
			found = true
			continue
		}
		newFinalizers = append(newFinalizers, f)
	}
	if !found {
		// Finalizer not found, nothing to remove
		return nil
	}
	pod.SetFinalizers(newFinalizers)
	return d.client.Update(ctx, pod)
}


================================================
FILE: exec/service/controller.go
================================================
/*
 * Copyright 2025 The ChaosBlade Authors
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
*/

package service

import (
	"context"
	"fmt"
	"strings"

	"github.com/chaosblade-io/chaosblade-spec-go/spec"
	"github.com/sirupsen/logrus"

	"github.com/chaosblade-io/chaosblade-operator/channel"
	"github.com/chaosblade-io/chaosblade-operator/exec/model"
	"github.com/chaosblade-io/chaosblade-operator/pkg/apis/chaosblade/v1alpha1"
)

// ServiceMetaListKey is the context key under which the list of targeted
// services is carried between Destroy and the executor.
const ServiceMetaListKey = "ServiceMetaListKey"

// ServiceMeta identifies one service touched by an experiment.
type ServiceMeta struct {
	Id          string
	ServiceName string
	Namespace   string
}

type ServiceMetaList []ServiceMeta

// GetServiceMetaListFromContext retrieves the service meta list stored by
// SetServiceMetaListToContext; it errors when the context carries none.
func GetServiceMetaListFromContext(ctx context.Context) (ServiceMetaList, error) {
	val := ctx.Value(ServiceMetaListKey)
	if val == nil {
		return nil, fmt.Errorf("less service meta in context")
	}
	return val.(ServiceMetaList), nil
}

// SetServiceMetaListToContext stores the service meta list in the context.
func SetServiceMetaListToContext(ctx context.Context, list ServiceMetaList) context.Context {
	return context.WithValue(ctx, ServiceMetaListKey, list)
}

// ExpController is the experiment controller for the "service" scope.
type ExpController struct {
	model.BaseExperimentController
}

// NewExpController builds the service-scope controller with its resource model spec.
func NewExpController(client *channel.Client) model.ExperimentController {
	return &ExpController{
		model.BaseExperimentController{
			Client:            client,
			ResourceModelSpec: NewResourceModelSpec(client),
		},
	}
}

func (*ExpController) Name() string {
	return "service"
}

// Create extracts the experiment model from the spec and executes it.
func (e *ExpController) Create(ctx context.Context, expSpec v1alpha1.ExperimentSpec) *spec.Response {
	expModel := model.ExtractExpModelFromExperimentSpec(expSpec)
	experimentId := model.GetExperimentIdFromContext(ctx)
	logrus.WithField("experiment", experimentId).Infof("creating service experiment")
	return e.Exec(ctx, expModel)
}

// Destroy rebuilds the list of previously-affected services from the old
// experiment status (only entries that succeeded on create) and re-executes
// in destroy mode.
func (e *ExpController) Destroy(ctx context.Context, expSpec v1alpha1.ExperimentSpec, oldExpStatus v1alpha1.ExperimentStatus) *spec.Response {
	experimentId := model.GetExperimentIdFromContext(ctx)
	logrus.WithField("experiment", experimentId).Infoln("start to destroy service experiment")
	expModel := model.ExtractExpModelFromExperimentSpec(expSpec)
	statuses := oldExpStatus.ResStatuses
	if statuses == nil {
		// Nothing was recorded as created; nothing to clean up.
		return spec.ReturnSuccess(v1alpha1.CreateSuccessExperimentStatus([]v1alpha1.ResourceStatus{}))
	}
	serviceMetaList := ServiceMetaList{}
	for _, status := range statuses {
		if !status.Success {
			continue
		}
		meta := parseServiceIdentifier(status.Identifier)
		meta.Id = status.Id
		serviceMetaList = append(serviceMetaList, meta)
	}
	ctx = SetServiceMetaListToContext(ctx, serviceMetaList)
	return e.Exec(ctx, expModel)
}

// parseServiceIdentifier parses identifier in format "Namespace/ServiceName"
func parseServiceIdentifier(identifier string) ServiceMeta {
	parts := strings.SplitN(identifier, "/", 2)
	meta := ServiceMeta{}
	if len(parts) >= 1 {
		meta.Namespace = parts[0]
	}
	if len(parts) >= 2 {
		meta.ServiceName = parts[1]
	}
	return meta
}


================================================
FILE: exec/service/create.go
================================================
/*
 * Copyright 2025 The ChaosBlade Authors
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
*/

package service

import (
	"context"
	"fmt"
	"strconv"

	"github.com/chaosblade-io/chaosblade-spec-go/spec"
	"github.com/chaosblade-io/chaosblade-spec-go/util"
	"github.com/sirupsen/logrus"
	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/types"
	"k8s.io/apimachinery/pkg/util/intstr"

	"github.com/chaosblade-io/chaosblade-operator/channel"
	"github.com/chaosblade-io/chaosblade-operator/exec/model"
	"github.com/chaosblade-io/chaosblade-operator/pkg/apis/chaosblade/v1alpha1"
)

const (
	NamePrefixFlag      = "name-prefix"
	ServiceCountFlag    = "service-count"
	PortsPerServiceFlag = "ports-per-service"
)

// CreateServiceActionSpec declares the "create" action: create a batch of
// services to load the cluster's service machinery.
type CreateServiceActionSpec struct {
	spec.BaseExpActionCommandSpec
}

// NewCreateServiceActionSpec wires the flags, executor, and CLI example.
func NewCreateServiceActionSpec(client *channel.Client) spec.ExpActionCommandSpec {
	return &CreateServiceActionSpec{
		spec.BaseExpActionCommandSpec{
			ActionMatchers: []spec.ExpFlagSpec{},
			ActionFlags: []spec.ExpFlagSpec{
				&spec.ExpFlag{
					Name:     NamePrefixFlag,
					Desc:     "Name prefix for the created services",
					NoArgs:   false,
					Required: true,
				},
				&spec.ExpFlag{
					Name:     ServiceCountFlag,
					Desc:     "Number of services to create",
					NoArgs:   false,
					Required: true,
				},
				&spec.ExpFlag{
					Name:   PortsPerServiceFlag,
					Desc:   "Number of ports per service, min 1, max 100, default 10",
					NoArgs: false,
				},
			},
			ActionExecutor: &CreateServiceActionExecutor{client: client},
			ActionExample: `# Create 2000 services with prefix my-service in default namespace
blade create k8s service-self create --name-prefix my-service --namespace default --service-count 2000 --kubeconfig ~/.kube/config`,
			ActionCategories: []string{model.CategorySystemContainer},
		},
	}
}

func (*CreateServiceActionSpec) Name() string {
	return "create"
}

func (*CreateServiceActionSpec) Aliases() []string {
	return []string{}
}

func (*CreateServiceActionSpec) ShortDesc() string {
	return "Create services in batch"
}

func (*CreateServiceActionSpec) LongDesc() string {
	return "Create the specified number of Kubernetes services with given name prefix, each containing configurable port mappings and no selector"
}

// CreateServiceActionExecutor creates services on inject and deletes them on destroy.
type CreateServiceActionExecutor struct {
	client *channel.Client
}

func (*CreateServiceActionExecutor) Name() string {
	return "create"
}

func (*CreateServiceActionExecutor) SetChannel(channel spec.Channel) {
}

// Exec dispatches to destroy when the context carries the destroy flag,
// otherwise to create.
func (d *CreateServiceActionExecutor) Exec(uid string, ctx context.Context, expModel *spec.ExpModel) *spec.Response {
	if _, ok := spec.IsDestroy(ctx); ok {
		return d.destroy(uid, ctx, expModel)
	}
	return d.create(uid, ctx, expModel)
}

// create validates the flags (name-prefix required; service-count 1..20000;
// ports-per-service 1..100, default 10; namespace defaults to "default") and
// then creates one service per index. The experiment succeeds if at least one
// service was created; per-service failures are reported in resource statuses.
func (d *CreateServiceActionExecutor) create(uid string, ctx context.Context, expModel *spec.ExpModel) *spec.Response {
	logrusField := logrus.WithField("experiment", model.GetExperimentIdFromContext(ctx))
	namePrefix := expModel.ActionFlags[NamePrefixFlag]
	if namePrefix == "" {
		util.Errorf(uid, util.GetRunFuncName(), "name-prefix is required")
		return spec.ResponseFailWithResult(spec.ParameterLess,
			v1alpha1.CreateFailExperimentStatus("name-prefix is required", []v1alpha1.ResourceStatus{}),
			NamePrefixFlag)
	}
	namespace := expModel.ActionFlags["namespace"]
	if namespace == "" {
		namespace = "default"
	}
	serviceCountStr := expModel.ActionFlags[ServiceCountFlag]
	if serviceCountStr == "" {
		util.Errorf(uid, util.GetRunFuncName(), "service-count is required")
		return spec.ResponseFailWithResult(spec.ParameterLess,
			v1alpha1.CreateFailExperimentStatus("service-count is required", []v1alpha1.ResourceStatus{}),
			ServiceCountFlag)
	}
	serviceCount, err := strconv.Atoi(serviceCountStr)
	if err != nil {
		return spec.ResponseFailWithResult(spec.ParameterIllegal,
			v1alpha1.CreateFailExperimentStatus(fmt.Sprintf("service-count is invalid: %v", err), []v1alpha1.ResourceStatus{}),
			ServiceCountFlag, serviceCountStr, err)
	}
	if serviceCount < 1 || serviceCount > 20000 {
		return spec.ResponseFailWithResult(spec.ParameterIllegal,
			v1alpha1.CreateFailExperimentStatus(fmt.Sprintf("service-count must be between 1 and 20000, got %d", serviceCount), []v1alpha1.ResourceStatus{}),
			ServiceCountFlag, strconv.Itoa(serviceCount), "must be between 1 and 20000")
	}
	portsPerService := 10
	if v := expModel.ActionFlags[PortsPerServiceFlag]; v != "" {
		portsPerService, err = strconv.Atoi(v)
		if err != nil {
			return spec.ResponseFailWithResult(spec.ParameterIllegal,
				v1alpha1.CreateFailExperimentStatus(fmt.Sprintf("ports-per-service is invalid: %v", err), []v1alpha1.ResourceStatus{}),
				PortsPerServiceFlag, v, err)
		}
	}
	if portsPerService < 1 || portsPerService > 100 {
		return spec.ResponseFailWithResult(spec.ParameterIllegal,
			v1alpha1.CreateFailExperimentStatus(fmt.Sprintf("ports-per-service must be between 1 and 100, got %d", portsPerService), []v1alpha1.ResourceStatus{}),
			PortsPerServiceFlag, strconv.Itoa(portsPerService), "must be between 1 and 100")
	}
	logrusField.Infof("creating %d services with prefix %s in namespace %s", serviceCount, namePrefix, namespace)
	statuses := make([]v1alpha1.ResourceStatus, 0)
	success := false
	for i := 0; i < serviceCount; i++ {
		// Service names embed the experiment uid so destroy can match them safely.
		serviceName := fmt.Sprintf("%s-%s-%d", namePrefix, uid, i)
		status := v1alpha1.ResourceStatus{
			Kind:       v1alpha1.ServiceKind,
			Identifier: fmt.Sprintf("%s/%s", namespace, serviceName),
		}
		svc := buildService(serviceName, namespace, portsPerService, uid)
		if err := d.client.Create(context.TODO(), svc); err != nil {
			logrusField.Warningf("create service %s err, %v", serviceName, err)
			status = status.CreateFailResourceStatus(err.Error(), spec.K8sExecFailed.Code)
		} else {
			status = status.CreateSuccessResourceStatus()
			success = true
		}
		statuses = append(statuses, status)
	}
	var experimentStatus v1alpha1.ExperimentStatus
	if success {
		experimentStatus = v1alpha1.CreateSuccessExperimentStatus(statuses)
	} else {
		experimentStatus = v1alpha1.CreateFailExperimentStatus("see resStatuses for details", statuses)
	}
	return spec.ReturnResultIgnoreCode(experimentStatus)
}

func (d *CreateServiceActionExecutor) destroy(uid string, ctx context.Context, expModel *spec.ExpModel) *spec.Response {
	logrusField := logrus.WithField("experiment",
model.GetExperimentIdFromContext(ctx)) serviceMetaList, err := GetServiceMetaListFromContext(ctx) if err != nil { util.Errorf(uid, util.GetRunFuncName(), err.Error()) return spec.ResponseFailWithResult(spec.ContainerInContextNotFound, v1alpha1.CreateFailExperimentStatus("cannot get service meta from context", []v1alpha1.ResourceStatus{})) } statuses := make([]v1alpha1.ResourceStatus, 0) success := false for _, meta := range serviceMetaList { status := v1alpha1.ResourceStatus{ Id: meta.Id, Kind: v1alpha1.ServiceKind, Identifier: fmt.Sprintf("%s/%s", meta.Namespace, meta.ServiceName), } svc := &v1.Service{} objectKey := types.NamespacedName{Name: meta.ServiceName, Namespace: meta.Namespace} if err := d.client.Get(context.TODO(), objectKey, svc); err != nil { logrusField.Warningf("get service %s err, %v", meta.ServiceName, err) status = status.CreateFailResourceStatus(err.Error(), spec.K8sExecFailed.Code) statuses = append(statuses, status) continue } if _, ok := svc.Annotations[ServiceAnnotation]; !ok { errMsg := fmt.Sprintf("service %s/%s is not created by chaosblade (missing annotation %s), skip delete", meta.Namespace, meta.ServiceName, ServiceAnnotation) logrusField.Warning(errMsg) status = status.CreateFailResourceStatus(errMsg, spec.K8sExecFailed.Code) statuses = append(statuses, status) continue } objectMeta := metav1.ObjectMeta{Name: meta.ServiceName, Namespace: meta.Namespace} if err := d.client.Delete(context.TODO(), &v1.Service{ObjectMeta: objectMeta}); err != nil { logrusField.Warningf("delete service %s err, %v", meta.ServiceName, err) status = status.CreateFailResourceStatus(err.Error(), spec.K8sExecFailed.Code) } else { status.State = v1alpha1.DestroyedState status.Success = true success = true } statuses = append(statuses, status) } var experimentStatus v1alpha1.ExperimentStatus if success { experimentStatus = v1alpha1.CreateDestroyedExperimentStatus(statuses) } else { experimentStatus = v1alpha1.CreateFailExperimentStatus("see resStatuses for 
details", statuses) } return spec.ReturnResultIgnoreCode(experimentStatus) } func buildService(name, namespace string, portsPerService int, uid string) *v1.Service { const portBase = 8000 ports := make([]v1.ServicePort, 0, portsPerService) for i := 0; i < portsPerService; i++ { port := int32(portBase + i) ports = append(ports, v1.ServicePort{ Name: fmt.Sprintf("p%d", port), Port: port, TargetPort: intstr.FromInt32(port), }) } return &v1.Service{ ObjectMeta: metav1.ObjectMeta{ Name: name, Namespace: namespace, Annotations: map[string]string{ "chaosblade.io/service": fmt.Sprintf("create-%s", uid), }, }, Spec: v1.ServiceSpec{ Ports: ports, }, } } ================================================ FILE: exec/service/modify.go ================================================ /* * Copyright 2025 The ChaosBlade Authors * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. 
*/ package service import ( "context" "encoding/json" "fmt" "github.com/chaosblade-io/chaosblade-spec-go/spec" "github.com/chaosblade-io/chaosblade-spec-go/util" "github.com/sirupsen/logrus" v1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/types" "github.com/chaosblade-io/chaosblade-operator/channel" "github.com/chaosblade-io/chaosblade-operator/exec/model" "github.com/chaosblade-io/chaosblade-operator/pkg/apis/chaosblade/v1alpha1" ) const ( ServiceNameFlag = "name" ExternalTrafficPolicyFlag = "externalTrafficPolicy" InternalTrafficPolicyFlag = "internalTrafficPolicy" ServiceAnnotation = "chaosblade.io/service" ServiceModifyHistoryAnnotation = "chaosblade.io/service-modify-history" ) type ModifyServiceActionSpec struct { spec.BaseExpActionCommandSpec } func NewModifyServiceActionSpec(client *channel.Client) spec.ExpActionCommandSpec { return &ModifyServiceActionSpec{ spec.BaseExpActionCommandSpec{ ActionMatchers: []spec.ExpFlagSpec{}, ActionFlags: []spec.ExpFlagSpec{ &spec.ExpFlag{ Name: ServiceNameFlag, Desc: "Service name to modify", NoArgs: false, Required: true, }, &spec.ExpFlag{ Name: ExternalTrafficPolicyFlag, Desc: "Set externalTrafficPolicy, values: Local or Cluster", NoArgs: false, }, &spec.ExpFlag{ Name: InternalTrafficPolicyFlag, Desc: "Set internalTrafficPolicy, values: Local or Cluster", NoArgs: false, }, }, ActionExecutor: &ModifyServiceActionExecutor{client: client}, ActionExample: `# Modify externalTrafficPolicy to Local blade create k8s service-self modify --name my-service --namespace default --externalTrafficPolicy Local --kubeconfig ~/.kube/config # Modify internalTrafficPolicy to Local blade create k8s service-self modify --name my-service --namespace default --internalTrafficPolicy Local --kubeconfig ~/.kube/config # Modify both policies blade create k8s service-self modify --name my-service --namespace default --externalTrafficPolicy Local --internalTrafficPolicy Cluster --kubeconfig ~/.kube/config`, ActionCategories: 
[]string{model.CategorySystemContainer}, }, } } func (*ModifyServiceActionSpec) Name() string { return "modify" } func (*ModifyServiceActionSpec) Aliases() []string { return []string{} } func (*ModifyServiceActionSpec) ShortDesc() string { return "Modify service traffic policy" } func (*ModifyServiceActionSpec) LongDesc() string { return "Modify existing Kubernetes service's externalTrafficPolicy or internalTrafficPolicy" } type ModifyServiceActionExecutor struct { client *channel.Client } func (*ModifyServiceActionExecutor) Name() string { return "modify" } func (*ModifyServiceActionExecutor) SetChannel(channel spec.Channel) { } func (d *ModifyServiceActionExecutor) Exec(uid string, ctx context.Context, expModel *spec.ExpModel) *spec.Response { if _, ok := spec.IsDestroy(ctx); ok { return d.destroy(uid, ctx, expModel) } return d.create(uid, ctx, expModel) } func (d *ModifyServiceActionExecutor) create(uid string, ctx context.Context, expModel *spec.ExpModel) *spec.Response { logrusField := logrus.WithField("experiment", model.GetExperimentIdFromContext(ctx)) serviceName := expModel.ActionFlags[ServiceNameFlag] if serviceName == "" { util.Errorf(uid, util.GetRunFuncName(), "name is required") return spec.ResponseFailWithResult(spec.ParameterLess, v1alpha1.CreateFailExperimentStatus("name is required", []v1alpha1.ResourceStatus{}), ServiceNameFlag) } namespace := expModel.ActionFlags["namespace"] if namespace == "" { namespace = "default" } externalPolicy := expModel.ActionFlags[ExternalTrafficPolicyFlag] internalPolicy := expModel.ActionFlags[InternalTrafficPolicyFlag] if externalPolicy == "" && internalPolicy == "" { util.Errorf(uid, util.GetRunFuncName(), "at least one of externalTrafficPolicy or internalTrafficPolicy is required") return spec.ResponseFailWithResult(spec.ParameterLess, v1alpha1.CreateFailExperimentStatus("at least one of externalTrafficPolicy or internalTrafficPolicy is required", []v1alpha1.ResourceStatus{}), fmt.Sprintf("%s or %s", 
ExternalTrafficPolicyFlag, InternalTrafficPolicyFlag)) } if externalPolicy != "" && !isValidPolicy(externalPolicy) { return spec.ResponseFailWithResult(spec.ParameterIllegal, v1alpha1.CreateFailExperimentStatus(fmt.Sprintf("invalid externalTrafficPolicy: %s, must be Local or Cluster", externalPolicy), []v1alpha1.ResourceStatus{}), ExternalTrafficPolicyFlag, externalPolicy, "must be Local or Cluster") } if internalPolicy != "" && !isValidPolicy(internalPolicy) { return spec.ResponseFailWithResult(spec.ParameterIllegal, v1alpha1.CreateFailExperimentStatus(fmt.Sprintf("invalid internalTrafficPolicy: %s, must be Local or Cluster", internalPolicy), []v1alpha1.ResourceStatus{}), InternalTrafficPolicyFlag, internalPolicy, "must be Local or Cluster") } logrusField.Infof("modifying service %s/%s, externalTrafficPolicy=%s, internalTrafficPolicy=%s", namespace, serviceName, externalPolicy, internalPolicy) status := v1alpha1.ResourceStatus{ Kind: v1alpha1.ServiceKind, Identifier: fmt.Sprintf("%s/%s", namespace, serviceName), } svc := &v1.Service{} objectKey := types.NamespacedName{Name: serviceName, Namespace: namespace} if err := d.client.Get(context.TODO(), objectKey, svc); err != nil { logrusField.Errorf("get service %s err, %v", serviceName, err) status = status.CreateFailResourceStatus(err.Error(), spec.K8sExecFailed.Code) return spec.ReturnResultIgnoreCode( v1alpha1.CreateFailExperimentStatus(err.Error(), []v1alpha1.ResourceStatus{status}), ) } if existing, ok := svc.Annotations[ServiceAnnotation]; ok && existing != "" { err := fmt.Errorf("service %s/%s already has chaos experiment injected (annotation %s=%s), modifying service configuration is not allowed", namespace, serviceName, ServiceAnnotation, existing) logrusField.Warningf("%v", err) status = status.CreateFailResourceStatus(err.Error(), spec.K8sExecFailed.Code) return spec.ReturnResultIgnoreCode( v1alpha1.CreateFailExperimentStatus(err.Error(), []v1alpha1.ResourceStatus{status}), ) } if svc.Annotations == nil { 
svc.Annotations = make(map[string]string) } history := make(map[string]string) if externalPolicy != "" { history[ExternalTrafficPolicyFlag] = string(svc.Spec.ExternalTrafficPolicy) switch externalPolicy { case string(v1.ServiceExternalTrafficPolicyTypeLocal): svc.Spec.ExternalTrafficPolicy = v1.ServiceExternalTrafficPolicyTypeLocal case string(v1.ServiceExternalTrafficPolicyTypeCluster): svc.Spec.ExternalTrafficPolicy = v1.ServiceExternalTrafficPolicyTypeCluster default: err := fmt.Errorf("invalid externalTrafficPolicy %q, must be %q or %q", externalPolicy, v1.ServiceExternalTrafficPolicyTypeLocal, v1.ServiceExternalTrafficPolicyTypeCluster) logrusField.Errorf("modify service %s err, %v", serviceName, err) status = status.CreateFailResourceStatus(err.Error(), spec.K8sExecFailed.Code) return spec.ReturnResultIgnoreCode( v1alpha1.CreateFailExperimentStatus(err.Error(), []v1alpha1.ResourceStatus{status}), ) } } if internalPolicy != "" { originalInternal := "" if svc.Spec.InternalTrafficPolicy != nil { originalInternal = string(*svc.Spec.InternalTrafficPolicy) } history[InternalTrafficPolicyFlag] = originalInternal policy := v1.ServiceInternalTrafficPolicyType(internalPolicy) svc.Spec.InternalTrafficPolicy = &policy } historyBytes, err := json.Marshal(history) if err != nil { logrusField.Errorf("marshal modify history for service %s err, %v", serviceName, err) status = status.CreateFailResourceStatus(err.Error(), spec.K8sExecFailed.Code) return spec.ReturnResultIgnoreCode( v1alpha1.CreateFailExperimentStatus(err.Error(), []v1alpha1.ResourceStatus{status}), ) } svc.Annotations[ServiceAnnotation] = fmt.Sprintf("modify-%s", uid) svc.Annotations[ServiceModifyHistoryAnnotation] = string(historyBytes) if err := d.client.Update(context.TODO(), svc); err != nil { logrusField.Errorf("update service %s err, %v", serviceName, err) status = status.CreateFailResourceStatus(err.Error(), spec.K8sExecFailed.Code) return spec.ReturnResultIgnoreCode( 
v1alpha1.CreateFailExperimentStatus(err.Error(), []v1alpha1.ResourceStatus{status}), ) } status = status.CreateSuccessResourceStatus() return spec.ReturnResultIgnoreCode(v1alpha1.CreateSuccessExperimentStatus([]v1alpha1.ResourceStatus{status})) } func (d *ModifyServiceActionExecutor) destroy(uid string, ctx context.Context, expModel *spec.ExpModel) *spec.Response { logrusField := logrus.WithField("experiment", model.GetExperimentIdFromContext(ctx)) serviceMetaList, err := GetServiceMetaListFromContext(ctx) if err != nil { util.Errorf(uid, util.GetRunFuncName(), err.Error()) return spec.ResponseFailWithResult(spec.ContainerInContextNotFound, v1alpha1.CreateFailExperimentStatus("cannot get service meta from context", []v1alpha1.ResourceStatus{})) } statuses := make([]v1alpha1.ResourceStatus, 0) for _, meta := range serviceMetaList { status := v1alpha1.ResourceStatus{ Id: meta.Id, Kind: v1alpha1.ServiceKind, Identifier: fmt.Sprintf("%s/%s", meta.Namespace, meta.ServiceName), } svc := &v1.Service{} objectKey := types.NamespacedName{Name: meta.ServiceName, Namespace: meta.Namespace} if err := d.client.Get(context.TODO(), objectKey, svc); err != nil { logrusField.Errorf("get service %s for restoring err, %v", meta.ServiceName, err) status = status.CreateFailResourceStatus(err.Error(), spec.K8sExecFailed.Code) statuses = append(statuses, status) continue } expected := fmt.Sprintf("modify-%s", uid) if existing, ok := svc.Annotations[ServiceAnnotation]; ok && existing == expected { if historyStr, hasHistory := svc.Annotations[ServiceModifyHistoryAnnotation]; hasHistory { history := make(map[string]string) if err := json.Unmarshal([]byte(historyStr), &history); err != nil { logrusField.Errorf("unmarshal modify history for service %s err, %v", meta.ServiceName, err) status = status.CreateFailResourceStatus(err.Error(), spec.K8sExecFailed.Code) statuses = append(statuses, status) continue } if original, exists := history[ExternalTrafficPolicyFlag]; exists { 
svc.Spec.ExternalTrafficPolicy = v1.ServiceExternalTrafficPolicyType(original) } if original, exists := history[InternalTrafficPolicyFlag]; exists { if original == "" { svc.Spec.InternalTrafficPolicy = nil } else { restored := v1.ServiceInternalTrafficPolicyType(original) svc.Spec.InternalTrafficPolicy = &restored } } delete(svc.Annotations, ServiceModifyHistoryAnnotation) } delete(svc.Annotations, ServiceAnnotation) if err := d.client.Update(context.TODO(), svc); err != nil { logrusField.Errorf("restore service %s err, %v", meta.ServiceName, err) status = status.CreateFailResourceStatus(err.Error(), spec.K8sExecFailed.Code) statuses = append(statuses, status) continue } } status.State = v1alpha1.DestroyedState status.Success = true statuses = append(statuses, status) } return spec.ReturnResultIgnoreCode(v1alpha1.CreateDestroyedExperimentStatus(statuses)) } func isValidPolicy(policy string) bool { return policy == "Local" || policy == "Cluster" } ================================================ FILE: exec/service/service.go ================================================ /* * Copyright 2025 The ChaosBlade Authors * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. 
*/ package service import ( "github.com/chaosblade-io/chaosblade-spec-go/spec" "github.com/chaosblade-io/chaosblade-operator/channel" "github.com/chaosblade-io/chaosblade-operator/exec/model" ) type ResourceModelSpec struct { model.BaseResourceExpModelSpec } func NewResourceModelSpec(client *channel.Client) model.ResourceExpModelSpec { modelSpec := &ResourceModelSpec{ model.NewBaseResourceExpModelSpec("service", client), } expModels := []spec.ExpModelCommandSpec{ NewSelfExpModelCommandSpec(client), } spec.AddFlagsToModelSpec(getResourceFlags, expModels...) modelSpec.RegisterExpModels(expModels...) return modelSpec } func getResourceFlags() []spec.ExpFlagSpec { return []spec.ExpFlagSpec{ &spec.ExpFlag{ Name: "namespace", Desc: "Namespace for the services, default is default", NoArgs: false, Required: false, }, } } type SelfExpModelCommandSpec struct { spec.BaseExpModelCommandSpec } func NewSelfExpModelCommandSpec(client *channel.Client) spec.ExpModelCommandSpec { return &SelfExpModelCommandSpec{ spec.BaseExpModelCommandSpec{ ExpFlags: []spec.ExpFlagSpec{}, ExpActions: []spec.ExpActionCommandSpec{ NewCreateServiceActionSpec(client), NewModifyServiceActionSpec(client), }, }, } } func (*SelfExpModelCommandSpec) Name() string { return "self" } func (*SelfExpModelCommandSpec) ShortDesc() string { return "Service experiments" } func (*SelfExpModelCommandSpec) LongDesc() string { return "Service experiments, such as creating services in batch or modifying service traffic policy" } func (*SelfExpModelCommandSpec) Example() string { return "blade create k8s service-self create --name-prefix my-service --namespace default --count 1000 --kubeconfig ~/.kube/config" } ================================================ FILE: go.mod ================================================ // Copyright 2025 The ChaosBlade Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
// You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. module github.com/chaosblade-io/chaosblade-operator go 1.25 require ( github.com/chaosblade-io/chaosblade-exec-cri v1.8.0 github.com/chaosblade-io/chaosblade-exec-os v1.8.1-0.20260422111537-3efa2ec759a6 github.com/chaosblade-io/chaosblade-spec-go v1.8.0 github.com/ethercflow/hookfs v0.3.0 github.com/hanwen/go-fuse v1.0.0 github.com/operator-framework/operator-sdk v0.17.0 github.com/sirupsen/logrus v1.9.3 github.com/spf13/pflag v1.0.5 k8s.io/api v0.31.0 k8s.io/apimachinery v0.31.0 k8s.io/client-go v12.0.0+incompatible sigs.k8s.io/controller-runtime v0.19.4 ) require ( cyphar.com/go-pathrs v0.2.1 // indirect github.com/Microsoft/go-winio v0.4.17 // indirect github.com/Microsoft/hcsshim v0.8.21 // indirect github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2 // indirect github.com/beorn7/perks v1.0.1 // indirect github.com/cespare/xxhash/v2 v2.3.0 // indirect github.com/cilium/ebpf v0.16.0 // indirect github.com/containerd/cgroups v1.0.2-0.20210605143700-23b51209bf7b // indirect github.com/containerd/containerd v1.5.6 // indirect github.com/containerd/continuity v0.1.0 // indirect github.com/containerd/fifo v1.0.0 // indirect github.com/containerd/ttrpc v1.0.2 // indirect github.com/containerd/typeurl v1.0.2 // indirect github.com/coreos/go-systemd/v22 v22.6.0 // indirect github.com/cyphar/filepath-securejoin v0.6.0 // indirect github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect github.com/dimchansky/utfbom v1.1.1 // indirect github.com/docker/distribution v2.7.1+incompatible // indirect github.com/docker/docker 
v1.4.2-0.20200203170920-46ec8731fbce // indirect github.com/docker/go-connections v0.4.0 // indirect github.com/docker/go-events v0.0.0-20190806004212-e31b211e4f1c // indirect github.com/docker/go-units v0.5.0 // indirect github.com/emicklei/go-restful/v3 v3.11.0 // indirect github.com/evanphx/json-patch/v5 v5.9.0 // indirect github.com/fsnotify/fsnotify v1.7.0 // indirect github.com/fxamacker/cbor/v2 v2.7.0 // indirect github.com/go-logr/logr v1.4.3 // indirect github.com/go-logr/zapr v1.3.0 // indirect github.com/go-ole/go-ole v1.3.0 // indirect github.com/go-openapi/jsonpointer v0.19.6 // indirect github.com/go-openapi/jsonreference v0.20.2 // indirect github.com/go-openapi/swag v0.22.4 // indirect github.com/godbus/dbus/v5 v5.1.0 // indirect github.com/gogo/googleapis v1.4.0 // indirect github.com/gogo/protobuf v1.3.2 // indirect github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect github.com/golang/protobuf v1.5.4 // indirect github.com/goodhosts/hostsfile v0.1.6 // indirect github.com/google/gnostic-models v0.6.8 // indirect github.com/google/go-cmp v0.7.0 // indirect github.com/google/gofuzz v1.2.0 // indirect github.com/google/uuid v1.6.0 // indirect github.com/gorilla/websocket v1.5.0 // indirect github.com/howeyc/gopass v0.0.0-20210920133722-c8aef6fb66ef // indirect github.com/imdario/mergo v0.3.12 // indirect github.com/josharian/intern v1.0.0 // indirect github.com/json-iterator/go v1.1.12 // indirect github.com/klauspost/compress v1.11.13 // indirect github.com/magefile/mage v1.15.0 // indirect github.com/mailru/easyjson v0.7.7 // indirect github.com/moby/locker v1.0.1 // indirect github.com/moby/spdystream v0.5.1 // indirect github.com/moby/sys/mountinfo v0.7.1 // indirect github.com/moby/sys/user v0.3.0 // indirect github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect github.com/modern-go/reflect2 v1.0.2 // indirect github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect 
github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f // indirect github.com/opencontainers/go-digest v1.0.0 // indirect github.com/opencontainers/image-spec v1.0.1 // indirect github.com/opencontainers/runc v1.2.8 // indirect github.com/opencontainers/runtime-spec v1.2.1 // indirect github.com/opencontainers/selinux v1.13.0 // indirect github.com/pkg/errors v0.9.1 // indirect github.com/prometheus/client_golang v1.19.1 // indirect github.com/prometheus/client_model v0.6.1 // indirect github.com/prometheus/common v0.55.0 // indirect github.com/prometheus/procfs v0.15.1 // indirect github.com/shirou/gopsutil v3.21.11+incompatible // indirect github.com/tklauser/go-sysconf v0.3.15 // indirect github.com/tklauser/numcpus v0.10.0 // indirect github.com/x448/float16 v0.8.4 // indirect github.com/yusufpapurcu/wmi v1.2.4 // indirect go.opencensus.io v0.22.3 // indirect go.uber.org/automaxprocs v1.6.0 // indirect go.uber.org/multierr v1.11.0 // indirect go.uber.org/zap v1.26.0 // indirect golang.org/x/crypto v0.46.0 // indirect golang.org/x/exp v0.0.0-20230515195305-f3d0a9c9a5cc // indirect golang.org/x/net v0.48.0 // indirect golang.org/x/oauth2 v0.34.0 // indirect golang.org/x/sync v0.19.0 // indirect golang.org/x/sys v0.39.0 // indirect golang.org/x/term v0.38.0 // indirect golang.org/x/text v0.32.0 // indirect golang.org/x/time v0.3.0 // indirect gomodules.xyz/jsonpatch/v2 v2.4.0 // indirect google.golang.org/genproto v0.0.0-20201110150050-8816d57aaa9a // indirect google.golang.org/grpc v1.79.3 // indirect google.golang.org/protobuf v1.36.10 // indirect gopkg.in/inf.v0 v0.9.1 // indirect gopkg.in/natefinch/lumberjack.v2 v2.2.1 // indirect gopkg.in/yaml.v2 v2.4.0 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect k8s.io/klog v1.0.0 // indirect k8s.io/klog/v2 v2.130.1 // indirect k8s.io/kube-openapi v0.0.0-20240228011516-70dd3763d340 // indirect k8s.io/utils v0.0.0-20240711033017-18e509b52bc8 // indirect sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd // 
indirect sigs.k8s.io/structured-merge-diff/v4 v4.4.1 // indirect sigs.k8s.io/yaml v1.4.0 // indirect ) replace ( k8s.io/client-go => k8s.io/client-go v0.31.0 k8s.io/client-go/kubernetes/scheme => k8s.io/client-go/kubernetes/scheme v0.31.0 ) ================================================ FILE: go.sum ================================================ bazil.org/fuse v0.0.0-20160811212531-371fbbdaa898/go.mod h1:Xbm+BRKSBEpa4q4hTSxohYNQpsxXPbPry4JJWOB3LB8= bou.ke/monkey v1.0.1/go.mod h1:FgHuK96Rv2Nlf+0u1OOVDpCMdsWyOFmeeketDHE7LIg= cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= cloud.google.com/go v0.37.4/go.mod h1:NHPJ89PdicEuT9hdPXMROBD91xc5uRDxsMtSB16k7hw= cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU= cloud.google.com/go v0.44.1/go.mod h1:iSa0KzasP4Uvy3f1mN/7PiObzGgflwredwwASm/v6AU= cloud.google.com/go v0.44.2/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY= cloud.google.com/go v0.45.1/go.mod h1:RpBamKRgapWJb87xiFSdk4g1CME7QZg3uwTez+TSTjc= cloud.google.com/go v0.46.3/go.mod h1:a6bKKbmY7er1mI7TEI4lsAkts/mkhTSZK8w33B4RAg0= cloud.google.com/go v0.49.0/go.mod h1:hGvAdzcWNbyuxS3nWhD7H2cIJxjRRTRLQVB0bdputVY= cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o= cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE= cloud.google.com/go/compute/metadata v0.3.0/go.mod h1:zFmK7XCadkQkj6TtorcaGlCW1hT1fIilQDwofLpJ20k= cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE= cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I= cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw= cloud.google.com/go/storage v1.3.0/go.mod h1:9IAwXhoyBJ7z9LcAwkj0/7NnPzYaPeZxxVp3zm+5IqA= contrib.go.opencensus.io/exporter/ocagent v0.6.0/go.mod 
h1:zmKjrJcdo0aYcVS7bmEeSEBLPA9YJp5bjrofdU3pIXs= cyphar.com/go-pathrs v0.2.1 h1:9nx1vOgwVvX1mNBWDu93+vaceedpbsDqo+XuBGL40b8= cyphar.com/go-pathrs v0.2.1/go.mod h1:y8f1EMG7r+hCuFf/rXsKqMJrJAUoADZGNh5/vZPKcGc= dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= github.com/Azure/azure-pipeline-go v0.2.1/go.mod h1:UGSo8XybXnIGZ3epmeBw7Jdz+HiUVpqIlpz/HKHylF4= github.com/Azure/azure-pipeline-go v0.2.2/go.mod h1:4rQ/NZncSvGqNkkOsNpOU1tgoNuIlp9AfUH5G1tvCHc= github.com/Azure/azure-sdk-for-go v16.2.1+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc= github.com/Azure/azure-sdk-for-go v23.2.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc= github.com/Azure/azure-sdk-for-go v36.1.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc= github.com/Azure/azure-storage-blob-go v0.8.0/go.mod h1:lPI3aLPpuLTeUwh1sViKXFxwl2B6teiRqI0deQUvsw0= github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78/go.mod h1:LmzpDX56iTiv29bbRTIsUNlaFfuhWRQBWjQdVyAevI8= github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1 h1:UQHMgLO+TxOElx5B5HZ4hJQsoJ/PvUvKRhJHDQXO8P8= github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1/go.mod h1:xomTg63KZ2rFqZQzSB4Vz2SUXa1BpHTVz9L5PTmPC4E= github.com/Azure/go-autorest v10.8.1+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24= github.com/Azure/go-autorest v11.2.8+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24= github.com/Azure/go-autorest/autorest v0.9.0/go.mod h1:xyHB1BMZT0cuDHU7I0+g046+BFDTQ8rEZB0s4Yfa6bI= github.com/Azure/go-autorest/autorest v0.9.3-0.20191028180845-3492b2aff503/go.mod h1:xyHB1BMZT0cuDHU7I0+g046+BFDTQ8rEZB0s4Yfa6bI= github.com/Azure/go-autorest/autorest/adal v0.5.0/go.mod h1:8Z9fGy2MpX0PvDjB1pEgQTmVqjGhiHBW7RJJEciWzS0= github.com/Azure/go-autorest/autorest/adal v0.8.1-0.20191028180845-3492b2aff503/go.mod h1:Z6vX6WXXuyieHAXwMj0S6HY6e6wcHn37qQMBQlvY3lc= 
github.com/Azure/go-autorest/autorest/date v0.1.0/go.mod h1:plvfp3oPSKwf2DNjlBjWF/7vwR+cUD/ELuzDCXwHUVA= github.com/Azure/go-autorest/autorest/date v0.2.0/go.mod h1:vcORJHLJEh643/Ioh9+vPmf1Ij9AEBM5FuBIXLmIy0g= github.com/Azure/go-autorest/autorest/mocks v0.1.0/go.mod h1:OTyCOPRA2IgIlWxVYxBee2F5Gr4kF2zd2J5cFRaIDN0= github.com/Azure/go-autorest/autorest/mocks v0.2.0/go.mod h1:OTyCOPRA2IgIlWxVYxBee2F5Gr4kF2zd2J5cFRaIDN0= github.com/Azure/go-autorest/autorest/mocks v0.3.0/go.mod h1:a8FDP3DYzQ4RYfVAxAN3SVSiiO77gL2j2ronKKP0syM= github.com/Azure/go-autorest/autorest/to v0.3.1-0.20191028180845-3492b2aff503/go.mod h1:MgwOyqaIuKdG4TL/2ywSsIWKAfJfgHDo8ObuUk3t5sA= github.com/Azure/go-autorest/autorest/validation v0.2.1-0.20191028180845-3492b2aff503/go.mod h1:3EEqHnBxQGHXRYq3HT1WyXAvT7LLY3tl70hw6tQIbjI= github.com/Azure/go-autorest/logger v0.1.0/go.mod h1:oExouG+K6PryycPJfVSxi/koC6LSNgds39diKLz7Vrc= github.com/Azure/go-autorest/tracing v0.5.0/go.mod h1:r/s2XiOKccPW3HrqB+W0TQzfbtp2fGCgRFtBroKn4Dk= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= github.com/DATA-DOG/go-sqlmock v1.4.1/go.mod h1:f/Ixk793poVmq4qj/V1dPUg2JEAKC73Q5eFN3EC/SaM= github.com/DataDog/datadog-go v3.2.0+incompatible/go.mod h1:LButxg5PwREeZtORoXG3tL4fMGNddJ+vMq1mwgfaqoQ= github.com/MakeNowJust/heredoc v0.0.0-20170808103936-bb23615498cd/go.mod h1:64YHyfSL2R96J44Nlwm39UHepQbyR5q10x7iYa1ks2E= github.com/Masterminds/goutils v1.1.0/go.mod h1:8cTjp+g8YejhMuvIA5y2vz3BpJxksy863GQaJW2MFNU= github.com/Masterminds/semver v1.5.0/go.mod h1:MB6lktGJrhw8PrUyiEoblNEGEQ+RzHPF078ddwwvV3Y= github.com/Masterminds/semver/v3 v3.0.3/go.mod h1:VPu/7SZ7ePZ3QOrcuXROw5FAcLl4a0cBrbBpGY/8hQs= github.com/Masterminds/sprig/v3 v3.0.2/go.mod h1:oesJ8kPONMONaZgtiHNzUShJbksypC5kWczhZAf6+aU= github.com/Masterminds/vcs v1.13.1/go.mod h1:N09YCmOQr6RLxC6UNHzuVwAdodYbbnycGHSmwVJjcKA= 
github.com/Microsoft/go-winio v0.4.11/go.mod h1:VhR8bwka0BXejwEJY73c50VrPtXAaKcyvVC4A4RozmA= github.com/Microsoft/go-winio v0.4.14/go.mod h1:qXqCSQ3Xa7+6tgxaGTIe4Kpcdsi+P8jBhyzoq1bpyYA= github.com/Microsoft/go-winio v0.4.15-0.20190919025122-fc70bd9a86b5/go.mod h1:tTuCMEN+UleMWgg9dVx4Hu52b1bJo+59jBh3ajtinzw= github.com/Microsoft/go-winio v0.4.16-0.20201130162521-d1ffc52c7331/go.mod h1:XB6nPKklQyQ7GC9LdcBEcBl8PF76WugXOPRXwdLnMv0= github.com/Microsoft/go-winio v0.4.16/go.mod h1:XB6nPKklQyQ7GC9LdcBEcBl8PF76WugXOPRXwdLnMv0= github.com/Microsoft/go-winio v0.4.17-0.20210211115548-6eac466e5fa3/go.mod h1:JPGBdM1cNvN/6ISo+n8V5iA4v8pBzdOpzfwIujj1a84= github.com/Microsoft/go-winio v0.4.17-0.20210324224401-5516f17a5958/go.mod h1:JPGBdM1cNvN/6ISo+n8V5iA4v8pBzdOpzfwIujj1a84= github.com/Microsoft/go-winio v0.4.17 h1:iT12IBVClFevaf8PuVyi3UmZOVh4OqnaLxDTW2O6j3w= github.com/Microsoft/go-winio v0.4.17/go.mod h1:JPGBdM1cNvN/6ISo+n8V5iA4v8pBzdOpzfwIujj1a84= github.com/Microsoft/hcsshim v0.8.6/go.mod h1:Op3hHsoHPAvb6lceZHDtd9OkTew38wNoXnJs8iY7rUg= github.com/Microsoft/hcsshim v0.8.7-0.20190325164909-8abdbb8205e4/go.mod h1:Op3hHsoHPAvb6lceZHDtd9OkTew38wNoXnJs8iY7rUg= github.com/Microsoft/hcsshim v0.8.7/go.mod h1:OHd7sQqRFrYd3RmSgbgji+ctCwkbq2wbEYNSzOYtcBQ= github.com/Microsoft/hcsshim v0.8.9/go.mod h1:5692vkUqntj1idxauYlpoINNKeqCiG6Sg38RRsjT5y8= github.com/Microsoft/hcsshim v0.8.14/go.mod h1:NtVKoYxQuTLx6gEq0L96c9Ju4JbRJ4nY2ow3VK6a9Lg= github.com/Microsoft/hcsshim v0.8.15/go.mod h1:x38A4YbHbdxJtc0sF6oIz+RG0npwSCAvn69iY6URG00= github.com/Microsoft/hcsshim v0.8.16/go.mod h1:o5/SZqmR7x9JNKsW3pu+nqHm0MF8vbA+VxGOoXdC600= github.com/Microsoft/hcsshim v0.8.21 h1:btRfUDThBE5IKcvI8O8jOiIkujUsAMBSRsYDYmEi6oM= github.com/Microsoft/hcsshim v0.8.21/go.mod h1:+w2gRZ5ReXQhFOrvSQeNfhrYB/dg3oDwTOcER2fw4I4= github.com/Microsoft/hcsshim/test v0.0.0-20201218223536-d3e5debf77da/go.mod h1:5hlzMzRKMLyo42nCZ9oml8AdTlq/0cvIaBv6tK1RehU= github.com/Microsoft/hcsshim/test v0.0.0-20210227013316-43a75bb4edd3/go.mod 
h1:mw7qgWloBUl75W/gVH3cQszUg1+gUITj7D6NY7ywVnY= github.com/NYTimes/gziphandler v0.0.0-20170623195520-56545f4a5d46/go.mod h1:3wb06e3pkSAbeQ52E9H9iFoQsEEwGN64994WTCIhntQ= github.com/NYTimes/gziphandler v1.1.1/go.mod h1:n/CVRwUEOgIxrgPvAQhUUr9oeUtvrhMomdKFjzJNB0c= github.com/Nvveen/Gotty v0.0.0-20120604004816-cd527374f1e5/go.mod h1:lmUJ/7eu/Q8D7ML55dXQrVaamCz2vxCfdQBasLZfHKk= github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU= github.com/OneOfOne/xxhash v1.2.6/go.mod h1:eZbhyaAYD41SGSSsnmcpxVoRiQ/MPUTjUdIIOT9Um7Q= github.com/PuerkitoBio/purell v1.0.0/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0= github.com/PuerkitoBio/purell v1.1.0/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0= github.com/PuerkitoBio/purell v1.1.1/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0= github.com/PuerkitoBio/urlesc v0.0.0-20160726150825-5bd2802263f2/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE= github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE= github.com/Shopify/logrus-bugsnag v0.0.0-20171204204709-577dee27f20d/go.mod h1:HI8ITrYtUY+O+ZhtlqUnD8+KwNPOyugEhfP9fdUIaEQ= github.com/Shopify/sarama v1.19.0/go.mod h1:FVkBWblsNy7DGZRfXLU0O9RCGt5g3g3yEuWXgklEdEo= github.com/Shopify/toxiproxy v2.1.4+incompatible/go.mod h1:OXgGpZ6Cli1/URJOF1DMxUHB2q5Ap20/P/eIdh4G0pI= github.com/agnivade/levenshtein v1.0.1/go.mod h1:CURSv5d9Uaml+FovSIICkLbAUZ9S4RqaHDIsdSBg7lM= github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= github.com/alecthomas/units 
v0.0.0-20190924025748-f65c72e2690d/go.mod h1:rBZYJk541a8SKzHPHnH3zbiI+7dagKZ0cgpgrD7Fyho= github.com/alexflint/go-filemutex v0.0.0-20171022225611-72bdc8eae2ae/go.mod h1:CgnQgUtFrFz9mxFNtED3jI5tLDjKlOM+oUF/sTk6ps0= github.com/aliyun/aliyun-oss-go-sdk v2.0.4+incompatible/go.mod h1:T/Aws4fEfogEE9v+HPhhw+CntffsBHJ8nXQCwKr0/g8= github.com/andreyvit/diff v0.0.0-20170406064948-c7f18ee00883/go.mod h1:rCTlJbsFo29Kk6CurOXKm700vrz8f0KW0JNfpkRJY/8= github.com/antihax/optional v0.0.0-20180407024304-ca021399b1a6/go.mod h1:V8iCPQYkqmusNa815XgQio277wI47sdRh1dUOLdyC6Q= github.com/apache/thrift v0.12.0/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb+bacwQ= github.com/armon/circbuf v0.0.0-20150827004946-bbbad097214e/go.mod h1:3U/XgcO3hCbHZ8TKRvWD2dDTCfh9M9ya+I9JpbB7O8o= github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8= github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da/go.mod h1:Q73ZrmVTwzkszR9V5SSuryQ31EELlFMUz1kKyl939pY= github.com/armon/go-metrics v0.3.0/go.mod h1:zXjbSimjXTd7vOpY8B0/2LpvNvDoXBuplAD+gJD3GYs= github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= github.com/armon/go-radix v1.0.0/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5 h1:0CwZNZbxp69SHPdPJAN/hZIm0C4OItdklCFmMRWYpio= github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5/go.mod h1:wHh0iHkYZB8zMSxRWpUBQtwG5a7fFgvEO+odwuTv2gs= github.com/asaskevich/govalidator v0.0.0-20180720115003-f9ffefc3facf/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY= github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY= github.com/asaskevich/govalidator v0.0.0-20200108200545-475eaeb16496/go.mod h1:oGkLhpf+kjZl6xBf758TQhh5XrAeiJv/7FRz/2spLIg= github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2 
h1:DklsrG3dyBCFEj5IhUbnKptjxatkF07cF2ak3yi77so= github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2/go.mod h1:WaHUgvxTVq04UNunO+XhnAqY/wQc+bxr74GqbsZ/Jqw= github.com/aws/aws-sdk-go v1.15.11/go.mod h1:mFuSZ37Z9YOHbQEwBWztmVzqXrEkub65tZoCYDt7FT0= github.com/aws/aws-sdk-go v1.17.7/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= github.com/aws/aws-sdk-go v1.25.48/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= github.com/baiyubin/aliyun-sts-go-sdk v0.0.0-20180326062324-cfa1a18b161f/go.mod h1:AuiFmCCPBSrqvVMvuqFuk0qogytodnVFVSN5CeJB8Gc= github.com/beorn7/perks v0.0.0-20160804104726-4c0e84591b9a/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs= github.com/bitly/go-hostpool v0.0.0-20171023180738-a3a6125de932/go.mod h1:NOuUCSz6Q9T7+igc/hlvDOUdtWKryOrtFyIVABv/p7k= github.com/bitly/go-simplejson v0.5.0/go.mod h1:cXHtHw4XUPsvGaxgjIAn8PhEWG9NfngEKAMDJEczWVA= github.com/bits-and-blooms/bitset v1.2.0/go.mod h1:gIdJ4wp64HaoK2YrL1Q5/N7Y16edYb8uY+O0FJTyyDA= github.com/blang/semver v3.1.0+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnwebNt5EWlYSAyrTnjyyk= github.com/blang/semver v3.5.0+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnwebNt5EWlYSAyrTnjyyk= github.com/blang/semver v3.5.1+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnwebNt5EWlYSAyrTnjyyk= github.com/bmizerany/assert v0.0.0-20160611221934-b7ed37b82869/go.mod h1:Ekp36dRnpXw/yCqJaO+ZrUyxD+3VXMFFr56k5XYrpB4= github.com/bradfitz/gomemcache v0.0.0-20190913173617-a41fca850d0b/go.mod h1:H0wQNHz2YrLsuXOZozoeDmnHXkNCRmMW0gwFWDfEZDA= 
github.com/brancz/gojsontoyaml v0.0.0-20190425155809-e8bd32d46b3d/go.mod h1:IyUJYN1gvWjtLF5ZuygmxbnsAyP3aJS6cHzIuZY50B0= github.com/bshuster-repo/logrus-logstash-hook v0.4.1/go.mod h1:zsTqEiSzDgAa/8GZR7E1qaXrhYNDKBYy5/dWPTIflbk= github.com/buger/jsonparser v0.0.0-20180808090653-f4dd9f5a6b44/go.mod h1:bbYlZJ7hK1yFx9hf58LP0zeX7UjIGs20ufpu3evjr+s= github.com/bugsnag/bugsnag-go v0.0.0-20141110184014-b1d153021fcd/go.mod h1:2oa8nejYd4cQ/b0hMIopN0lCRxU0bueqREvZLWFrtK8= github.com/bugsnag/osext v0.0.0-20130617224835-0dd3f918b21b/go.mod h1:obH5gd0BsqsP2LwDJ9aOkm/6J86V6lyAXCoQWGw3K50= github.com/bugsnag/panicwrap v0.0.0-20151223152923-e2c28503fcd0/go.mod h1:D/8v3kj0zr8ZAKg1AQ6crr+5VwKN5eIywRkfhyM/+dE= github.com/campoy/embedmd v1.0.0/go.mod h1:oxyr9RCiSXg0M3VJ3ks0UGfp98BpSSGr0kpiX3MzVl8= github.com/cenkalti/backoff v0.0.0-20181003080854-62661b46c409/go.mod h1:90ReRw6GdpyfrHakVjL/QHaoyV4aDUVVkXQJJJ3NXXM= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= github.com/cespare/xxhash v0.0.0-20181017004759-096ff4a8a059/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= github.com/cespare/xxhash/v2 v2.1.0/go.mod h1:dgIUBU3pDso/gPgZ1osOZ0iQf77oPR28Tjxl5dIMyVM= github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs= github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/chai2010/gettext-go v0.0.0-20160711120539-c6fed771bfd5/go.mod h1:/iP1qXHoty45bqomnu2LM+VVyAEdWN+vtSHGlQgyxbw= github.com/chaosblade-io/chaosblade-exec-cri v1.8.0 h1:mLv5fp0aEzZ5DFHZ1bZIyNJCVagh1OSm8Qqi0NQEApU= github.com/chaosblade-io/chaosblade-exec-cri v1.8.0/go.mod h1:o+W6ELOzrLodXclewVKq8TIQdmdMoNaWw34Va4pyX9s= github.com/chaosblade-io/chaosblade-exec-os v1.8.1-0.20260422111537-3efa2ec759a6 
h1:KhSS5jhrTpxePjRM39JllNcIr9aqUoKu+/1uhcT8gAQ= github.com/chaosblade-io/chaosblade-exec-os v1.8.1-0.20260422111537-3efa2ec759a6/go.mod h1:k6QrG3w8mEwvk/6CCGC+K1S+K8u0s1xfHoa/dOGdADo= github.com/chaosblade-io/chaosblade-spec-go v1.8.0 h1:UtwBZCXUJMAtqlmUrfTwmzolxfdnY02Itz1o84Ua0b4= github.com/chaosblade-io/chaosblade-spec-go v1.8.0/go.mod h1:xxkKn6Ve25MmqX1vWp1/swl4g+o0rJP1Rkp2ph3DzT8= github.com/checkpoint-restore/go-criu/v4 v4.1.0/go.mod h1:xUQBLp4RLc5zJtWY++yjOoMoB5lihDt7fai+75m+rGw= github.com/checkpoint-restore/go-criu/v5 v5.0.0/go.mod h1:cfwC0EG7HMUenopBsUf9d89JlCLQIfgVcNsNN0t6T2M= github.com/chromedp/cdproto v0.0.0-20230802225258-3cf4e6d46a89/go.mod h1:GKljq0VrfU4D5yc+2qA6OVr8pmO/MBbPEWqWQ/oqGEs= github.com/chromedp/chromedp v0.9.2/go.mod h1:LkSXJKONWTCHAfQasKFUZI+mxqS4tZqhmtGzzhLsnLs= github.com/chromedp/sysutil v1.0.0/go.mod h1:kgWmDdq8fTzXYcKIBqIYvRRTnYb9aNS9moAV0xufSww= github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI= github.com/chzyer/logex v1.2.1/go.mod h1:JLbx6lG2kDbNRFnfkgvh4eRJRPX1QCoOIWomwysCBrQ= github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI= github.com/chzyer/readline v1.5.1/go.mod h1:Eh+b79XXUwfKfcPLepksvw2tcLE/Ct21YObkaSkeBlk= github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= github.com/chzyer/test v1.0.0/go.mod h1:2JlltgoNkt4TW/z9V/IzDdFaMTM2JPIi26O1pF38GC8= github.com/cilium/ebpf v0.0.0-20200110133405-4032b1d8aae3/go.mod h1:MA5e5Lr8slmEg9bt0VpxxWqJlO4iwu3FBdHUzV7wQVg= github.com/cilium/ebpf v0.0.0-20200702112145-1c8d4c9ef775/go.mod h1:7cR51M8ViRLIdUjrmSXlK9pkrsDlLHbO8jiB8X8JnOc= github.com/cilium/ebpf v0.2.0/go.mod h1:To2CFviqOWL/M0gIMsvSMlqe7em/l1ALkX1PyjrX2Qs= github.com/cilium/ebpf v0.4.0/go.mod h1:4tRaxcgiL706VnOzHOdBlY8IEAIdxINsQBcU4xJJXRs= github.com/cilium/ebpf v0.6.2/go.mod h1:4tRaxcgiL706VnOzHOdBlY8IEAIdxINsQBcU4xJJXRs= github.com/cilium/ebpf v0.16.0 
h1:+BiEnHL6Z7lXnlGUsXQPPAE7+kenAd4ES8MQ5min0Ok= github.com/cilium/ebpf v0.16.0/go.mod h1:L7u2Blt2jMM/vLAVgjxluxtBKlz3/GWjB0dMOEngfwE= github.com/circonus-labs/circonus-gometrics v2.3.1+incompatible/go.mod h1:nmEj6Dob7S7YxXgwXpfOuvO54S+tGdZdw9fuRZt25Ag= github.com/circonus-labs/circonusllhist v0.1.3/go.mod h1:kMXHVDlOchFAehlya5ePtbp5jckzBHf4XRpQvBOLI+I= github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= github.com/cockroachdb/apd v1.1.0/go.mod h1:8Sl8LxpKi29FqWXR16WEFZRNSz3SoPzUzeMeY4+DwBQ= github.com/cockroachdb/cockroach-go v0.0.0-20181001143604-e0a95dfd547c/go.mod h1:XGLbWH/ujMcbPbhZq52Nv6UrCghb1yGn//133kEsvDk= github.com/cockroachdb/datadriven v0.0.0-20190809214429-80d97fb3cbaa/go.mod h1:zn76sxSg3SzpJ0PPJaLDCu+Bu0Lg3sKTORVIj19EIF8= github.com/codahale/hdrhistogram v0.0.0-20161010025455-3a0bb77429bd/go.mod h1:sE/e/2PUdi/liOCUjSTXgM1o87ZssimdTWN964YiIeI= github.com/containerd/aufs v0.0.0-20200908144142-dab0cbea06f4/go.mod h1:nukgQABAEopAHvB6j7cnP5zJ+/3aVcE7hCYqvIwAHyE= github.com/containerd/aufs v0.0.0-20201003224125-76a6863f2989/go.mod h1:AkGGQs9NM2vtYHaUen+NljV0/baGCAPELGm2q9ZXpWU= github.com/containerd/aufs v0.0.0-20210316121734-20793ff83c97/go.mod h1:kL5kd6KM5TzQjR79jljyi4olc1Vrx6XBlcyj3gNv2PU= github.com/containerd/aufs v1.0.0/go.mod h1:kL5kd6KM5TzQjR79jljyi4olc1Vrx6XBlcyj3gNv2PU= github.com/containerd/btrfs v0.0.0-20201111183144-404b9149801e/go.mod h1:jg2QkJcsabfHugurUvvPhS3E08Oxiuh5W/g1ybB4e0E= github.com/containerd/btrfs v0.0.0-20210316141732-918d888fb676/go.mod h1:zMcX3qkXTAi9GI50+0HOeuV8LU2ryCE/V2vG/ZBiTss= github.com/containerd/btrfs v1.0.0/go.mod h1:zMcX3qkXTAi9GI50+0HOeuV8LU2ryCE/V2vG/ZBiTss= github.com/containerd/cgroups v0.0.0-20190717030353-c4b9ac5c7601/go.mod h1:X9rLEHIqSf/wfK8NsPqxJmeZgW4pcfzdXITDrUSJ6uI= github.com/containerd/cgroups v0.0.0-20190919134610-bf292b21730f/go.mod 
h1:OApqhQ4XNSNC13gXIwDjhOQxjWa/NxkwZXJ1EvqT0ko= github.com/containerd/cgroups v0.0.0-20200531161412-0dbf7f05ba59/go.mod h1:pA0z1pT8KYB3TCXK/ocprsh7MAkoW8bZVzPdih9snmM= github.com/containerd/cgroups v0.0.0-20200710171044-318312a37340/go.mod h1:s5q4SojHctfxANBDvMeIaIovkq29IP48TKAxnhYRxvo= github.com/containerd/cgroups v0.0.0-20200824123100-0b889c03f102/go.mod h1:s5q4SojHctfxANBDvMeIaIovkq29IP48TKAxnhYRxvo= github.com/containerd/cgroups v0.0.0-20210114181951-8a68de567b68/go.mod h1:ZJeTFisyysqgcCdecO57Dj79RfL0LNeGiFUqLYQRYLE= github.com/containerd/cgroups v1.0.1/go.mod h1:0SJrPIenamHDcZhEcJMNBB85rHcUsw4f25ZfBiPYRkU= github.com/containerd/cgroups v1.0.2-0.20210605143700-23b51209bf7b h1:mrRq0rkLJnQOfalr7EwNn1ULsMoyGvD+8kN+hxeNRms= github.com/containerd/cgroups v1.0.2-0.20210605143700-23b51209bf7b/go.mod h1:0SJrPIenamHDcZhEcJMNBB85rHcUsw4f25ZfBiPYRkU= github.com/containerd/console v0.0.0-20180822173158-c12b1e7919c1/go.mod h1:Tj/on1eG8kiEhd0+fhSDzsPAFESxzBBvdyEgyryXffw= github.com/containerd/console v0.0.0-20181022165439-0650fd9eeb50/go.mod h1:Tj/on1eG8kiEhd0+fhSDzsPAFESxzBBvdyEgyryXffw= github.com/containerd/console v0.0.0-20191206165004-02ecf6a7291e/go.mod h1:8Pf4gM6VEbTNRIT26AyyU7hxdQU3MvAvxVI0sc00XBE= github.com/containerd/console v1.0.1/go.mod h1:XUsP6YE/mKtz6bxc+I8UiKKTP04qjQL4qcS3XoQ5xkw= github.com/containerd/console v1.0.2/go.mod h1:ytZPjGgY2oeTkAONYafi2kSj0aYggsf8acV1PGKCbzQ= github.com/containerd/containerd v1.2.7/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA= github.com/containerd/containerd v1.2.10/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA= github.com/containerd/containerd v1.3.0-beta.2.0.20190828155532-0293cbd26c69/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA= github.com/containerd/containerd v1.3.0/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA= github.com/containerd/containerd v1.3.1-0.20191213020239-082f7e3aed57/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA= github.com/containerd/containerd v1.3.2/go.mod 
h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA= github.com/containerd/containerd v1.4.0-beta.2.0.20200729163537-40b22ef07410/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA= github.com/containerd/containerd v1.4.1/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA= github.com/containerd/containerd v1.4.3/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA= github.com/containerd/containerd v1.5.0-beta.1/go.mod h1:5HfvG1V2FsKesEGQ17k5/T7V960Tmcumvqn8Mc+pCYQ= github.com/containerd/containerd v1.5.0-beta.3/go.mod h1:/wr9AVtEM7x9c+n0+stptlo/uBBoBORwEx6ardVcmKU= github.com/containerd/containerd v1.5.0-beta.4/go.mod h1:GmdgZd2zA2GYIBZ0w09ZvgqEq8EfBp/m3lcVZIvPHhI= github.com/containerd/containerd v1.5.0-rc.0/go.mod h1:V/IXoMqNGgBlabz3tHD2TWDoTJseu1FGOKuoA4nNb2s= github.com/containerd/containerd v1.5.1/go.mod h1:0DOxVqwDy2iZvrZp2JUx/E+hS0UNTVn7dJnIOwtYR4g= github.com/containerd/containerd v1.5.6 h1:yi692sMr9kyyaps9dyodk3vVOTNM9fIPvlZp4UnyT4U= github.com/containerd/containerd v1.5.6/go.mod h1:gyvv6+ugqY25TiXxcZC3L5yOeYgEw0QMhscqVp1AR9c= github.com/containerd/continuity v0.0.0-20190426062206-aaeac12a7ffc/go.mod h1:GL3xCUCBDV3CZiTSEKksMWbLE66hEyuu9qyDOOqM47Y= github.com/containerd/continuity v0.0.0-20190815185530-f2a389ac0a02/go.mod h1:GL3xCUCBDV3CZiTSEKksMWbLE66hEyuu9qyDOOqM47Y= github.com/containerd/continuity v0.0.0-20191127005431-f65d91d395eb/go.mod h1:GL3xCUCBDV3CZiTSEKksMWbLE66hEyuu9qyDOOqM47Y= github.com/containerd/continuity v0.0.0-20200107194136-26c1120b8d41/go.mod h1:Dq467ZllaHgAtVp4p1xUQWBrFXR9s/wyoTpG8zOJGkY= github.com/containerd/continuity v0.0.0-20200710164510-efbc4488d8fe/go.mod h1:cECdGN1O8G9bgKTlLhuPJimka6Xb/Gg7vYzCTNVxhvo= github.com/containerd/continuity v0.0.0-20201208142359-180525291bb7/go.mod h1:kR3BEg7bDFaEddKm54WSmrol1fKWDU1nKYkgrcgZT7Y= github.com/containerd/continuity v0.0.0-20210208174643-50096c924a4e/go.mod h1:EXlVlkqNba9rJe3j7w3Xa924itAMLgZH4UD/Q4PExuQ= github.com/containerd/continuity v0.1.0 
h1:UFRRY5JemiAhPZrr/uE0n8fMTLcZsUvySPr1+D7pgr8= github.com/containerd/continuity v0.1.0/go.mod h1:ICJu0PwR54nI0yPEnJ6jcS+J7CZAUXrLh8lPo2knzsM= github.com/containerd/fifo v0.0.0-20180307165137-3d5202aec260/go.mod h1:ODA38xgv3Kuk8dQz2ZQXpnv/UZZUHUCL7pnLehbXgQI= github.com/containerd/fifo v0.0.0-20190226154929-a9fb20d87448/go.mod h1:ODA38xgv3Kuk8dQz2ZQXpnv/UZZUHUCL7pnLehbXgQI= github.com/containerd/fifo v0.0.0-20200410184934-f15a3290365b/go.mod h1:jPQ2IAeZRCYxpS/Cm1495vGFww6ecHmMk1YJH2Q5ln0= github.com/containerd/fifo v0.0.0-20201026212402-0724c46b320c/go.mod h1:jPQ2IAeZRCYxpS/Cm1495vGFww6ecHmMk1YJH2Q5ln0= github.com/containerd/fifo v0.0.0-20210316144830-115abcc95a1d/go.mod h1:ocF/ME1SX5b1AOlWi9r677YJmCPSwwWnQ9O123vzpE4= github.com/containerd/fifo v1.0.0 h1:6PirWBr9/L7GDamKr+XM0IeUFXu5mf3M/BPpH9gaLBU= github.com/containerd/fifo v1.0.0/go.mod h1:ocF/ME1SX5b1AOlWi9r677YJmCPSwwWnQ9O123vzpE4= github.com/containerd/go-cni v1.0.1/go.mod h1:+vUpYxKvAF72G9i1WoDOiPGRtQpqsNW/ZHtSlv++smU= github.com/containerd/go-cni v1.0.2/go.mod h1:nrNABBHzu0ZwCug9Ije8hL2xBCYh/pjfMb1aZGrrohk= github.com/containerd/go-runc v0.0.0-20180907222934-5a6d9f37cfa3/go.mod h1:IV7qH3hrUgRmyYrtgEeGWJfWbgcHL9CSRruz2Vqcph0= github.com/containerd/go-runc v0.0.0-20190911050354-e029b79d8cda/go.mod h1:IV7qH3hrUgRmyYrtgEeGWJfWbgcHL9CSRruz2Vqcph0= github.com/containerd/go-runc v0.0.0-20200220073739-7016d3ce2328/go.mod h1:PpyHrqVs8FTi9vpyHwPwiNEGaACDxT/N/pLcvMSRA9g= github.com/containerd/go-runc v0.0.0-20201020171139-16b287bc67d0/go.mod h1:cNU0ZbCgCQVZK4lgG3P+9tn9/PaJNmoDXPpoJhDR+Ok= github.com/containerd/go-runc v1.0.0/go.mod h1:cNU0ZbCgCQVZK4lgG3P+9tn9/PaJNmoDXPpoJhDR+Ok= github.com/containerd/imgcrypt v1.0.1/go.mod h1:mdd8cEPW7TPgNG4FpuP3sGBiQ7Yi/zak9TYCG3juvb0= github.com/containerd/imgcrypt v1.0.4-0.20210301171431-0ae5c75f59ba/go.mod h1:6TNsg0ctmizkrOgXRNQjAPFWpMYRWuiB6dSF4Pfa5SA= github.com/containerd/imgcrypt v1.1.1-0.20210312161619-7ed62a527887/go.mod h1:5AZJNI6sLHJljKuI9IHnw1pWqo/F0nGDOuR9zgTs7ow= 
github.com/containerd/imgcrypt v1.1.1/go.mod h1:xpLnwiQmEUJPvQoAapeb2SNCxz7Xr6PJrXQb0Dpc4ms= github.com/containerd/nri v0.0.0-20201007170849-eb1350a75164/go.mod h1:+2wGSDGFYfE5+So4M5syatU0N0f0LbWpuqyMi4/BE8c= github.com/containerd/nri v0.0.0-20210316161719-dbaa18c31c14/go.mod h1:lmxnXF6oMkbqs39FiCt1s0R2HSMhcLel9vNL3m4AaeY= github.com/containerd/nri v0.1.0/go.mod h1:lmxnXF6oMkbqs39FiCt1s0R2HSMhcLel9vNL3m4AaeY= github.com/containerd/ttrpc v0.0.0-20190828154514-0e0f228740de/go.mod h1:PvCDdDGpgqzQIzDW1TphrGLssLDZp2GuS+X5DkEJB8o= github.com/containerd/ttrpc v0.0.0-20190828172938-92c8520ef9f8/go.mod h1:PvCDdDGpgqzQIzDW1TphrGLssLDZp2GuS+X5DkEJB8o= github.com/containerd/ttrpc v0.0.0-20191028202541-4f1b8fe65a5c/go.mod h1:LPm1u0xBw8r8NOKoOdNMeVHSawSsltak+Ihv+etqsE8= github.com/containerd/ttrpc v1.0.1/go.mod h1:UAxOpgT9ziI0gJrmKvgcZivgxOp8iFPSk8httJEt98Y= github.com/containerd/ttrpc v1.0.2 h1:2/O3oTZN36q2xRolk0a2WWGgh7/Vf/liElg5hFYLX9U= github.com/containerd/ttrpc v1.0.2/go.mod h1:UAxOpgT9ziI0gJrmKvgcZivgxOp8iFPSk8httJEt98Y= github.com/containerd/typeurl v0.0.0-20180627222232-a93fcdb778cd/go.mod h1:Cm3kwCdlkCfMSHURc+r6fwoGH6/F1hH3S4sg0rLFWPc= github.com/containerd/typeurl v0.0.0-20190911142611-5eb25027c9fd/go.mod h1:GeKYzf2pQcqv7tJ0AoCuuhtnqhva5LNU3U+OyKxxJpk= github.com/containerd/typeurl v1.0.1/go.mod h1:TB1hUtrpaiO88KEK56ijojHS1+NeF0izUACaJW2mdXg= github.com/containerd/typeurl v1.0.2 h1:Chlt8zIieDbzQFzXzAeBEF92KhExuE4p9p92/QmY7aY= github.com/containerd/typeurl v1.0.2/go.mod h1:9trJWW2sRlGub4wZJRTW83VtbOLS6hwcDZXTn6oPz9s= github.com/containerd/zfs v0.0.0-20200918131355-0a33824f23a2/go.mod h1:8IgZOBdv8fAgXddBT4dBXJPtxyRsejFIpXoklgxgEjw= github.com/containerd/zfs v0.0.0-20210301145711-11e8f1707f62/go.mod h1:A9zfAbMlQwE+/is6hi0Xw8ktpL+6glmqZYtevJgaB8Y= github.com/containerd/zfs v0.0.0-20210315114300-dde8f0fda960/go.mod h1:m+m51S1DvAP6r3FcmYCp54bQ34pyOwTieQDNRIRHsFY= github.com/containerd/zfs v0.0.0-20210324211415-d5c4544f0433/go.mod 
h1:m+m51S1DvAP6r3FcmYCp54bQ34pyOwTieQDNRIRHsFY= github.com/containerd/zfs v1.0.0/go.mod h1:m+m51S1DvAP6r3FcmYCp54bQ34pyOwTieQDNRIRHsFY= github.com/containernetworking/cni v0.7.1/go.mod h1:LGwApLUm2FpoOfxTDEeq8T9ipbpZ61X79hmU3w8FmsY= github.com/containernetworking/cni v0.8.0/go.mod h1:LGwApLUm2FpoOfxTDEeq8T9ipbpZ61X79hmU3w8FmsY= github.com/containernetworking/cni v0.8.1/go.mod h1:LGwApLUm2FpoOfxTDEeq8T9ipbpZ61X79hmU3w8FmsY= github.com/containernetworking/plugins v0.8.6/go.mod h1:qnw5mN19D8fIwkqW7oHHYDHVlzhJpcY6TQxn/fUyDDM= github.com/containernetworking/plugins v0.9.1/go.mod h1:xP/idU2ldlzN6m4p5LmGiwRDjeJr6FLK6vuiUwoH7P8= github.com/containers/ocicrypt v1.0.1/go.mod h1:MeJDzk1RJHv89LjsH0Sp5KTY3ZYkjXO/C+bKAeWFIrc= github.com/containers/ocicrypt v1.1.0/go.mod h1:b8AOe0YR67uU8OqfVNcznfFpAzu3rdgUV4GP9qXPfu4= github.com/containers/ocicrypt v1.1.1/go.mod h1:Dm55fwWm1YZAjYRaJ94z2mfZikIyIN4B0oB3dj3jFxY= github.com/coreos/bbolt v1.3.1-coreos.6/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk= github.com/coreos/bbolt v1.3.2/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk= github.com/coreos/bbolt v1.3.3/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk= github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE= github.com/coreos/etcd v3.3.15+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE= github.com/coreos/etcd v3.3.17+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE= github.com/coreos/go-etcd v2.0.0+incompatible/go.mod h1:Jez6KQU2B/sWsbdaef3ED8NzMklzPG4d5KIOhIy30Tk= github.com/coreos/go-iptables v0.4.5/go.mod h1:/mVI274lEDI2ns62jHCDnCyBF9Iwsmekav8Dbxlm1MU= github.com/coreos/go-iptables v0.5.0/go.mod h1:/mVI274lEDI2ns62jHCDnCyBF9Iwsmekav8Dbxlm1MU= github.com/coreos/go-oidc v2.1.0+incompatible/go.mod h1:CgnwVTmzoESiwO9qyAFEMiHoZ1nMCKZlZ9V6mm3/LKc= github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= github.com/coreos/go-semver v0.3.0/go.mod 
h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= github.com/coreos/go-systemd v0.0.0-20161114122254-48702e0da86b/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= github.com/coreos/go-systemd v0.0.0-20180511133405-39ca1b05acc7/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= github.com/coreos/go-systemd v0.0.0-20181012123002-c6f51f82210d/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= github.com/coreos/go-systemd/v22 v22.0.0/go.mod h1:xO0FLkIi5MaZafQlIrOotqXZ90ih+1atmu1JpKERPPk= github.com/coreos/go-systemd/v22 v22.1.0/go.mod h1:xO0FLkIi5MaZafQlIrOotqXZ90ih+1atmu1JpKERPPk= github.com/coreos/go-systemd/v22 v22.3.2/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc= github.com/coreos/go-systemd/v22 v22.6.0 h1:aGVa/v8B7hpb0TKl0MWoAavPDmHvobFe5R5zn0bCJWo= github.com/coreos/go-systemd/v22 v22.6.0/go.mod h1:iG+pp635Fo7ZmV/j14KUcmEyWF+0X7Lua8rrTWzYgWU= github.com/coreos/pkg v0.0.0-20160727233714-3ac0863d7acf/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA= github.com/coreos/pkg v0.0.0-20180108230652-97fdf19511ea/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA= github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA= github.com/coreos/prometheus-operator v0.38.0/go.mod h1:xZC7/TgeC0/mBaJk+1H9dbHaiEvLYHgX6Mi1h40UPh8= github.com/corpix/uarand v0.0.0-20170723150923-031be390f409 h1:9A+mfQmwzZ6KwUXPc8nHxFtKgn9VIvO3gXAOspIcE3s= github.com/corpix/uarand v0.0.0-20170723150923-031be390f409/go.mod h1:JSm890tOkDN+M1jqN8pUGDKnzJrsVbJwSMHBY4zwz7M= github.com/cpuguy83/go-md2man v1.0.10/go.mod h1:SmD6nW6nTyfqj6ABTjUi3V3JVMnlJmwcJI5acqYI6dE= github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= github.com/cpuguy83/go-md2man/v2 v2.0.0/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= github.com/creack/pty 
v1.1.7/go.mod h1:lj5s0c3V2DBrqTV7llrYr5NG6My20zk30Fl46Y7DoTY= github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= github.com/cyphar/filepath-securejoin v0.2.2/go.mod h1:FpkQEhXnPnOthhzymB7CGsFk2G9VLXONKD9G7QGMM+4= github.com/cyphar/filepath-securejoin v0.6.0 h1:BtGB77njd6SVO6VztOHfPxKitJvd/VPT+OFBFMOi1Is= github.com/cyphar/filepath-securejoin v0.6.0/go.mod h1:A8hd4EnAeyujCJRrICiOWqjS1AX0a9kM5XL+NwKoYSc= github.com/cznic/b v0.0.0-20180115125044-35e9bbe41f07/go.mod h1:URriBxXwVq5ijiJ12C7iIZqlA69nTlI+LgI6/pwftG8= github.com/cznic/fileutil v0.0.0-20180108211300-6a051e75936f/go.mod h1:8S58EK26zhXSxzv7NQFpnliaOQsmDUxvoQO3rt154Vg= github.com/cznic/golex v0.0.0-20170803123110-4ab7c5e190e4/go.mod h1:+bmmJDNmKlhWNG+gwWCkaBoTy39Fs+bzRxVBzoTQbIc= github.com/cznic/internal v0.0.0-20180608152220-f44710a21d00/go.mod h1:olo7eAdKwJdXxb55TKGLiJ6xt1H0/tiiRCWKVLmtjY4= github.com/cznic/lldb v1.1.0/go.mod h1:FIZVUmYUVhPwRiPzL8nD/mpFcJ/G7SSXjjXYG4uRI3A= github.com/cznic/mathutil v0.0.0-20180504122225-ca4c9f2c1369/go.mod h1:e6NPNENfs9mPDVNRekM7lKScauxd5kXTr1Mfyig6TDM= github.com/cznic/ql v1.2.0/go.mod h1:FbpzhyZrqr0PVlK6ury+PoW3T0ODUV22OeWIxcaOrSE= github.com/cznic/sortutil v0.0.0-20150617083342-4c7342852e65/go.mod h1:q2w6Bg5jeox1B+QkJ6Wp/+Vn0G/bo3f1uY7Fn3vivIQ= github.com/cznic/strutil v0.0.0-20171016134553-529a34b1c186/go.mod h1:AHHPPPXTw0h6pVabbcbyGRK1DckRn7r/STdZEeIDzZc= github.com/cznic/zappy v0.0.0-20160723133515-2533cb5b45cc/go.mod h1:Y1SNZ4dRUOKXshKUbwUapqNncRrho4mkjQebgEHZLj8= github.com/d2g/dhcp4 v0.0.0-20170904100407-a1d1b6c41b1c/go.mod h1:Ct2BUK8SB0YC1SMSibvLzxjeJLnrYEVLULFNiHY9YfQ= github.com/d2g/dhcp4client v1.0.0/go.mod h1:j0hNfjhrt2SxUOw55nL0ATM/z4Yt3t2Kd1mW34z5W5s= github.com/d2g/dhcp4server v0.0.0-20181031114812-7d4a0a7f59a5/go.mod h1:Eo87+Kg/IX2hfWJfwxMzLyuSZyxSoAug2nGa1G2QAi8= github.com/d2g/hardwareaddr v0.0.0-20190221164911-e7d9fbe030e4/go.mod h1:bMl4RjIciD2oAxI7DmWRx6gbeqrkoLqv3MV0vzNad+I= github.com/davecgh/go-spew 
v0.0.0-20151105211317-5215b55f46b2/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM= github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/daviddengcn/go-colortext v0.0.0-20160507010035-511bcaf42ccd/go.mod h1:dv4zxwHi5C/8AeI+4gX4dCWOIvNi7I6JCSX0HvlKPgE= github.com/deislabs/oras v0.8.1/go.mod h1:Mx0rMSbBNaNfY9hjpccEnxkOqJL6KGjtxNHPLC4G4As= github.com/denisenkom/go-mssqldb v0.0.0-20190515213511-eb9f6a1743f3/go.mod h1:zAg7JM8CkOJ43xKXIj7eRO9kmWm/TW578qo+oDO6tuM= github.com/denverdino/aliyungo v0.0.0-20190125010748-a747050bb1ba/go.mod h1:dV8lFg6daOBZbT6/BDGIz6Y3WFGn8juu6G+CQ6LHtl0= github.com/dgrijalva/jwt-go v0.0.0-20170104182250-a601269ab70c/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no= github.com/dgryski/go-sip13 v0.0.0-20190329191031-25c5027a8c7b/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no= github.com/dhui/dktest v0.3.0/go.mod h1:cyzIUfGsBEbZ6BT7tnXqAShHSXCZhSNmFl70sZ7c1yc= github.com/dimchansky/utfbom v1.1.1 h1:vV6w1AhK4VMnhBno/TPVCoK9U/LP0PkLCS9tbxHdi/U= github.com/dimchansky/utfbom v1.1.1/go.mod h1:SxdoEBH5qIqFocHMyGOXVAybYJdr71b1Q/j0mACtrfE= github.com/dnaeon/go-vcr v1.0.1/go.mod h1:aBB1+wY4s93YsC3HHjMBMrwTj2R9FHDzUr9KyGc8n1E= github.com/docker/cli v0.0.0-20200130152716-5d0cf8839492/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8= github.com/docker/distribution v0.0.0-20190905152932-14b96e55d84c/go.mod h1:0+TTO4EOBfRPhZXAeF1Vu+W3hHZ8eLp8PgKVZlcvtFY= 
github.com/docker/distribution v0.0.0-20191216044856-a8371794149d/go.mod h1:0+TTO4EOBfRPhZXAeF1Vu+W3hHZ8eLp8PgKVZlcvtFY= github.com/docker/distribution v2.7.0+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w= github.com/docker/distribution v2.7.1-0.20190205005809-0d3efadf0154+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w= github.com/docker/distribution v2.7.1+incompatible h1:a5mlkVzth6W5A4fOsS3D2EO5BUmsJpcB+cRlLU7cSug= github.com/docker/distribution v2.7.1+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w= github.com/docker/docker v0.7.3-0.20190103212154-2b7e084dc98b/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= github.com/docker/docker v0.7.3-0.20190327010347-be7ac8be2ae0/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= github.com/docker/docker v0.7.3-0.20190817195342-4760db040282/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= github.com/docker/docker v1.4.2-0.20200203170920-46ec8731fbce h1:KXS1Jg+ddGcWA8e1N7cupxaHHZhit5rB9tfDU+mfjyY= github.com/docker/docker v1.4.2-0.20200203170920-46ec8731fbce/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= github.com/docker/docker-credential-helpers v0.6.3/go.mod h1:WRaJzqw3CTB9bk10avuGsjVBZsD05qeibJ1/TYlvc0Y= github.com/docker/go-connections v0.4.0 h1:El9xVISelRB7BuFusrZozjnkIM5YnzCViNKohAFqRJQ= github.com/docker/go-connections v0.4.0/go.mod h1:Gbd7IOopHjR8Iph03tsViu4nIes5XhDvyHbTtUxmeec= github.com/docker/go-events v0.0.0-20170721190031-9461782956ad/go.mod h1:Uw6UezgYA44ePAFQYUehOuCzmy5zmg/+nl2ZfMWGkpA= github.com/docker/go-events v0.0.0-20190806004212-e31b211e4f1c h1:+pKlWGMw7gf6bQ+oDZB4KHQFypsfjYlq/C4rfL7D3g8= github.com/docker/go-events v0.0.0-20190806004212-e31b211e4f1c/go.mod h1:Uw6UezgYA44ePAFQYUehOuCzmy5zmg/+nl2ZfMWGkpA= github.com/docker/go-metrics v0.0.0-20180209012529-399ea8c73916/go.mod h1:/u0gXw0Gay3ceNrsHubL3BtdOL2fHf93USgMTe0W5dI= github.com/docker/go-metrics v0.0.1/go.mod h1:cG1hvH2utMXtqgqqYE9plW6lDxS3/5ayHzueweSI3Vw= 
github.com/docker/go-units v0.3.3/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= github.com/docker/go-units v0.4.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4= github.com/docker/go-units v0.5.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= github.com/docker/libtrust v0.0.0-20150114040149-fa567046d9b1/go.mod h1:cyGadeNEkKy96OOhEzfZl+yxihPEzKnqJwvfuSUqbZE= github.com/docker/spdystream v0.0.0-20160310174837-449fdfce4d96/go.mod h1:Qh8CwZgvJUkLughtfhJv5dyTYa91l1fOUCrgjqmcifM= github.com/docopt/docopt-go v0.0.0-20180111231733-ee0de3bc6815/go.mod h1:WwZ+bS3ebgob9U8Nd0kOddGdZWjyMGR8Wziv+TBNwSE= github.com/dustin/go-humanize v0.0.0-20171111073723-bb3d318650d4/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= github.com/eapache/go-resiliency v1.1.0/go.mod h1:kFI+JgMyC7bLPUVY133qvEBtVayf5mFgVsvEsIPBvNs= github.com/eapache/go-xerial-snappy v0.0.0-20180814174437-776d5712da21/go.mod h1:+020luEh2TKB4/GOp8oxxtq0Daoen/Cii55CzbTV6DU= github.com/eapache/queue v1.1.0/go.mod h1:6eCeP0CKFpHLu8blIFXhExK/dRa7WDZfr6jVFPTqq+I= github.com/edsrzf/mmap-go v0.0.0-20170320065105-0bce6a688712/go.mod h1:YO35OhQPt3KJa3ryjFM5Bs14WD66h8eGKpfaBNrHW5M= github.com/edsrzf/mmap-go v1.0.0/go.mod h1:YO35OhQPt3KJa3ryjFM5Bs14WD66h8eGKpfaBNrHW5M= github.com/elastic/go-sysinfo v1.0.1/go.mod h1:O/D5m1VpYLwGjCYzEt63g3Z1uO3jXfwyzzjiW90t8cY= github.com/elastic/go-sysinfo v1.1.1/go.mod h1:i1ZYdU10oLNfRzq4vq62BEwD2fH8KaWh6eh0ikPT9F0= github.com/elastic/go-windows v1.0.0/go.mod h1:TsU0Nrp7/y3+VwE82FoZF8gC/XFg/Elz6CcloAxnPgU= github.com/elastic/go-windows v1.0.1/go.mod h1:FoVvqWSun28vaDQPbj2Elfc0JahhPB7WQEGa3c814Ss= github.com/elazarl/goproxy v0.0.0-20170405201442-c4fc26588b6e/go.mod h1:/Zj4wYkgs4iZTTu3o/KG3Itv/qCCa8VVMlb3i9OVuzc= github.com/elazarl/goproxy v0.0.0-20180725130230-947c36da3153/go.mod 
h1:/Zj4wYkgs4iZTTu3o/KG3Itv/qCCa8VVMlb3i9OVuzc= github.com/emicklei/go-restful v0.0.0-20170410110728-ff4f55a20633/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs= github.com/emicklei/go-restful v2.9.5+incompatible/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs= github.com/emicklei/go-restful/v3 v3.8.0/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc= github.com/emicklei/go-restful/v3 v3.11.0 h1:rAQeMHw1c7zTmncogyy8VvRZwtkmkZ4FxERmMY4rD+g= github.com/emicklei/go-restful/v3 v3.11.0/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc= github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= github.com/ethercflow/hookfs v0.3.0 h1:PfA/56Q22Dc7LtAM/6dDnlom4GAo4OpaFc3K2Xo7ZvM= github.com/ethercflow/hookfs v0.3.0/go.mod h1:c4t7EbwfiU+xsSjeZrlup9p5boB/PT3HD9sTq57K/4M= github.com/evanphx/json-patch v0.0.0-20190203023257-5858425f7550/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= github.com/evanphx/json-patch v4.1.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= github.com/evanphx/json-patch v4.2.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= github.com/evanphx/json-patch v4.5.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= github.com/evanphx/json-patch v4.9.0+incompatible h1:kLcOMZeuLAJvL2BPWLMIj5oaZQobrkAqrL+WFZwQses= github.com/evanphx/json-patch v4.9.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= github.com/evanphx/json-patch/v5 v5.9.0 h1:kcBlZQbplgElYIlo/n1hJbls2z/1awpXxpRi0/FOJfg= github.com/evanphx/json-patch/v5 v5.9.0/go.mod h1:VNkHZ/282BpEyt/tObQO8s5CMPmYYq14uClGH4abBuQ= 
github.com/exponent-io/jsonpath v0.0.0-20151013193312-d6023ce2651d/go.mod h1:ZZMPRZwes7CROmyNKgQzC3XPs6L/G2EJLHddWejkmf4= github.com/facette/natsort v0.0.0-20181210072756-2cd4dd1e2dcb/go.mod h1:bH6Xx7IW64qjjJq8M2u4dxNaBiDfKK+z/3eGDpXEQhc= github.com/fatih/camelcase v1.0.0/go.mod h1:yN2Sb0lFhZJUdVvtELVWefmrXpuZESvPmqwoZc+/fpc= github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= github.com/fatih/structtag v1.1.0/go.mod h1:mBJUNpUnHmRKrKlQQlmCrh5PuhftFbNv8Ys4/aAZl94= github.com/fortytw2/leaktest v1.3.0/go.mod h1:jDsjWgpAGjm2CA7WthBh/CdZYEPF31XHquHwclZch5g= github.com/frankban/quicktest v1.11.3/go.mod h1:wRf/ReqHper53s+kmmSZizM8NamnL3IM0I9ntUbOk+k= github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ= github.com/fsnotify/fsnotify v1.7.0 h1:8JEhPFa5W2WU7YfeZzPNqzMP6Lwt7L2715Ggo0nosvA= github.com/fsnotify/fsnotify v1.7.0/go.mod h1:40Bi/Hjc2AVfZrqy+aj+yEI+/bRxZnMJyTJwOpGvigM= github.com/fsouza/fake-gcs-server v1.7.0/go.mod h1:5XIRs4YvwNbNoz+1JF8j6KLAyDh7RHGAyAK3EP2EsNk= github.com/fullsailor/pkcs7 v0.0.0-20190404230743-d7302db945fa/go.mod h1:KnogPXtdwXqoenmZCw6S+25EAm2MkxbG0deNDu4cbSA= github.com/fxamacker/cbor/v2 v2.7.0 h1:iM5WgngdRBanHcxugY4JySA0nk1wZorNOpTgCMedv5E= github.com/fxamacker/cbor/v2 v2.7.0/go.mod h1:pxXPTn3joSm21Gbwsv0w9OSA2y1HFR9qXEeXQVeNoDQ= github.com/garyburd/redigo v0.0.0-20150301180006-535138d7bcd7/go.mod h1:NR3MbYisc3/PwhQ00EMzDiPmrwpPxAn5GI05/YaO1SY= github.com/ghodss/yaml v0.0.0-20150909031657-73d445a93680/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= github.com/ghodss/yaml v1.0.1-0.20190212211648-25d852aebe32/go.mod h1:GIjDIg/heH5DOkXY3YJ/wNhfHsQHoXGjl8G8amsYQ1I= github.com/globalsign/mgo v0.0.0-20180905125535-1ca0a4f7cbcb/go.mod h1:xkRDCp4j0OGD1HRkm4kmhM+pmpv3AKq5SU7GMg4oO/Q= 
github.com/globalsign/mgo v0.0.0-20181015135952-eeefdecb41b8/go.mod h1:xkRDCp4j0OGD1HRkm4kmhM+pmpv3AKq5SU7GMg4oO/Q= github.com/go-bindata/go-bindata v3.1.2+incompatible/go.mod h1:xK8Dsgwmeed+BBsSy2XTopBn/8uK2HWuGSnA11C3Joo= github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU= github.com/go-ini/ini v1.25.4/go.mod h1:ByCAeIL28uOIIG0E3PJtZPDL8WnHpFKFOtgjp+3Ies8= github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE= github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk= github.com/go-logr/logr v0.1.0/go.mod h1:ixOQHD9gLJUVQQ2ZOR7zLEifBX6tGkNJF4QyIY7sIas= github.com/go-logr/logr v0.2.0/go.mod h1:z6/tIYblkpsD+a4lm/fGIIU9mZ+XfAiaFtq7xTgseGU= github.com/go-logr/logr v1.2.0/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= github.com/go-logr/logr v1.2.3/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= github.com/go-logr/logr v1.2.4/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= github.com/go-logr/logr v1.4.1/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= github.com/go-logr/logr v1.4.2/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= github.com/go-logr/logr v1.4.3 h1:CjnDlHq8ikf6E492q6eKboGOC0T8CDaOvkHCIg8idEI= github.com/go-logr/logr v1.4.3/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= github.com/go-logr/zapr v0.1.0/go.mod h1:tabnROwaDl0UNxkVeFRbY8bwB37GwRv0P8lg6aAiEnk= github.com/go-logr/zapr v0.1.1/go.mod h1:tabnROwaDl0UNxkVeFRbY8bwB37GwRv0P8lg6aAiEnk= github.com/go-logr/zapr v1.3.0 h1:XGdV8XW8zdwFiwOA2Dryh1gj2KRQyOOoNmBy4EplIcQ= github.com/go-logr/zapr v1.3.0/go.mod 
h1:YKepepNBd1u/oyhd/yQmtjVXmm9uML4IXUgMOwR8/Gg= github.com/go-ole/go-ole v1.2.6/go.mod h1:pprOEPIfldk/42T2oK7lQ4v4JSDwmV0As9GaiUsvbm0= github.com/go-ole/go-ole v1.3.0 h1:Dt6ye7+vXGIKZ7Xtk4s6/xVdGDQynvom7xCFEdWr6uE= github.com/go-ole/go-ole v1.3.0/go.mod h1:5LS6F96DhAwUc7C+1HLexzMXY1xGRSryjyPPKW6zv78= github.com/go-openapi/analysis v0.0.0-20180825180245-b006789cd277/go.mod h1:k70tL6pCuVxPJOHXQ+wIac1FUrvNkHolPie/cLEU6hI= github.com/go-openapi/analysis v0.17.0/go.mod h1:IowGgpVeD0vNm45So8nr+IcQ3pxVtpRoBWb8PVZO0ik= github.com/go-openapi/analysis v0.17.2/go.mod h1:IowGgpVeD0vNm45So8nr+IcQ3pxVtpRoBWb8PVZO0ik= github.com/go-openapi/analysis v0.18.0/go.mod h1:IowGgpVeD0vNm45So8nr+IcQ3pxVtpRoBWb8PVZO0ik= github.com/go-openapi/analysis v0.19.2/go.mod h1:3P1osvZa9jKjb8ed2TPng3f0i/UY9snX6gxi44djMjk= github.com/go-openapi/analysis v0.19.5/go.mod h1:hkEAkxagaIvIP7VTn8ygJNkd4kAYON2rCu0v0ObL0AU= github.com/go-openapi/errors v0.17.0/go.mod h1:LcZQpmvG4wyF5j4IhA73wkLFQg+QJXOQHVjmcZxhka0= github.com/go-openapi/errors v0.17.2/go.mod h1:LcZQpmvG4wyF5j4IhA73wkLFQg+QJXOQHVjmcZxhka0= github.com/go-openapi/errors v0.18.0/go.mod h1:LcZQpmvG4wyF5j4IhA73wkLFQg+QJXOQHVjmcZxhka0= github.com/go-openapi/errors v0.19.2/go.mod h1:qX0BLWsyaKfvhluLejVpVNwNRdXZhEbTA4kxxpKBC94= github.com/go-openapi/jsonpointer v0.0.0-20160704185906-46af16f9f7b1/go.mod h1:+35s3my2LFTysnkMfxsJBAMHj/DoqoB9knIWoYG/Vk0= github.com/go-openapi/jsonpointer v0.17.0/go.mod h1:cOnomiV+CVVwFLk0A/MExoFMjwdsUdVpsRhURCKh+3M= github.com/go-openapi/jsonpointer v0.17.2/go.mod h1:cOnomiV+CVVwFLk0A/MExoFMjwdsUdVpsRhURCKh+3M= github.com/go-openapi/jsonpointer v0.18.0/go.mod h1:cOnomiV+CVVwFLk0A/MExoFMjwdsUdVpsRhURCKh+3M= github.com/go-openapi/jsonpointer v0.19.2/go.mod h1:3akKfEdA7DF1sugOqz1dVQHBcuDBPKZGEoHC/NkiQRg= github.com/go-openapi/jsonpointer v0.19.3/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg= github.com/go-openapi/jsonpointer v0.19.6 h1:eCs3fxoIi3Wh6vtgmLTOjdhSpiqphQ+DaPn38N2ZdrE= github.com/go-openapi/jsonpointer 
v0.19.6/go.mod h1:osyAmYz/mB/C3I+WsTTSgw1ONzaLJoLCyoi6/zppojs= github.com/go-openapi/jsonreference v0.0.0-20160704190145-13c6e3589ad9/go.mod h1:W3Z9FmVs9qj+KR4zFKmDPGiLdk1D9Rlm7cyMvf57TTg= github.com/go-openapi/jsonreference v0.17.0/go.mod h1:g4xxGn04lDIRh0GJb5QlpE3HfopLOL6uZrK/VgnsK9I= github.com/go-openapi/jsonreference v0.17.2/go.mod h1:g4xxGn04lDIRh0GJb5QlpE3HfopLOL6uZrK/VgnsK9I= github.com/go-openapi/jsonreference v0.18.0/go.mod h1:g4xxGn04lDIRh0GJb5QlpE3HfopLOL6uZrK/VgnsK9I= github.com/go-openapi/jsonreference v0.19.2/go.mod h1:jMjeRr2HHw6nAVajTXJ4eiUwohSTlpa0o73RUL1owJc= github.com/go-openapi/jsonreference v0.19.3/go.mod h1:rjx6GuL8TTa9VaixXglHmQmIL98+wF9xc8zWvFonSJ8= github.com/go-openapi/jsonreference v0.20.1/go.mod h1:Bl1zwGIM8/wsvqjsOQLJ/SH+En5Ap4rVB5KVcIDZG2k= github.com/go-openapi/jsonreference v0.20.2 h1:3sVjiK66+uXK/6oQ8xgcRKcFgQ5KXa2KvnJRumpMGbE= github.com/go-openapi/jsonreference v0.20.2/go.mod h1:Bl1zwGIM8/wsvqjsOQLJ/SH+En5Ap4rVB5KVcIDZG2k= github.com/go-openapi/loads v0.17.0/go.mod h1:72tmFy5wsWx89uEVddd0RjRWPZm92WRLhf7AC+0+OOU= github.com/go-openapi/loads v0.17.2/go.mod h1:72tmFy5wsWx89uEVddd0RjRWPZm92WRLhf7AC+0+OOU= github.com/go-openapi/loads v0.18.0/go.mod h1:72tmFy5wsWx89uEVddd0RjRWPZm92WRLhf7AC+0+OOU= github.com/go-openapi/loads v0.19.0/go.mod h1:72tmFy5wsWx89uEVddd0RjRWPZm92WRLhf7AC+0+OOU= github.com/go-openapi/loads v0.19.2/go.mod h1:QAskZPMX5V0C2gvfkGZzJlINuP7Hx/4+ix5jWFxsNPs= github.com/go-openapi/loads v0.19.4/go.mod h1:zZVHonKd8DXyxyw4yfnVjPzBjIQcLt0CCsn0N0ZrQsk= github.com/go-openapi/runtime v0.0.0-20180920151709-4f900dc2ade9/go.mod h1:6v9a6LTXWQCdL8k1AO3cvqx5OtZY/Y9wKTgaoP6YRfA= github.com/go-openapi/runtime v0.18.0/go.mod h1:uI6pHuxWYTy94zZxgcwJkUWa9wbIlhteGfloI10GD4U= github.com/go-openapi/runtime v0.19.0/go.mod h1:OwNfisksmmaZse4+gpV3Ne9AyMOlP1lt4sK4FXt0O64= github.com/go-openapi/runtime v0.19.4/go.mod h1:X277bwSUBxVlCYR3r7xgZZGKVvBd/29gLDlFGtJ8NL4= github.com/go-openapi/spec v0.0.0-20160808142527-6aced65f8501/go.mod 
h1:J8+jY1nAiCcj+friV/PDoE1/3eeccG9LYBs0tYvLOWc= github.com/go-openapi/spec v0.17.0/go.mod h1:XkF/MOi14NmjsfZ8VtAKf8pIlbZzyoTvZsdfssdxcBI= github.com/go-openapi/spec v0.17.2/go.mod h1:XkF/MOi14NmjsfZ8VtAKf8pIlbZzyoTvZsdfssdxcBI= github.com/go-openapi/spec v0.18.0/go.mod h1:XkF/MOi14NmjsfZ8VtAKf8pIlbZzyoTvZsdfssdxcBI= github.com/go-openapi/spec v0.19.2/go.mod h1:sCxk3jxKgioEJikev4fgkNmwS+3kuYdJtcsZsD5zxMY= github.com/go-openapi/spec v0.19.3/go.mod h1:FpwSN1ksY1eteniUU7X0N/BgJ7a4WvBFVA8Lj9mJglo= github.com/go-openapi/spec v0.19.4/go.mod h1:FpwSN1ksY1eteniUU7X0N/BgJ7a4WvBFVA8Lj9mJglo= github.com/go-openapi/strfmt v0.17.0/go.mod h1:P82hnJI0CXkErkXi8IKjPbNBM6lV6+5pLP5l494TcyU= github.com/go-openapi/strfmt v0.17.2/go.mod h1:P82hnJI0CXkErkXi8IKjPbNBM6lV6+5pLP5l494TcyU= github.com/go-openapi/strfmt v0.18.0/go.mod h1:P82hnJI0CXkErkXi8IKjPbNBM6lV6+5pLP5l494TcyU= github.com/go-openapi/strfmt v0.19.0/go.mod h1:+uW+93UVvGGq2qGaZxdDeJqSAqBqBdl+ZPMF/cC8nDY= github.com/go-openapi/strfmt v0.19.2/go.mod h1:0yX7dbo8mKIvc3XSKp7MNfxw4JytCfCD6+bY1AVL9LU= github.com/go-openapi/strfmt v0.19.3/go.mod h1:0yX7dbo8mKIvc3XSKp7MNfxw4JytCfCD6+bY1AVL9LU= github.com/go-openapi/swag v0.0.0-20160704191624-1d0bd113de87/go.mod h1:DXUve3Dpr1UfpPtxFw+EFuQ41HhCWZfha5jSVRG7C7I= github.com/go-openapi/swag v0.17.0/go.mod h1:AByQ+nYG6gQg71GINrmuDXCPWdL640yX49/kXLo40Tg= github.com/go-openapi/swag v0.17.2/go.mod h1:AByQ+nYG6gQg71GINrmuDXCPWdL640yX49/kXLo40Tg= github.com/go-openapi/swag v0.18.0/go.mod h1:AByQ+nYG6gQg71GINrmuDXCPWdL640yX49/kXLo40Tg= github.com/go-openapi/swag v0.19.2/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk= github.com/go-openapi/swag v0.19.5/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk= github.com/go-openapi/swag v0.22.3/go.mod h1:UzaqsxGiab7freDnrUUra0MwWfN/q7tE4j+VcZ0yl14= github.com/go-openapi/swag v0.22.4 h1:QLMzNJnMGPRNDCbySlcj1x01tzU8/9LTTL9hZZZogBU= github.com/go-openapi/swag v0.22.4/go.mod h1:UzaqsxGiab7freDnrUUra0MwWfN/q7tE4j+VcZ0yl14= 
github.com/go-openapi/validate v0.17.2/go.mod h1:Uh4HdOzKt19xGIGm1qHf/ofbX1YQ4Y+MYsct2VUrAJ4= github.com/go-openapi/validate v0.18.0/go.mod h1:Uh4HdOzKt19xGIGm1qHf/ofbX1YQ4Y+MYsct2VUrAJ4= github.com/go-openapi/validate v0.19.2/go.mod h1:1tRCw7m3jtI8eNWEEliiAqUIcBztB2KDnRCRMUi7GTA= github.com/go-openapi/validate v0.19.5/go.mod h1:8DJv2CVJQ6kGNpFW6eV9N3JviE1C85nY1c2z52x1Gk4= github.com/go-quicktest/qt v1.101.0 h1:O1K29Txy5P2OK0dGo59b7b0LR6wKfIhttaAhHUyn7eI= github.com/go-quicktest/qt v1.101.0/go.mod h1:14Bz/f7NwaXPtdYEgzsx46kqSxVwTbzVZsDC26tQJow= github.com/go-sql-driver/mysql v1.4.0/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w= github.com/go-sql-driver/mysql v1.4.1/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w= github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0/go.mod h1:fyg7847qk6SyHyPtNmDHnmrv/HOrqktSC+C9fM+CJOE= github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572 h1:tfuBGBXKqDEevZMzYi5KSi8KkcZtzBcTgAUUtapy0OI= github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572/go.mod h1:9Pwr4B2jHnOSGXyyzV8ROjYa2ojvAY6HCGYYfMoC3Ls= github.com/go-task/slim-sprig/v3 v3.0.0 h1:sUs3vkvUymDpBKi3qH1YSqBQk9+9D/8M2mN1vB6EwHI= github.com/go-task/slim-sprig/v3 v3.0.0/go.mod h1:W848ghGpv3Qj3dhTPRyJypKRiqCdHZiAzKg9hl15HA8= github.com/gobuffalo/envy v1.6.5/go.mod h1:N+GkhhZ/93bGZc6ZKhJLP6+m+tCNPKwgSpH9kaifseQ= github.com/gobuffalo/envy v1.7.0/go.mod h1:n7DRkBerg/aorDM8kbduw5dN3oXGswK5liaSCx4T5NI= github.com/gobuffalo/flect v0.1.5/go.mod h1:W3K3X9ksuZfir8f/LrfVtWmCDQFfayuylOJ7sz/Fj80= github.com/gobuffalo/flect v0.2.0/go.mod h1:W3K3X9ksuZfir8f/LrfVtWmCDQFfayuylOJ7sz/Fj80= github.com/gobuffalo/logger v1.0.0/go.mod h1:2zbswyIUa45I+c+FLXuWl9zSWEiVuthsk8ze5s8JvPs= github.com/gobuffalo/packd v0.3.0/go.mod h1:zC7QkmNkYVGKPw4tHpBQ+ml7W/3tIebgeo1b36chA3Q= github.com/gobuffalo/packr v1.30.1/go.mod h1:ljMyFO2EcrnzsHsN99cvbq055Y9OhRrIaviy289eRuk= 
github.com/gobuffalo/packr/v2 v2.5.1/go.mod h1:8f9c96ITobJlPzI44jj+4tHnEKNt0xXWSVlXRN9X1Iw= github.com/gobwas/glob v0.2.3/go.mod h1:d3Ez4x06l9bZtSvzIay5+Yzi0fmZzPgnTbPcKjJAkT8= github.com/gobwas/httphead v0.1.0/go.mod h1:O/RXo79gxV8G+RqlR/otEwx4Q36zl9rqC5u12GKvMCM= github.com/gobwas/pool v0.2.1/go.mod h1:q8bcK0KcYlCgd9e7WYLm9LpyS+YeLd8JVDW6WezmKEw= github.com/gobwas/ws v1.2.1/go.mod h1:hRKAFb8wOxFROYNsT1bqfWnhX+b5MFeJM9r2ZSwg/KY= github.com/gocql/gocql v0.0.0-20190301043612-f6df8288f9b4/go.mod h1:4Fw1eo5iaEhDUs8XyuhSVCVy52Jq3L+/3GJgYkwc+/0= github.com/godbus/dbus v0.0.0-20151105175453-c7fdd8b5cd55/go.mod h1:/YcGZj5zSblfDWMMoOzV4fas9FZnQYTkDnsGvmh2Grw= github.com/godbus/dbus v0.0.0-20180201030542-885f9cc04c9c/go.mod h1:/YcGZj5zSblfDWMMoOzV4fas9FZnQYTkDnsGvmh2Grw= github.com/godbus/dbus v0.0.0-20190422162347-ade71ed3457e/go.mod h1:bBOAhwG1umN6/6ZUMtDFBMQR8jRg9O75tm9K00oMsK4= github.com/godbus/dbus/v5 v5.0.3/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= github.com/godbus/dbus/v5 v5.1.0 h1:4KLkAxT3aOY8Li4FRJe/KvhoNFFxo0m6fNuFUO8QJUk= github.com/godbus/dbus/v5 v5.1.0/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= github.com/gofrs/flock v0.7.1/go.mod h1:F1TvTiK9OcQqauNUHlbJvyl9Qa1QvF/gOUDKA14jxHU= github.com/gogo/googleapis v1.2.0/go.mod h1:Njal3psf3qN6dwBtQfUmBZh2ybovJ0tlu3o/AC7HYjU= github.com/gogo/googleapis v1.4.0 h1:zgVt4UpGxcqVOw97aRGxT4svlcmdK35fynLNctY32zI= github.com/gogo/googleapis v1.4.0/go.mod h1:5YRNX2z1oM5gXdAkurHa942MDgEJyk02w4OecKY87+c= github.com/gogo/protobuf v0.0.0-20171007142547-342cbe0a0415/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= github.com/gogo/protobuf v1.2.0/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zVXpSg4= github.com/gogo/protobuf 
v1.2.2-0.20190723190241-65acae22fc9d/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o= github.com/gogo/protobuf v1.2.2-0.20190730201129-28a6bbf47e48/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o= github.com/gogo/protobuf v1.3.0/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o= github.com/gogo/protobuf v1.3.1/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o= github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= github.com/golang-migrate/migrate/v4 v4.6.2/go.mod h1:JYi6reN3+Z734VZ0akNuyOJNcrg45ZL7LDBMW3WGJL0= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= github.com/golang/groupcache v0.0.0-20160516000752-02826c3e7903/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20190129154638-5b532d6fd5ef/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20191027212112-611e8accdfc9/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da h1:oI5xCqsCo564l8iNU+DwB5epxmsaqB+rhGL0m5jtYqE= github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y= github.com/golang/protobuf v0.0.0-20161109072736-4bd1920723d7/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf 
v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= github.com/golang/protobuf v1.3.5/go.mod h1:6O5/vntMXwX2lRkT1hjjk0nAC1IDOTvTlVgjlRvqsdk= github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8= github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA= github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs= github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w= github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0= github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8= github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= github.com/golang/protobuf v1.5.3/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek= github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps= github.com/golang/snappy v0.0.0-20170215233205-553a64147049/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= github.com/golang/snappy v0.0.0-20180518054509-2e65f85255db/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= github.com/golang/snappy v0.0.1/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= github.com/golangplus/bytes 
v0.0.0-20160111154220-45c989fe5450/go.mod h1:Bk6SMAONeMXrxql8uvOKuAZSu8aM5RUGv+1C6IJaEho= github.com/golangplus/fmt v0.0.0-20150411045040-2a5d6d7d2995/go.mod h1:lJgMEyOkYFkPcDKwRXegd+iM6E7matEszMG5HhwytU8= github.com/golangplus/testing v0.0.0-20180327235837-af21d9c3145e/go.mod h1:0AA//k/eakGydO4jKRoRL2j92ZKSzTgj9tclaCrvXHk= github.com/goodhosts/hostsfile v0.1.6 h1:aK6DxpNV6pZ1NbdvNE2vYBMTnvIJF5O2J/8ZOlp2eMY= github.com/goodhosts/hostsfile v0.1.6/go.mod h1:bkCocEIf3Ca0hcBustUZoWYhOgKUaIK+47m8fBjoBx8= github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= github.com/google/btree v1.0.1/go.mod h1:xXMiIv4Fb/0kKde4SpL7qlzvu5cMJDRkFDxJfI9uaxA= github.com/google/gnostic-models v0.6.8 h1:yo/ABAfM5IMRsS1VnXjTBvUb61tFIHozhlYvRgGre9I= github.com/google/gnostic-models v0.6.8/go.mod h1:5n7qKqH0f5wFt+aWF8CW6pZLLNOfYuF5OpfBSENuI8U= github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.8/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/google/go-cmp v0.6.0/go.mod 
h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8= github.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU= github.com/google/go-github v17.0.0+incompatible/go.mod h1:zLgOLi98H3fifZn+44m+umXrS52loVEgC2AApnigrVQ= github.com/google/go-querystring v1.0.0/go.mod h1:odCYkC5MyYFN7vkCjXpyrEuKhc/BUO6wN/zVPAxq5ck= github.com/google/gofuzz v0.0.0-20161122191042-44d81051d367/go.mod h1:HP5RmnzzSNb993RKQDq4+1A4ia9nllfqcQFTQJedwGI= github.com/google/gofuzz v0.0.0-20170612174753-24818f796faf/go.mod h1:HP5RmnzzSNb993RKQDq4+1A4ia9nllfqcQFTQJedwGI= github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= github.com/google/gofuzz v1.1.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0= github.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs= github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= github.com/google/pprof v0.0.0-20190723021845-34ac40c74b70/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= github.com/google/pprof v0.0.0-20210407192527-94a9f03dee38/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= github.com/google/pprof v0.0.0-20240424215950-a892ee059fd6/go.mod h1:kf6iHlnVGwgKolg33glAes7Yg/8iWP8ukqeldJSO7jw= github.com/google/pprof v0.0.0-20240525223248-4bfdf5a9a2af h1:kmjWCqn2qkEml422C2Rrd27c3VGxi6a/6HNq8QmHRKM= github.com/google/pprof v0.0.0-20240525223248-4bfdf5a9a2af/go.mod h1:K1liHPHnj73Fdn/EKuT8nrFqBihUSKXoLYU0BuatOYo= github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= github.com/google/shlex 
v0.0.0-20191202100458-e7afc7fbc510/go.mod h1:pupxD2MaaD3pAXIBCelhxNneeOaAeabZDe5s4K6zSpQ= github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.1.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.2.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/googleapis/gax-go v2.0.2+incompatible/go.mod h1:SFVmujtThgffbyetf+mdk2eWhX2bMyUtNHzFKcPA9HY= github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= github.com/googleapis/gnostic v0.0.0-20170426233943-68f4ded48ba9/go.mod h1:sJBsCZ4ayReDTBIg8b9dl28c5xFWyhBTVRp3pOg5EKY= github.com/googleapis/gnostic v0.0.0-20170729233727-0c5108395e2d/go.mod h1:sJBsCZ4ayReDTBIg8b9dl28c5xFWyhBTVRp3pOg5EKY= github.com/googleapis/gnostic v0.2.0/go.mod h1:sJBsCZ4ayReDTBIg8b9dl28c5xFWyhBTVRp3pOg5EKY= github.com/googleapis/gnostic v0.3.1/go.mod h1:on+2t9HRStVgn95RSsFWFz+6Q0Snyqv1awfrALZdbtU= github.com/googleapis/gnostic v0.4.1/go.mod h1:LRhVm6pbyptWbWbuZ38d1eyptfvIytN3ir6b65WBswg= github.com/gophercloud/gophercloud v0.2.0/go.mod h1:vxM41WHh5uqHVBMZHzuwNOHh8XEoIEcSTewFxm1c5g8= github.com/gophercloud/gophercloud v0.3.0/go.mod h1:vxM41WHh5uqHVBMZHzuwNOHh8XEoIEcSTewFxm1c5g8= github.com/gophercloud/gophercloud v0.6.0/go.mod h1:GICNByuaEBibcjmjvI7QvYJSZEbGkcYwAR7EZK2WMqM= github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= github.com/gopherjs/gopherjs v0.0.0-20191106031601-ce3c9ade29de/go.mod 
h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= github.com/gorilla/context v1.1.1/go.mod h1:kBGZzfjB9CEq2AlWe17Uuf7NDRt0dE0s8S51q0aT7Yg= github.com/gorilla/handlers v0.0.0-20150720190736-60c7bfde3e33/go.mod h1:Qkdc/uu4tH4g6mTK6auzZ766c4CA0Ng8+o/OAirnOIQ= github.com/gorilla/mux v1.6.2/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs= github.com/gorilla/mux v1.7.1/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs= github.com/gorilla/mux v1.7.2/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs= github.com/gorilla/mux v1.8.0 h1:i40aqfkR1h2SlN9hojwV5ZA91wcXFOvkdNIeFDP5koI= github.com/gorilla/mux v1.8.0/go.mod h1:DVbg23sWSpFRCP0SfiEN6jmj59UnW/n46BH5rLB71So= github.com/gorilla/websocket v0.0.0-20170926233335-4201258b820c/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ= github.com/gorilla/websocket v1.4.0/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ= github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= github.com/gorilla/websocket v1.5.0 h1:PPwGk2jz7EePpoHN/+ClbZu8SPxiqlu12wZP/3sWmnc= github.com/gorilla/websocket v1.5.0/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= github.com/gosuri/uitable v0.0.4/go.mod h1:tKR86bXuXPZazfOTG1FIzvjIdXzd0mo4Vtn16vt0PJo= github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA= github.com/grpc-ecosystem/go-grpc-middleware v0.0.0-20190222133341-cfaf5686ec79/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs= github.com/grpc-ecosystem/go-grpc-middleware v1.0.0/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs= github.com/grpc-ecosystem/go-grpc-middleware v1.0.1-0.20190118093823-f849b5445de4/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs= github.com/grpc-ecosystem/go-grpc-middleware v1.1.0/go.mod h1:f5nM7jw/oeRSadq3xCzHAvxcr8HZnzsqU6ILg/0NiiE= github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk= github.com/grpc-ecosystem/grpc-gateway 
v1.3.0/go.mod h1:RSKVYQBd5MCa4OVpNdGskqpgL2+G+NZTnrVHpWWfpdw= github.com/grpc-ecosystem/grpc-gateway v1.9.0/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= github.com/grpc-ecosystem/grpc-gateway v1.9.4/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= github.com/grpc-ecosystem/grpc-gateway v1.9.5/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= github.com/grpc-ecosystem/grpc-gateway v1.12.1/go.mod h1:8XEsbTttt/W+VvjtQhLACqCisSPWTxCZ7sBRjU6iH9c= github.com/grpc-ecosystem/grpc-health-probe v0.2.1-0.20181220223928-2bf0a5b182db/go.mod h1:uBKkC2RbarFsvS5jMJHpVhTLvGlGQj9JJwkaePE3FWI= github.com/hailocab/go-hostpool v0.0.0-20160125115350-e80d13ce29ed/go.mod h1:tMWxXQ9wFIaZeTI9F+hmhFiGpFmhOHzyShyFUhRm0H4= github.com/hanwen/go-fuse v0.0.0-20190111173210-425e8d5301f6/go.mod h1:unqXarDXqzAk0rt98O2tVndEPIpUgLD9+rwFisZH3Ok= github.com/hanwen/go-fuse v1.0.0 h1:GxS9Zrn6c35/BnfiVsZVWmsG803xwE7eVRDvcf/BEVc= github.com/hanwen/go-fuse v1.0.0/go.mod h1:unqXarDXqzAk0rt98O2tVndEPIpUgLD9+rwFisZH3Ok= github.com/hashicorp/consul/api v1.1.0/go.mod h1:VmuI/Lkw1nC05EYQWNKwWGbkg+FbDBtguAZLlVdkD9Q= github.com/hashicorp/consul/api v1.3.0/go.mod h1:MmDNSzIMUjNpY/mQ398R4bk2FnqQLoPndWW5VkKPlCE= github.com/hashicorp/consul/sdk v0.1.1/go.mod h1:VKf9jXwCTEY1QZP2MOLRhb5i/I/ssyNV1vwHyQBF0x8= github.com/hashicorp/consul/sdk v0.3.0/go.mod h1:VKf9jXwCTEY1QZP2MOLRhb5i/I/ssyNV1vwHyQBF0x8= github.com/hashicorp/errwrap v0.0.0-20141028054710-7554cd9344ce/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= github.com/hashicorp/go-cleanhttp v0.5.0/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= github.com/hashicorp/go-cleanhttp v0.5.1/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= github.com/hashicorp/go-immutable-radix v1.0.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60= github.com/hashicorp/go-immutable-radix v1.1.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60= 
github.com/hashicorp/go-msgpack v0.5.3/go.mod h1:ahLV/dePpqEmjfWmKiqvPkv/twdG7iPBM1vqhUKIvfM= github.com/hashicorp/go-msgpack v0.5.5/go.mod h1:ahLV/dePpqEmjfWmKiqvPkv/twdG7iPBM1vqhUKIvfM= github.com/hashicorp/go-multierror v0.0.0-20161216184304-ed905158d874/go.mod h1:JMRHfdO9jKNzS/+BTlxCjKNQHg/jZAft8U7LloJvN7I= github.com/hashicorp/go-multierror v1.0.0/go.mod h1:dHtQlpGsu+cZNNAkkCN/P3hoUDHhCYQXV3UM06sGGrk= github.com/hashicorp/go-retryablehttp v0.5.3/go.mod h1:9B5zBasrRhHXnJnui7y6sL7es7NDiJgTc6Er0maI1Xs= github.com/hashicorp/go-rootcerts v1.0.0/go.mod h1:K6zTfqpRlCUIjkwsN4Z+hiSfzSTQa6eBIzfwKfwNnHU= github.com/hashicorp/go-rootcerts v1.0.1/go.mod h1:pqUvnprVnM5bf7AOirdbb01K4ccR319Vf4pU3K5EGc8= github.com/hashicorp/go-sockaddr v1.0.0/go.mod h1:7Xibr9yA9JjQq1JpNB2Vw7kxv8xerXegt+ozgdvDeDU= github.com/hashicorp/go-sockaddr v1.0.2/go.mod h1:rB4wwRAUzs07qva3c5SdrY/NEtAUjGlgmH/UkBUC97A= github.com/hashicorp/go-syslog v1.0.0/go.mod h1:qPfqrKkXGihmCqbJM2mZgkZGvKG1dFdvsLplgctolz4= github.com/hashicorp/go-uuid v1.0.0/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= github.com/hashicorp/go-uuid v1.0.1/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= github.com/hashicorp/go-version v1.1.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= github.com/hashicorp/go.net v0.0.1/go.mod h1:hjKkEWcCURg++eb33jQU7oqQcI9XDCnUzHA0oac0k90= github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= github.com/hashicorp/golang-lru v0.5.3/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4= github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ= github.com/hashicorp/logutils v1.0.0/go.mod h1:QIAnNjmIWmVIIkWDTG1z5v++HQmx9WQRO+LraFDTW64= github.com/hashicorp/mdns v1.0.0/go.mod h1:tL+uN++7HEJ6SQLQ2/p+z2pH24WQKWjBPkE0mNTz8vQ= github.com/hashicorp/memberlist v0.1.3/go.mod h1:ajVTdAv/9Im8oMAAj5G31PhhMCZJV2pPBoIllUwCN7I= 
github.com/hashicorp/memberlist v0.1.4/go.mod h1:ajVTdAv/9Im8oMAAj5G31PhhMCZJV2pPBoIllUwCN7I= github.com/hashicorp/memberlist v0.1.5/go.mod h1:ajVTdAv/9Im8oMAAj5G31PhhMCZJV2pPBoIllUwCN7I= github.com/hashicorp/serf v0.8.2/go.mod h1:6hOLApaqBFA1NXqRQAsxw9QxuDEvNxSQRwA/JwenrHc= github.com/hashicorp/serf v0.8.5/go.mod h1:UpNcs7fFbpKIyZaUuSW6EPiH+eZC7OuyFD+wc1oal+k= github.com/helm/helm-2to3 v0.5.1/go.mod h1:AXFpQX2cSQpss+47ROPEeu7Sm4+CRJ1jKWCEQdHP3/c= github.com/howeyc/gopass v0.0.0-20210920133722-c8aef6fb66ef h1:A9HsByNhogrvm9cWb28sjiS3i7tcKCkflWFEkHfuAgM= github.com/howeyc/gopass v0.0.0-20210920133722-c8aef6fb66ef/go.mod h1:lADxMC39cJJqL93Duh1xhAs4I2Zs8mKS89XWXFGp9cs= github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= github.com/huandu/xstrings v1.2.0/go.mod h1:DvyZB1rfVYsBIigL8HwpZgxHwXozlTgGqn63UyNX5k4= github.com/iancoleman/strcase v0.0.0-20190422225806-e506e3ef7365/go.mod h1:SK73tn/9oHe+/Y0h39VT4UCxmurVJkR5NA7kMEAOgSE= github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= github.com/ianlancetaylor/demangle v0.0.0-20240312041847-bd984b5ce465/go.mod h1:gx7rwoVhcfuVKG5uya9Hs3Sxj7EIvldVofAWIUtGouw= github.com/icrowley/fake v0.0.0-20221112152111-d7b7e2276db2 h1:qU3v73XG4QAqCPHA4HOpfC1EfUvtLIDvQK4mNQ0LvgI= github.com/icrowley/fake v0.0.0-20221112152111-d7b7e2276db2/go.mod h1:dQ6TM/OGAe+cMws81eTe4Btv1dKxfPZ2CX+YaAFAPN4= github.com/imdario/mergo v0.3.6/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= github.com/imdario/mergo v0.3.7/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= github.com/imdario/mergo v0.3.8/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= github.com/imdario/mergo v0.3.10/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA= github.com/imdario/mergo v0.3.11/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA= github.com/imdario/mergo v0.3.12 h1:b6R2BslTbIEToALKP7LxUvijTsNI9TAe80pLWN2g/HU= github.com/imdario/mergo 
v0.3.12/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA= github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8= github.com/influxdata/influxdb v1.7.7/go.mod h1:qZna6X/4elxqT3yI9iZYdZrWWdeFOOprn86kgg4+IzY= github.com/j-keck/arping v0.0.0-20160618110441-2cf9dc699c56/go.mod h1:ymszkNOg6tORTn+6F6j+Jc8TOr5osrynvN6ivFWZ2GA= github.com/jackc/fake v0.0.0-20150926172116-812a484cc733/go.mod h1:WrMFNQdiFJ80sQsxDoMokWK1W5TQtxBFNpzWTD84ibQ= github.com/jackc/pgx v3.2.0+incompatible/go.mod h1:0ZGrqGqkRlliWnWB4zKnWtjbSWbGkVEFm4TeybAXq+I= github.com/jessevdk/go-flags v0.0.0-20180331124232-1c38ed7ad0cc/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI= github.com/jessevdk/go-flags v1.4.0/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI= github.com/jmespath/go-jmespath v0.0.0-20160202185014-0b12d6b521d8/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k= github.com/jmespath/go-jmespath v0.0.0-20160803190731-bd40a432e4c7/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k= github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k= github.com/jmoiron/sqlx v1.2.0/go.mod h1:1FEQNm3xlJgrMD+FBdI9+xvCksHtbpVBBw5dYhBSsks= github.com/joefitzgerald/rainbow-reporter v0.1.0/go.mod h1:481CNgqmVHQZzdIbN52CupLJyoVwB10FQ/IQlF1pdL8= github.com/joeshaw/multierror v0.0.0-20140124173710-69b34d4ec901/go.mod h1:Z86h9688Y0wesXCyonoVr47MasHilkuLMqGhRZ4Hpak= github.com/joho/godotenv v1.3.0/go.mod h1:7hK45KPybAkOC6peb+G5yklZfMxEjkZhHbwpqxOKXbg= github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo= github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY= github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y= github.com/josharian/native v1.1.0 h1:uuaP0hAbW7Y4l0ZRQ6C9zfb7Mg1mbFKry/xzDAfmtLA= github.com/josharian/native v1.1.0/go.mod h1:7X/raswPFr05uY3HiLlYeyQntB6OO7E/d2Cu7qoaN2w= 
github.com/jpillora/backoff v1.0.0/go.mod h1:J/6gKK9jxlEcS3zixgDgUAsiuZ7yrSoa/FX5e0EB2j4= github.com/jsimonetti/rtnetlink/v2 v2.0.1 h1:xda7qaHDSVOsADNouv7ukSuicKZO7GgVUCXxpaIEIlM= github.com/jsimonetti/rtnetlink/v2 v2.0.1/go.mod h1:7MoNYNbb3UaDHtF8udiJo/RH6VsTKP1pqKLUTVCvToE= github.com/json-iterator/go v0.0.0-20180612202835-f2b4162afba3/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= github.com/json-iterator/go v0.0.0-20180701071628-ab8a2e0c74be/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= github.com/json-iterator/go v1.1.7/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= github.com/json-iterator/go v1.1.8/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= github.com/json-iterator/go v1.1.9/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= github.com/json-iterator/go v1.1.10/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM= github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo= github.com/jsonnet-bundler/jsonnet-bundler v0.2.0/go.mod h1:/by7P/OoohkI3q4CgSFqcoFsVY+IaNbzOVDknEsKDeU= github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU= github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk= github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU= github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w= github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8IZAc4RVcycCCAKdM= github.com/kardianos/osext v0.0.0-20190222173326-2bc1f35cddc0/go.mod h1:1NbS8ALrpOvjt0rHPNLyCIeMtbizbir8U//inJ+zuB8= github.com/karrick/godirwalk v1.10.12/go.mod h1:RoGL9dQei4vP9ilrpETWE8CLOZ1kiN0LhBygSwrAsHA= github.com/kisielk/errcheck 
v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q= github.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQLJ+jE2L00= github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= github.com/klauspost/compress v1.11.3/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs= github.com/klauspost/compress v1.11.13 h1:eSvu8Tmq6j2psUJqJrLcWH6K3w5Dwc+qipbaA6eVEN4= github.com/klauspost/compress v1.11.13/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs= github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/konsorten/go-windows-terminal-sequences v1.0.2/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= github.com/kr/pty v1.1.5/go.mod h1:9r2w37qlBe7rQ6e1fg1S/9xpWHSnaqNdHD3WcMdbPDA= github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= github.com/kshvakov/clickhouse v1.3.5/go.mod h1:DMzX7FxRymoNkVgizH0DWAL8Cur7wHLgx3MUnGwJqpE= github.com/kylelemons/godebug 
v0.0.0-20160406211939-eadb3ce320cb/go.mod h1:B69LEHPfb2qLo0BaaOLcbitczOKLWTsrBG9LczfCD4k= github.com/kylelemons/godebug v0.0.0-20170820004349-d65d576e9348/go.mod h1:B69LEHPfb2qLo0BaaOLcbitczOKLWTsrBG9LczfCD4k= github.com/leanovate/gopter v0.2.4/go.mod h1:gNcbPWNEWRe4lm+bycKqxUYoH5uoVje5SkOJ3uoLer8= github.com/ledongthuc/pdf v0.0.0-20220302134840-0c2507a12d80/go.mod h1:imJHygn/1yfhB7XSJJKlFZKl/J+dCPAknuiaGOshXAs= github.com/lib/pq v1.0.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo= github.com/liggitt/tabwriter v0.0.0-20181228230101-89fcab3d43de/go.mod h1:zAbeS9B/r2mtpb6U+EI2rYA5OAXxsYw6wTamcNW+zcE= github.com/lightstep/lightstep-tracer-common/golang/gogo v0.0.0-20190605223551-bc2310a04743/go.mod h1:qklhhLq1aX+mtWk9cPHPzaBjWImj5ULL6C7HFJtXQMM= github.com/lightstep/lightstep-tracer-go v0.18.0/go.mod h1:jlF1pusYV4pidLvZ+XD0UBX0ZE6WURAspgAczcDHrL4= github.com/lithammer/dedent v1.1.0/go.mod h1:jrXYCQtgg0nJiN+StA2KgR7w6CiQNv9Fd/Z9BP0jIOc= github.com/lovoo/gcloud-opentracing v0.3.0/go.mod h1:ZFqk2y38kMDDikZPAK7ynTTGuyt17nSPdS3K5e+ZTBY= github.com/magefile/mage v1.15.0 h1:BvGheCMAsG3bWUDbZ8AyXXpCNwU9u5CB6sM+HNb9HYg= github.com/magefile/mage v1.15.0/go.mod h1:z5UZb/iS3GoOSn0JgWuiw7dxlurVYTu+/jHXqQg881A= github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= github.com/mailru/easyjson v0.0.0-20160728113105-d5b7844b561a/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= github.com/mailru/easyjson v0.0.0-20180823135443-60711f1a8329/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= github.com/mailru/easyjson v0.0.0-20190312143242-1de009706dbe/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= github.com/mailru/easyjson v0.0.0-20190626092158-b2ccc519800e/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= github.com/mailru/easyjson v0.7.0/go.mod h1:KAzv3t3aY1NaHWoQz1+4F1ccyAH66Jk7yos7ldAVICs= 
github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0= github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc= github.com/maorfr/helm-plugin-utils v0.0.0-20200216074820-36d2fcf6ae86/go.mod h1:p3gwmRSFqbWw6plBpR0sKl3n3vpu8kX70gvCJKMvvCA= github.com/markbates/inflect v1.0.4/go.mod h1:1fR9+pO2KHEO9ZRtto13gDwwZaAKstQzferVeWqbgNs= github.com/marstr/guid v1.1.0/go.mod h1:74gB1z2wpxxInTG6yaqA7KrtM0NZ+RbrcqDvYHefzho= github.com/martinlindhe/base36 v1.0.0/go.mod h1:+AtEs8xrBpCeYgSLoY/aJ6Wf37jtBuR0s35750M27+8= github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU= github.com/mattn/go-colorable v0.1.2/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE= github.com/mattn/go-ieproxy v0.0.0-20190610004146-91bb50d98149/go.mod h1:31jz6HNzdxOmlERGGEc4v/dMssOfmp2p5bT/okiKFFc= github.com/mattn/go-ieproxy v0.0.0-20190702010315-6dee0af9227d/go.mod h1:31jz6HNzdxOmlERGGEc4v/dMssOfmp2p5bT/okiKFFc= github.com/mattn/go-ieproxy v0.0.0-20191113090002-7c0f6868bffe/go.mod h1:pYabZ6IHcRpFh7vIaLfK7rdcWgFEb3SFJ6/gNWuh88E= github.com/mattn/go-isatty v0.0.3/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= github.com/mattn/go-isatty v0.0.4/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= github.com/mattn/go-isatty v0.0.6/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU= github.com/mattn/go-runewidth v0.0.2/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU= github.com/mattn/go-runewidth v0.0.4/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU= github.com/mattn/go-runewidth v0.0.6/go.mod h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m2gUSrubnMI= github.com/mattn/go-shellwords v1.0.3/go.mod h1:3xCvwCdWdlDJUrvuMn7Wuy9eWs4pE8vqg+NOMyg4B2o= github.com/mattn/go-shellwords v1.0.9/go.mod 
h1:EZzvwXDESEeg03EKmM+RmDnNOPKG4lLtQsUlTZDWQ8Y= github.com/mattn/go-sqlite3 v1.9.0/go.mod h1:FPy6KqzDD04eiIsT53CuJW3U88zkxoIYsOqkbpncsNc= github.com/mattn/go-sqlite3 v1.10.0/go.mod h1:FPy6KqzDD04eiIsT53CuJW3U88zkxoIYsOqkbpncsNc= github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4= github.com/maxbrunsfeld/counterfeiter/v6 v6.2.2/go.mod h1:eD9eIE7cdwcMi9rYluz88Jz2VyhSmden33/aXg4oVIY= github.com/mdlayher/netlink v1.7.2 h1:/UtM3ofJap7Vl4QWCPDGXY8d3GIY2UGSDbK+QWmY8/g= github.com/mdlayher/netlink v1.7.2/go.mod h1:xraEF7uJbxLhc5fpHL4cPe221LI2bdttWlU+ZGLfQSw= github.com/mdlayher/socket v0.4.1 h1:eM9y2/jlbs1M615oshPQOHZzj6R6wMT7bX5NPiQvn2U= github.com/mdlayher/socket v0.4.1/go.mod h1:cAqeGjoufqdxWkD7DkpyS+wcefOtmu5OQ8KuoJGIReA= github.com/miekg/dns v1.0.14/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg= github.com/miekg/dns v1.1.15/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg= github.com/miekg/dns v1.1.22/go.mod h1:bPDLeHnStXmXAq1m/Ch/hvfNHr14JKNPMBo3VZKjuso= github.com/miekg/pkcs11 v1.0.3/go.mod h1:XsNlhZGX73bx86s2hdc/FuaLm2CPZJemRLMA+WTFxgs= github.com/mikefarah/yaml/v2 v2.4.0/go.mod h1:ahVqZF4n1W4NqwvVnZzC4es67xsW9uR/RRf2RRxieJU= github.com/mikefarah/yq/v2 v2.4.1/go.mod h1:i8SYf1XdgUvY2OFwSqGAtWOOgimD2McJ6iutoxRm4k0= github.com/minio/minio-go/v6 v6.0.49/go.mod h1:qD0lajrGW49lKZLtXKtCB4X/qkMf0a5tBvN2PaZg7Gg= github.com/minio/sha256-simd v0.1.1/go.mod h1:B5e1o+1/KgNmWrSQK08Y6Z1Vb5pwIktudl0J58iy0KM= github.com/mistifyio/go-zfs v2.1.2-0.20190413222219-f784269be439+incompatible/go.mod h1:8AuVvqP/mXw1px98n46wfvcGfQ4ci2FwoAjKYxuo3Z4= github.com/mitchellh/cli v1.0.0/go.mod h1:hNIlj7HEI86fIcpObd7a0FcrxTWetlwJDGcceTlRvqc= github.com/mitchellh/copystructure v1.0.0/go.mod h1:SNtv71yrdKgLRyLFxmLdkAbkKEFWgYaq1OVrnRcwhnw= github.com/mitchellh/go-homedir v1.0.0/go.mod 
h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= github.com/mitchellh/go-testing-interface v1.0.0/go.mod h1:kRemZodwjscx+RGhAo8eIhFbs2+BFgRtFPeD/KE+zxI= github.com/mitchellh/go-wordwrap v1.0.0/go.mod h1:ZXFpozHsX6DPmq2I0TCekCxypsnAUbP2oI0UX1GXzOo= github.com/mitchellh/gox v0.4.0/go.mod h1:Sd9lOJ0+aimLBi73mGofS1ycjY8lL3uZM3JPS42BGNg= github.com/mitchellh/hashstructure v0.0.0-20170609045927-2bca23e0e452/go.mod h1:QjSHrPWS+BGUVBYkbTZWEnOh3G1DutKwClXU/ABz6AQ= github.com/mitchellh/hashstructure v1.0.0/go.mod h1:QjSHrPWS+BGUVBYkbTZWEnOh3G1DutKwClXU/ABz6AQ= github.com/mitchellh/iochan v1.0.0/go.mod h1:JwYml1nuB7xOzsp52dPpHFffvOCDupsG0QubkSMEySY= github.com/mitchellh/mapstructure v0.0.0-20160808181253-ca63d7c062ee/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= github.com/mitchellh/osext v0.0.0-20151018003038-5e2d6d41470f/go.mod h1:OkQIRizQZAeMln+1tSwduZz7+Af5oFlKirV/MSYes2A= github.com/mitchellh/reflectwalk v1.0.0/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw= github.com/moby/locker v1.0.1 h1:fOXqR41zeveg4fFODix+1Ch4mj/gT0NE1XJbp/epuBg= github.com/moby/locker v1.0.1/go.mod h1:S7SDdo5zpBK84bzzVlKr2V0hz+7x9hWbYC/kq7oQppc= github.com/moby/spdystream v0.4.0/go.mod h1:xBAYlnt/ay+11ShkdFKNAG7LsyK/tmNBVvVOwrfMgdI= github.com/moby/spdystream v0.5.1 h1:9sNYeYZUcci9R6/w7KDaFWEWeV4LStVG78Mpyq/Zm/Y= github.com/moby/spdystream v0.5.1/go.mod h1:xBAYlnt/ay+11ShkdFKNAG7LsyK/tmNBVvVOwrfMgdI= github.com/moby/sys/mountinfo v0.4.0/go.mod h1:rEr8tzG/lsIZHBtN/JjGG+LMYx9eXgW2JI+6q0qou+A= github.com/moby/sys/mountinfo v0.4.1/go.mod h1:rEr8tzG/lsIZHBtN/JjGG+LMYx9eXgW2JI+6q0qou+A= github.com/moby/sys/mountinfo v0.7.1 h1:/tTvQaSJRr2FshkhXiIpux6fQ2Zvc4j7tAhMTStAG2g= github.com/moby/sys/mountinfo v0.7.1/go.mod h1:IJb6JQeOklcdMU9F5xQ8ZALD+CUr5VlGpwtX+VE0rpI= github.com/moby/sys/symlink v0.1.0/go.mod 
h1:GGDODQmbFOjFsXvfLVn3+ZRxkch54RkSiGqsZeMYowQ= github.com/moby/sys/user v0.3.0 h1:9ni5DlcW5an3SvRSx4MouotOygvzaXbaSrc/wGDFWPo= github.com/moby/sys/user v0.3.0/go.mod h1:bG+tYYYJgaMtRKgEmuueC0hJEAZWwtIbZTB+85uoHjs= github.com/moby/term v0.0.0-20200312100748-672ec06f55cd/go.mod h1:DdlQx2hp0Ss5/fLikoLlEeIYiATotOjgB//nb973jeo= github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= github.com/modern-go/reflect2 v0.0.0-20180320133207-05fbef0ca5da/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M= github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= github.com/morikuni/aec v0.0.0-20170113033406-39771216ff4c/go.mod h1:BbKIizmSmc5MMPqRYbxO4ZU0S0+P200+tUnFx7PXmsc= github.com/morikuni/aec v1.0.0 h1:nP9CBfwrvYnBRgY6qfDQkygYDmYwOilePFkwzv4dU8A= github.com/morikuni/aec v1.0.0/go.mod h1:BbKIizmSmc5MMPqRYbxO4ZU0S0+P200+tUnFx7PXmsc= github.com/mozillazg/go-cos v0.13.0/go.mod h1:Zp6DvvXn0RUOXGJ2chmWt2bLEqRAnJnS3DnAZsJsoaE= github.com/mozillazg/go-httpheader v0.2.1/go.mod h1:jJ8xECTlalr6ValeXYdOF8fFUISeBAdw6E61aqQma60= github.com/mrunalp/fileutils v0.5.0/go.mod h1:M1WthSahJixYnrXQl/DFQuteStB1weuxD2QJNHXfbSQ= github.com/munnerz/goautoneg v0.0.0-20120707110453-a547fc61f48d/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA= 
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f h1:y5//uYreIhSUg3J1GEMiLbxo1LJaP8RfCpH6pymGZus= github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw= github.com/nakagami/firebirdsql v0.0.0-20190310045651-3c02a58cfed8/go.mod h1:86wM1zFnC6/uDBfZGNwB65O+pR2OFi5q/YQaEUid1qA= github.com/ncw/swift v1.0.47/go.mod h1:23YIA4yWVnGwv2dQlN4bB7egfYX6YLn0Yo/S6zZO/ZM= github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A= github.com/nxadm/tail v1.4.8/go.mod h1:+ncqLTQzXmGhMZNUePPaPqPvBxHAIsmXswZKocGu+AU= github.com/oklog/run v1.0.0/go.mod h1:dlhp/R75TPv97u0XWUtDeV/lRKWPKSdTuV0TZvrmrQA= github.com/oklog/ulid v0.0.0-20170117200651-66bb6560562f/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U= github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U= github.com/olekukonko/tablewriter v0.0.0-20170122224234-a0225b3f23b5/go.mod h1:vsDQFd/mU46D+Z4whnwzcISnGGzXWMclvtLoiIKAKIo= github.com/olekukonko/tablewriter v0.0.1/go.mod h1:vsDQFd/mU46D+Z4whnwzcISnGGzXWMclvtLoiIKAKIo= github.com/olekukonko/tablewriter v0.0.2/go.mod h1:rSAaSIOAGT9odnlyGlUfAJaoc5w2fSBUmeGDbRWPxyQ= github.com/onsi/ginkgo v0.0.0-20151202141238-7f8ab55aaf3b/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v0.0.0-20170829012221-11459a886d9c/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.7.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.8.0/go.mod 
h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.10.1/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.10.3/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.11.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk= github.com/onsi/ginkgo v1.16.4 h1:29JGrr5oVBm5ulCWet69zQkzWipVXIol6ygQUe/EzNc= github.com/onsi/ginkgo v1.16.4/go.mod h1:dX+/inL/fNMqNlz0e9LfyB9TswhZpCVdJM/Z6Vvnwo0= github.com/onsi/ginkgo/v2 v2.1.3/go.mod h1:vw5CSIxN1JObi/U8gcbwft7ZxR2dgaR70JSE3/PpL4c= github.com/onsi/ginkgo/v2 v2.1.4/go.mod h1:um6tUpWM/cxCK3/FK8BXqEiUMUwRgSM4JXG47RKZmLU= github.com/onsi/ginkgo/v2 v2.1.6/go.mod h1:MEH45j8TBi6u9BMogfbp0stKC5cdGjumZj5Y7AG4VIk= github.com/onsi/ginkgo/v2 v2.3.0/go.mod h1:Eew0uilEqZmIEZr8JrvYlvOM7Rr6xzTmMV8AyFNU9d0= github.com/onsi/ginkgo/v2 v2.4.0/go.mod h1:iHkDK1fKGcBoEHT5W7YBq4RFWaQulw+caOMkAt4OrFo= github.com/onsi/ginkgo/v2 v2.5.0/go.mod h1:Luc4sArBICYCS8THh8v3i3i5CuSZO+RaQRaJoeNwomw= github.com/onsi/ginkgo/v2 v2.7.0/go.mod h1:yjiuMwPokqY1XauOgju45q3sJt6VzQ/Fict1LFVcsAo= github.com/onsi/ginkgo/v2 v2.8.1/go.mod h1:N1/NbDngAFcSLdyZ+/aYTYGSlq9qMCS/cNKGJjy+csc= github.com/onsi/ginkgo/v2 v2.9.0/go.mod h1:4xkjoL/tZv4SMWeww56BU5kAt19mVB47gTWxmrTcxyk= github.com/onsi/ginkgo/v2 v2.9.1/go.mod h1:FEcmzVcCHl+4o9bQZVab+4dC9+j+91t2FHSzmGAPfuo= github.com/onsi/ginkgo/v2 v2.9.2/go.mod h1:WHcJJG2dIlcCqVfBAwUCrJxSPFb6v4azBwgxeMeDuts= github.com/onsi/ginkgo/v2 v2.9.5/go.mod h1:tvAoo1QUJwNEU2ITftXTpR7R1RbCzoZUOs3RonqW57k= github.com/onsi/ginkgo/v2 v2.9.7/go.mod h1:cxrmXWykAwTwhQsJOPfdIDiJ+l2RYq7U8hFU+M/1uw0= github.com/onsi/ginkgo/v2 v2.11.0/go.mod h1:ZhrRA5XmEE3x3rhlzamx/JJvujdZoJ2uvgI7kR0iZvM= github.com/onsi/ginkgo/v2 v2.13.0/go.mod h1:TE309ZR8s5FsKKpuB1YAQYBzCaAfUgatB/xlT/ETL/o= github.com/onsi/ginkgo/v2 v2.17.1/go.mod h1:llBI3WDLL9Z6taip6f33H76YcWtJv+7R3HigUjbIBOs= github.com/onsi/ginkgo/v2 
v2.17.2/go.mod h1:nP2DPOQoNsQmsVyv5rDA8JkXQoCs6goXIvr/PRJ1eCc= github.com/onsi/ginkgo/v2 v2.19.0 h1:9Cnnf7UHo57Hy3k6/m5k3dRfGTMXGvxhHFvkDTCTpvA= github.com/onsi/ginkgo/v2 v2.19.0/go.mod h1:rlwLi9PilAFJ8jCg9UE1QP6VBpd6/xj3SRC0d6TU0To= github.com/onsi/gomega v0.0.0-20151007035656-2152b45fa28a/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA= github.com/onsi/gomega v0.0.0-20170829124025-dcabb60a477c/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA= github.com/onsi/gomega v0.0.0-20190113212917-5533ce8a0da3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= github.com/onsi/gomega v1.4.3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= github.com/onsi/gomega v1.5.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= github.com/onsi/gomega v1.7.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY= github.com/onsi/gomega v1.8.1/go.mod h1:Ho0h+IUsWyvy1OpqCwxlQ/21gkhVunqlU8fDGcoTdcA= github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo= github.com/onsi/gomega v1.10.3/go.mod h1:V9xEwhxec5O8UDM77eCW8vLymOMltsqPVYWrpDsH8xc= github.com/onsi/gomega v1.17.0/go.mod h1:HnhC7FXeEQY45zxNK3PPoIUhzk/80Xly9PcubAlGdZY= github.com/onsi/gomega v1.19.0/go.mod h1:LY+I3pBVzYsTBU1AnDwOSxaYi9WoWiqgwooUqq9yPro= github.com/onsi/gomega v1.20.1/go.mod h1:DtrZpjmvpn2mPm4YWQa0/ALMDj9v4YxLgojwPeREyVo= github.com/onsi/gomega v1.21.1/go.mod h1:iYAIXgPSaDHak0LCMA+AWBpIKBr8WZicMxnE8luStNc= github.com/onsi/gomega v1.22.1/go.mod h1:x6n7VNe4hw0vkyYUM4mjIXx3JbLiPaBPNgB7PRQ1tuM= github.com/onsi/gomega v1.24.0/go.mod h1:Z/NWtiqwBrwUt4/2loMmHL63EDLnYHmVbuBpDr2vQAg= github.com/onsi/gomega v1.24.1/go.mod h1:3AOiACssS3/MajrniINInwbfOOtfZvplPzuRSmvt1jM= github.com/onsi/gomega v1.26.0/go.mod h1:r+zV744Re+DiYCIPRlYOTxn0YkOLcAnW8k1xXdMPGhM= github.com/onsi/gomega v1.27.1/go.mod h1:aHX5xOykVYzWOV4WqQy0sy8BQptgukenXpCXfadcIAw= github.com/onsi/gomega v1.27.3/go.mod 
h1:5vG284IBtfDAmDyrK+eGyZmUgUlmi+Wngqo557cZ6Gw= github.com/onsi/gomega v1.27.4/go.mod h1:riYq/GJKh8hhoM01HN6Vmuy93AarCXCBGpvFDK3q3fQ= github.com/onsi/gomega v1.27.6/go.mod h1:PIQNjfQwkP3aQAH7lf7j87O/5FiNr+ZR8+ipb+qQlhg= github.com/onsi/gomega v1.27.7/go.mod h1:1p8OOlwo2iUUDsHnOrjE5UKYJ+e3W8eQ3qSlRahPmr4= github.com/onsi/gomega v1.27.8/go.mod h1:2J8vzI/s+2shY9XHRApDkdgPo1TKT7P2u6fXeJKFnNQ= github.com/onsi/gomega v1.27.10/go.mod h1:RsS8tutOdbdgzbPtzzATp12yT7kM5I5aElG3evPbQ0M= github.com/onsi/gomega v1.30.0/go.mod h1:9sxs+SwGrKI0+PWe4Fxa9tFQQBG5xSsSbMXOI8PPpoQ= github.com/onsi/gomega v1.33.0/go.mod h1:+925n5YtiFsLzzafLUHzVMBpvvRAzrydIBiSIxjX3wY= github.com/onsi/gomega v1.33.1 h1:dsYjIxxSR755MDmKVsaFQTE22ChNBcuuTWgkUDSubOk= github.com/onsi/gomega v1.33.1/go.mod h1:U4R44UsT+9eLIaYRB2a5qajjtQYn0hauxvRm16AVYg0= github.com/opencontainers/go-digest v0.0.0-20170106003457-a6d0ee40d420/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s= github.com/opencontainers/go-digest v0.0.0-20180430190053-c9281466c8b2/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s= github.com/opencontainers/go-digest v1.0.0-rc1/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s= github.com/opencontainers/go-digest v1.0.0-rc1.0.20180430190053-c9281466c8b2/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s= github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U= github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM= github.com/opencontainers/image-spec v1.0.0/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0= github.com/opencontainers/image-spec v1.0.1 h1:JMemWkRwHx4Zj+fVxWoMCFm/8sYGGrUVojFA6h/TRcI= github.com/opencontainers/image-spec v1.0.1/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0= github.com/opencontainers/runc v0.0.0-20190115041553-12f6a991201f/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U= github.com/opencontainers/runc v0.1.1/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U= 
github.com/opencontainers/runc v1.0.0-rc8.0.20190926000215-3e425f80a8c9/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U= github.com/opencontainers/runc v1.0.0-rc9/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U= github.com/opencontainers/runc v1.0.0-rc93/go.mod h1:3NOsor4w32B2tC0Zbl8Knk4Wg84SM2ImC1fxBuqJ/H0= github.com/opencontainers/runc v1.0.2/go.mod h1:aTaHFFwQXuA71CiyxOdFFIorAoemI04suvGRQFzWTD0= github.com/opencontainers/runc v1.2.8 h1:RnEICeDReapbZ5lZEgHvj7E9Q3Eex9toYmaGBsbvU5Q= github.com/opencontainers/runc v1.2.8/go.mod h1:cC0YkmZcuvr+rtBZ6T7NBoVbMGNAdLa/21vIElJDOzI= github.com/opencontainers/runtime-spec v0.1.2-0.20190507144316-5b71a03e2700/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= github.com/opencontainers/runtime-spec v1.0.1/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= github.com/opencontainers/runtime-spec v1.0.2-0.20190207185410-29686dbc5559/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= github.com/opencontainers/runtime-spec v1.0.2/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= github.com/opencontainers/runtime-spec v1.0.3-0.20200929063507-e6143ca7d51d/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= github.com/opencontainers/runtime-spec v1.0.3-0.20210326190908-1c3f411f0417/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= github.com/opencontainers/runtime-spec v1.2.1 h1:S4k4ryNgEpxW1dzyqffOmhI1BHYcjzU8lpJfSlR0xww= github.com/opencontainers/runtime-spec v1.2.1/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= github.com/opencontainers/runtime-tools v0.0.0-20181011054405-1d69bd0f9c39/go.mod h1:r3f7wjNzSs2extwzU3Y+6pKfobzPh+kKFJ3ofN+3nfs= github.com/opencontainers/selinux v1.6.0/go.mod h1:VVGKuOLlE7v4PJyT6h7mNWvq1rzqiriPsEqVhc+svHE= github.com/opencontainers/selinux v1.8.0/go.mod h1:RScLhm78qiWa2gbVCcGkC7tCGdgk3ogry1nUQF8Evvo= github.com/opencontainers/selinux v1.8.2/go.mod h1:MUIHuUEvKB1wtJjQdOyYRgOnLD2xAPP8dBsCoU0KuF8= github.com/opencontainers/selinux v1.13.0 
h1:Zza88GWezyT7RLql12URvoxsbLfjFx988+LGaWfbL84= github.com/opencontainers/selinux v1.13.0/go.mod h1:XxWTed+A/s5NNq4GmYScVy+9jzXhGBVEOAyucdRUY8s= github.com/openshift/api v0.0.0-20200205133042-34f0ec8dab87/go.mod h1:fT6U/JfG8uZzemTRwZA2kBDJP5nWz7v05UHnty/D+pk= github.com/openshift/client-go v0.0.0-20190923180330-3b6373338c9b/go.mod h1:6rzn+JTr7+WYS2E1TExP4gByoABxMznR6y2SnUIkmxk= github.com/openshift/origin v0.0.0-20160503220234-8f127d736703/go.mod h1:0Rox5r9C8aQn6j1oAOQ0c1uC86mYbUFObzjBRvUKHII= github.com/openshift/prom-label-proxy v0.1.1-0.20191016113035-b8153a7f39f1/go.mod h1:p5MuxzsYP1JPsNGwtjtcgRHHlGziCJJfztff91nNixw= github.com/opentracing-contrib/go-stdlib v0.0.0-20190519235532-cf7a6c988dc9/go.mod h1:PLldrQSroqzH70Xl+1DQcGnefIbqsKR7UDaiux3zV+w= github.com/opentracing/basictracer-go v1.0.0/go.mod h1:QfBfYuafItcjQuMwinw9GhYKwFXS9KnPs5lxoYwgW74= github.com/opentracing/opentracing-go v1.0.2/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o= github.com/opentracing/opentracing-go v1.1.0/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o= github.com/openzipkin/zipkin-go v0.1.6/go.mod h1:QgAqvLzwWbR/WpD4A3cGpPtJrZXNIiJc5AZX7/PBEpw= github.com/operator-framework/api v0.1.1/go.mod h1:yzNYR7qyJqRGOOp+bT6Z/iYSbSPNxeh3Si93Gx/3OBY= github.com/operator-framework/operator-lifecycle-manager v0.0.0-20200321030439-57b580e57e88/go.mod h1:7Ut8p9jJ8C6RZyyhZfZypmlibCIJwK5Wcc+WZDgLkOA= github.com/operator-framework/operator-registry v1.5.3/go.mod h1:agrQlkWOo1q8U1SAaLSS2WQ+Z9vswNT2M2HFib9iuLY= github.com/operator-framework/operator-registry v1.6.1/go.mod h1:sx4wWMiZtYhlUiaKscg3QQUPPM/c1bkrAs4n4KipDb4= github.com/operator-framework/operator-registry v1.6.2-0.20200330184612-11867930adb5/go.mod h1:SHff373z8asEkPo6aWpN0qId4Y/feQTjZxRF8PRhti8= github.com/operator-framework/operator-sdk v0.17.0 h1:+TTrGjXa+lm7g7Cm0UtFcgOjnw1x9/lBorydpsIIhOY= github.com/operator-framework/operator-sdk v0.17.0/go.mod h1:wmYi08aoUmtgfoUamURmssI4dkdFGNtSI1Egj+ZfBnk= github.com/orisano/pixelmatch 
v0.0.0-20220722002657-fb0b55479cde/go.mod h1:nZgzbfBr3hhjoZnS66nKrHmduYNpc34ny7RK4z5/HM0= github.com/otiai10/copy v1.0.1/go.mod h1:8bMCJrAqOtN/d9oyh5HR7HhLQMvcGMpGdwRDYsfOCHc= github.com/otiai10/copy v1.0.2/go.mod h1:c7RpqBkwMom4bYTSkLSym4VSJz/XtncWRAj/J4PEIMY= github.com/otiai10/curr v0.0.0-20150429015615-9b4961190c95/go.mod h1:9qAhocn7zKJG+0mI8eUu6xqkFDYS2kb2saOteoSB3cE= github.com/otiai10/curr v0.0.0-20190513014714-f5a3d24e5776/go.mod h1:3HNVkVOU7vZeFXocWuvtcS0XSFLcf2XUSDHkq9t1jU4= github.com/otiai10/mint v1.2.3/go.mod h1:YnfyPNhBvnY8bW4SGQHCs/aAFhkgySlMZbrF5U0bOVw= github.com/otiai10/mint v1.2.4/go.mod h1:d+b7n/0R3tdyUYYylALXpWQ/kTN+QobSq/4SRGBkR3M= github.com/otiai10/mint v1.3.0/go.mod h1:F5AjcsTsWUqX+Na9fpHb52P8pcRX2CI6A3ctIT91xUo= github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= github.com/pascaldekloe/goe v0.1.0/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= github.com/pborman/uuid v1.2.0/go.mod h1:X/NO0urCmaxf9VXbdlT7C2Yzkj2IKimNn4k+gtPdI/k= github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic= github.com/pelletier/go-toml v1.8.1/go.mod h1:T2/BmBdy8dvIRq1a/8aqjN41wvWlN4lrapLU/GW4pbc= github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR/tNboyR3/BZd58JJSHlUSCU= github.com/phayes/freeport v0.0.0-20180830031419-95f893ade6f2/go.mod h1:iIss55rKnNBTvrwdmkUpLnDpZoAHvWaiq5+iMmen4AE= github.com/pierrec/lz4 v2.0.5+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY= github.com/pkg/diff v0.0.0-20210226163009-20ebb0f2a09e/go.mod h1:pJLUxLENpZxwdsKMEsNbx1VGcRFpLqf3715MtcvvzbA= github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.8.1-0.20171018195549-f15c970de5b7/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.9.1 
h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pmezard/go-difflib v0.0.0-20151028094244-d8ed2627bdf0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U= github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/posener/complete v1.1.1/go.mod h1:em0nMJCgc9GFtwrmVmEMR/ZL6WyhyjMBndrE9hABlRI= github.com/pquerna/cachecontrol v0.0.0-20171018203845-0dec1b30a021/go.mod h1:prYjPmNq4d1NPVmpShWobRqXY3q7Vp+80DqgxxUrUIA= github.com/prashantv/gostub v1.1.0 h1:BTyx3RfQjRHnUWaGF9oQos79AlQ5k8WNktv7VGvVH4g= github.com/prashantv/gostub v1.1.0/go.mod h1:A5zLQHz7ieHGG7is6LLXLz7I8+3LZzsrV0P1IAHhP5U= github.com/prometheus/alertmanager v0.18.0/go.mod h1:WcxHBl40VSPuOaqWae6l6HpnEOVRIycEJ7i9iYkadEE= github.com/prometheus/alertmanager v0.20.0/go.mod h1:9g2i48FAyZW6BtbsnvHtMHQXl2aVtrORKwKVCQ+nbrg= github.com/prometheus/client_golang v0.0.0-20180209125602-c332b6f63c06/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= github.com/prometheus/client_golang v0.9.2/go.mod h1:OsXs2jCmiKlQ1lTBmv21f2mNfw4xf/QclQDMrYNZzcM= github.com/prometheus/client_golang v0.9.3-0.20190127221311-3c4408c8b829/go.mod h1:p2iRAGwDERtqlqzRXnrOVns+ignqQo//hLXqYxZYVNs= github.com/prometheus/client_golang v0.9.3/go.mod h1:/TN21ttK/J9q6uSwhBd54HahCDft0ttaMvbicHlPoso= github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo= github.com/prometheus/client_golang v1.1.0/go.mod h1:I1FGZT9+L76gKKOs5djB6ezCbFQP1xR9D75/vuwEF3g= github.com/prometheus/client_golang v1.2.0/go.mod 
h1:XMU6Z2MjaRKVu/dC1qupJI9SiNkDYzz3xecMgSW/F+U= github.com/prometheus/client_golang v1.2.1/go.mod h1:XMU6Z2MjaRKVu/dC1qupJI9SiNkDYzz3xecMgSW/F+U= github.com/prometheus/client_golang v1.5.1/go.mod h1:e9GMxYsXl05ICDXkRhurwBS4Q3OK1iX/F2sw+iXX5zU= github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M= github.com/prometheus/client_golang v1.19.1 h1:wZWJDwK+NameRJuPGDhlnFgx8e8HN3XHQeLaYJFJBOE= github.com/prometheus/client_golang v1.19.1/go.mod h1:mP78NwGzrVks5S2H6ab8+ZZGJLZUq1hoULYBAYBw1Ho= github.com/prometheus/client_model v0.0.0-20171117100541-99fa1f4be8e5/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= github.com/prometheus/client_model v0.0.0-20190115171406-56726106282f/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/client_model v0.6.1 h1:ZKSh/rekM+n3CeS952MLRAdFwIKqeY8b62p8ais2e9E= github.com/prometheus/client_model v0.6.1/go.mod h1:OrxVMOVHjw3lKMa8+x6HeMGkHMQyHDk9E3jmP2AmGiY= github.com/prometheus/common v0.0.0-20180110214958-89604d197083/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= github.com/prometheus/common v0.0.0-20181113130724-41aa239b4cce/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= github.com/prometheus/common v0.0.0-20181126121408-4724e9255275/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= github.com/prometheus/common v0.2.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= github.com/prometheus/common v0.4.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= 
github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= github.com/prometheus/common v0.6.0/go.mod h1:eBmuwkDJBwy6iBfxCBob6t6dR6ENT/y+J+Zk0j9GMYc= github.com/prometheus/common v0.7.0/go.mod h1:DjGbpBbp5NYNiECxcL/VnbXCCaQpKd3tt26CguLLsqA= github.com/prometheus/common v0.9.1/go.mod h1:yhUN8i9wzaXS3w1O07YhxHEBxD+W35wd8bs7vj7HSQ4= github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo= github.com/prometheus/common v0.55.0 h1:KEi6DK7lXW/m7Ig5i47x0vRzuBsHuvJdi5ee6Y3G1dc= github.com/prometheus/common v0.55.0/go.mod h1:2SECS4xJG1kd8XF9IcM1gMX6510RAEL65zxzNImwdc8= github.com/prometheus/procfs v0.0.0-20180125133057-cb4147076ac7/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= github.com/prometheus/procfs v0.0.0-20181204211112-1dc9a6cbc91a/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= github.com/prometheus/procfs v0.0.0-20190117184657-bf6a532e95b1/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= github.com/prometheus/procfs v0.0.0-20190425082905-87a4384529e0/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= github.com/prometheus/procfs v0.0.0-20190507164030-5867b95ac084/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= github.com/prometheus/procfs v0.0.0-20190522114515-bc1a522cf7b1/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= github.com/prometheus/procfs v0.0.3/go.mod h1:4A/X28fw3Fc593LaREMrKMqOKvUAntwMDaekg4FpcdQ= github.com/prometheus/procfs v0.0.5/go.mod h1:4A/X28fw3Fc593LaREMrKMqOKvUAntwMDaekg4FpcdQ= github.com/prometheus/procfs v0.0.6/go.mod h1:7Qr8sr6344vo1JqZ6HhLceV9o3AJ1Ff+GxbHq6oeK9A= github.com/prometheus/procfs v0.0.8/go.mod h1:7Qr8sr6344vo1JqZ6HhLceV9o3AJ1Ff+GxbHq6oeK9A= github.com/prometheus/procfs v0.1.3/go.mod 
h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU= github.com/prometheus/procfs v0.2.0/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU= github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA= github.com/prometheus/procfs v0.15.1 h1:YagwOFzUgYfKKHX6Dr+sHT7km/hxC76UB0learggepc= github.com/prometheus/procfs v0.15.1/go.mod h1:fB45yRUv8NstnjriLhBQLuOUt+WW4BsoGhij/e3PBqk= github.com/prometheus/prometheus v0.0.0-20180315085919-58e2a31db8de/go.mod h1:oAIUtOny2rjMX0OWN5vPR5/q/twIROJvdqnQKDdil/s= github.com/prometheus/prometheus v1.8.2-0.20200110114423-1e64d757f711/go.mod h1:7U90zPoLkWjEIQcy/rweQla82OCTUzxVHE51G3OhJbI= github.com/prometheus/prometheus v2.3.2+incompatible/go.mod h1:oAIUtOny2rjMX0OWN5vPR5/q/twIROJvdqnQKDdil/s= github.com/prometheus/tsdb v0.7.1/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40TwIPHuXU= github.com/rcrowley/go-metrics v0.0.0-20181016184325-3113b8401b8a/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4= github.com/remyoudompheng/bigfft v0.0.0-20170806203942-52369c62f446/go.mod h1:uYEyJGbgTkfkS4+E/PavXkNJcbFIpEtjt2B0KDQ5+9M= github.com/robfig/cron v0.0.0-20170526150127-736158dc09e1/go.mod h1:JGuDeoQd7Z6yL4zQhZ3OPEVHB7fL6Ka6skscFHfmt2k= github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg= github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= github.com/rogpeppe/go-internal v1.1.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= github.com/rogpeppe/go-internal v1.5.0/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTEfhy4qGm1nDQc= github.com/rogpeppe/go-internal v1.9.0/go.mod h1:WtVeX8xhTBvf0smdhujwtBcq4Qrzq/fJaraNFVN+nFs= github.com/rogpeppe/go-internal v1.12.0 h1:exVL4IDcn6na9z1rAb56Vxr+CgyK3nn3O+epU5NdKM8= github.com/rogpeppe/go-internal v1.12.0/go.mod h1:E+RYuTGaKKdloAfM02xzb0FW3Paa99yedzYV+kq4uf4= github.com/rs/cors 
v1.6.0/go.mod h1:gFx+x8UowdsKA9AchylcLynDq+nNFfI8FkUZdN/jGCU= github.com/rubenv/sql-migrate v0.0.0-20191025130928-9355dd04f4b3/go.mod h1:WS0rl9eEliYI8DPnr3TOwz4439pay+qNgzJoVya/DmY= github.com/russross/blackfriday v1.5.2/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g= github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= github.com/ryanuber/columnize v0.0.0-20160712163229-9b3edd62028f/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts= github.com/ryanuber/columnize v2.1.0+incompatible/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts= github.com/safchain/ethtool v0.0.0-20190326074333-42ed695e3de8/go.mod h1:Z0q5wiBQGYcxhMZ6gUqHn6pYNLypFAvaL3UvgZLR0U4= github.com/samuel/go-zookeeper v0.0.0-20190810000440-0ceca61e4d75/go.mod h1:gi+0XIa01GRL2eRQVjQkKGqKF3SF9vZR/HnPullcV2E= github.com/samuel/go-zookeeper v0.0.0-20190923202752-2cc03de413da/go.mod h1:gi+0XIa01GRL2eRQVjQkKGqKF3SF9vZR/HnPullcV2E= github.com/santhosh-tekuri/jsonschema v1.2.4/go.mod h1:TEAUOeZSmIxTTuHatJzrvARHiuO9LYd+cIxzgEHCQI4= github.com/satori/go.uuid v0.0.0-20160603004225-b111a074d5ef/go.mod h1:dA0hQrYB0VpLJoorglMZABFdXlWrHn1NEOzdhQKdks0= github.com/satori/go.uuid v1.2.0/go.mod h1:dA0hQrYB0VpLJoorglMZABFdXlWrHn1NEOzdhQKdks0= github.com/sclevine/spec v1.2.0/go.mod h1:W4J29eT/Kzv7/b9IWLB055Z+qvVC9vt0Arko24q7p+U= github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc= github.com/seccomp/libseccomp-golang v0.9.1/go.mod h1:GbW5+tmTXfcxTToHLXlScSlAvWlF4P2Ca7zGrPiEpWo= github.com/sergi/go-diff v1.0.0/go.mod h1:0CfEIISq7TuYL3j771MWULgwwjU+GofnZX9QAmXWZgo= github.com/shirou/gopsutil v3.21.11+incompatible h1:+1+c1VGhc88SSonWP6foOcLhvnKlUeu/erjjvaPEYiI= github.com/shirou/gopsutil v3.21.11+incompatible/go.mod h1:5b4v6he4MtMOwMlS0TUMTu2PcXUg8+E1lC7eC3UO/RA= github.com/shopspring/decimal v0.0.0-20180709203117-cd690d0c9e24/go.mod h1:M+9NzErvs504Cn4c5DxATwIqPbtswREoFCre64PpcG4= github.com/shurcooL/httpfs 
v0.0.0-20171119174359-809beceb2371/go.mod h1:ZY1cvUeJuFPAdZ/B6v7RHavJWZn2YPVFQ1OSXhCGOkg= github.com/shurcooL/httpfs v0.0.0-20190707220628-8d4bc4ba7749/go.mod h1:ZY1cvUeJuFPAdZ/B6v7RHavJWZn2YPVFQ1OSXhCGOkg= github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc= github.com/shurcooL/vfsgen v0.0.0-20180825020608-02ddb050ef6b/go.mod h1:TrYk7fJVaAttu97ZZKrO9UbRa8izdowaMIZcxYMbVaw= github.com/shurcooL/vfsgen v0.0.0-20181202132449-6a9ea43bcacd/go.mod h1:TrYk7fJVaAttu97ZZKrO9UbRa8izdowaMIZcxYMbVaw= github.com/sirupsen/logrus v1.0.4-0.20170822132746-89742aefa4b2/go.mod h1:pMByvHTf9Beacp5x1UXfOR9xyW/9antXMhjMPG0dEzc= github.com/sirupsen/logrus v1.0.6/go.mod h1:pMByvHTf9Beacp5x1UXfOR9xyW/9antXMhjMPG0dEzc= github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= github.com/sirupsen/logrus v1.3.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= github.com/sirupsen/logrus v1.4.1/go.mod h1:ni0Sbl8bgC9z8RoU9G6nDWqqs/fq4eDPysMBDgk/93Q= github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= github.com/sirupsen/logrus v1.5.0/go.mod h1:+F7Ogzej0PZc/94MaYx/nvG9jOFMD2osvC3s+Squfpo= github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88= github.com/sirupsen/logrus v1.7.0/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0= github.com/sirupsen/logrus v1.8.1/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0= github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ= github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ= github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc= github.com/smartystreets/assertions v1.0.1/go.mod h1:kHHU4qYBaI3q23Pp3VPrmWhuIUrLW/7eUrw0BU5VaoM= github.com/smartystreets/goconvey v0.0.0-20190330032615-68dc04aab96a/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA= 
github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA= github.com/soheilhy/cmux v0.1.3/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM= github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM= github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= github.com/spaolacci/murmur3 v1.1.0/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ= github.com/spf13/afero v1.2.2/go.mod h1:9ZxEEn6pIJ8Rxe320qSDBk6AsU0r9pR7Q4OcevTdifk= github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= github.com/spf13/cobra v0.0.2-0.20171109065643-2da4a54c5cee/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ= github.com/spf13/cobra v0.0.3/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ= github.com/spf13/cobra v0.0.5/go.mod h1:3K3wKZymM7VvHMDS9+Akkh4K60UwM26emMESw8tLCHU= github.com/spf13/cobra v1.0.0/go.mod h1:/6GTrnGXV9HjY+aR4k0oJ5tcvakLuG6EuKReYlHNrgE= github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo= github.com/spf13/pflag v0.0.0-20170130214245-9ff6c6923cff/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= github.com/spf13/pflag v1.0.1-0.20171106142849-4c012f6dcd95/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= github.com/spf13/pflag v1.0.1/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= github.com/spf13/viper v1.3.2/go.mod h1:ZiWeW+zYFKm7srdB9IoDzzZXaJaI5eL9QjNiN/DMA2s= github.com/spf13/viper v1.4.0/go.mod h1:PTJ7Z/lr49W6bUbkmS1V3by4uWynFiR9p7+dSq/yZzE= github.com/stefanberger/go-pkcs11uri 
v0.0.0-20201008174630-78d3cae3a980/go.mod h1:AO3tvPzVZ/ayst6UlUKUv6rcPQInYe3IknH3jYhAKu8= github.com/stretchr/objx v0.0.0-20180129172003-8a3f7159479f/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.2.0/go.mod h1:qt09Ya8vawLte6SNmTgCsAVtYtaKzEcn8ATUoHMkEqE= github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= github.com/stretchr/objx v0.5.2/go.mod h1:FRsXN1f5AsAjCGJKqEizvkpNtU+EGNCLh3NxZ/8L+MA= github.com/stretchr/testify v0.0.0-20151208002404-e3a8ff8ce365/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v0.0.0-20180303142811-b89eecf5ca5d/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= github.com/stretchr/testify v1.11.1 
h1:7s2iGBzp5EwR7/aIZr8ao5+dra3wiQyKjjFuvgVKu7U= github.com/stretchr/testify v1.11.1/go.mod h1:wZwfW3scLgRK+23gO65QZefKpKQRnfz6sD981Nm4B6U= github.com/syndtr/gocapability v0.0.0-20170704070218-db04d3cc01c8/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww= github.com/syndtr/gocapability v0.0.0-20180916011248-d98352740cb2/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww= github.com/syndtr/gocapability v0.0.0-20200815063812-42c35b437635/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww= github.com/tchap/go-patricia v2.2.6+incompatible/go.mod h1:bmLyhP68RS6kStMGxByiQ23RP/odRBOTVjwp2cDyi6I= github.com/thanos-io/thanos v0.11.0/go.mod h1:N/Yes7J68KqvmY+xM6J5CJqEvWIvKSR5sqGtmuD6wDc= github.com/tidwall/pretty v0.0.0-20180105212114-65a9db5fad51/go.mod h1:XNkn88O1ChpSDQmQeStsy+sBenx6DDtFZJxhVysOjyk= github.com/tidwall/pretty v1.0.0/go.mod h1:XNkn88O1ChpSDQmQeStsy+sBenx6DDtFZJxhVysOjyk= github.com/tklauser/go-sysconf v0.3.15 h1:VE89k0criAymJ/Os65CSn1IXaol+1wrsFHEB8Ol49K4= github.com/tklauser/go-sysconf v0.3.15/go.mod h1:Dmjwr6tYFIseJw7a3dRLJfsHAMXZ3nEnL/aZY+0IuI4= github.com/tklauser/numcpus v0.10.0 h1:18njr6LDBk1zuna922MgdjQuJFjrdppsZG60sHGfjso= github.com/tklauser/numcpus v0.10.0/go.mod h1:BiTKazU708GQTYF4mB+cmlpT2Is1gLk7XVuEeem8LsQ= github.com/tmc/grpc-websocket-proxy v0.0.0-20170815181823-89b8d40f7ca8/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= github.com/tv42/httpunix v0.0.0-20150427012821-b75d8614f926/go.mod h1:9ESjWnEqriFuLhtthL60Sar/7RFoluCcXsuvEwTV5KM= github.com/uber/jaeger-client-go v2.20.1+incompatible/go.mod h1:WVhlPFC8FDjOFMMWRy2pZqQJSXxYSwNYOkTr/Z6d3Kk= github.com/uber/jaeger-lib v2.2.0+incompatible/go.mod h1:ComeNDZlWwrWnDv8aPp0Ba6+uUTzImX/AauajbLI56U= github.com/ugorji/go v1.1.4/go.mod h1:uQMGLiO92mf5W77hV/PUCpI3pbzQx3CRekS0kk+RGrc= github.com/ugorji/go/codec v0.0.0-20181204163529-d75b2dcb6bc8/go.mod 
h1:VFNgLljTbGfSG7qAOspJ7OScBnGdDN/yBr0sguwnwf0= github.com/urfave/cli v0.0.0-20171014202726-7bc6a0acffa5/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA= github.com/urfave/cli v1.20.0/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA= github.com/urfave/cli v1.22.1/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0= github.com/urfave/cli v1.22.2/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0= github.com/vektah/gqlparser v1.1.2/go.mod h1:1ycwN7Ij5njmMkPPAOaRFY4rET2Enx7IkVv3vaXspKw= github.com/vishvananda/netlink v0.0.0-20181108222139-023a6dafdcdf/go.mod h1:+SR5DhBJrl6ZM7CoCKvpw5BKroDKQ+PJqOg65H/2ktk= github.com/vishvananda/netlink v1.1.0/go.mod h1:cTgwzPIzzgDAYoQrMm0EdrjRUBkTqKYppBueQtXaqoE= github.com/vishvananda/netlink v1.1.1-0.20201029203352-d40f9887b852/go.mod h1:twkDnbuQxJYemMlGd4JFIcuhgX83tXhKS2B/PRMpOho= github.com/vishvananda/netns v0.0.0-20180720170159-13995c7128cc/go.mod h1:ZjcWmFBXmLKZu9Nxj3WKYEafiSqer2rnvPr0en9UNpI= github.com/vishvananda/netns v0.0.0-20191106174202-0a2b9b5464df/go.mod h1:JP3t17pCcGlemwknint6hfoeCVQrEMVwxRLRjXpq+BU= github.com/vishvananda/netns v0.0.0-20200728191858-db3c7e526aae/go.mod h1:DD4vA1DwXk04H54A1oHXtwZmA0grkVMdPxx/VGLCah0= github.com/willf/bitset v1.1.11-0.20200630133818-d5bec3311243/go.mod h1:RjeCKbqT1RxIR/KWY6phxZiaY1IyutSBfGjNPySAYV4= github.com/willf/bitset v1.1.11/go.mod h1:83CECat5yLh5zVOf4P1ErAgKA5UDvKtgyUABdr3+MjI= github.com/x448/float16 v0.8.4 h1:qLwI1I70+NjRFUR3zs1JPUCgaCXSh3SW62uAKT1mSBM= github.com/x448/float16 v0.8.4/go.mod h1:14CWIYCyZA/cWjXOioeEpHeN/83MdbZDRQHoFcYsOfg= github.com/xanzy/go-gitlab v0.15.0/go.mod h1:8zdQa/ri1dfn8eS3Ir1SyfvOKlw7WBJ8DVThkpGiXrs= github.com/xdg/scram v0.0.0-20180814205039-7eeb5667e42c/go.mod h1:lB8K/P019DLNhemzwFU4jHLhdvlE6uDZjXFejJXr49I= github.com/xdg/stringprep v1.0.0/go.mod h1:Jhud4/sHMO4oL310DaZAKk9ZaJ08SJfe+sJh0HrGL1Y= github.com/xeipuuv/gojsonpointer v0.0.0-20180127040702-4e3ac2762d5f/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU= 
github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415/go.mod h1:GwrjFmJcFw6At/Gs6z4yjiIwzuJ1/+UwLxMQDVQXShQ= github.com/xeipuuv/gojsonschema v0.0.0-20180618132009-1d523034197f/go.mod h1:5yf86TLmAcydyeJq5YvxkGPE2fm/u4myDekKRoLuqhs= github.com/xeipuuv/gojsonschema v1.1.0/go.mod h1:5yf86TLmAcydyeJq5YvxkGPE2fm/u4myDekKRoLuqhs= github.com/xiang90/probing v0.0.0-20160813154853-07dd2e8dfe18/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU= github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU= github.com/xlab/handysort v0.0.0-20150421192137-fb3537ed64a1/go.mod h1:QcJo0QPSfTONNIgpN5RA8prR7fF8nkF6cTWTcNerRO8= github.com/xlab/treeprint v0.0.0-20180616005107-d6fb6747feb6/go.mod h1:ce1O1j6UtZfjr22oyGxGLbauSBp2YVXpARAosm7dHBg= github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q= github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.4.1/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= github.com/yusufpapurcu/wmi v1.2.4 h1:zFUKzehAFReQwLys1b/iSMl+JQGSCSjtVqQn9bBrPo0= github.com/yusufpapurcu/wmi v1.2.4/go.mod h1:SBZ9tNy3G9/m5Oi98Zks0QjeHVDvuK0qfxQmPyzfmi0= github.com/yvasiyarov/go-metrics v0.0.0-20140926110328-57bccd1ccd43/go.mod h1:aX5oPXxHm3bOH+xeAttToC8pqch2ScQN/JoXYupl6xs= github.com/yvasiyarov/gorelic v0.0.0-20141212073537-a9bba5b9ab50/go.mod h1:NUSPSUX/bi6SeDMUh6brw0nXpxHnc96TguQh0+r/ssA= github.com/yvasiyarov/newrelic_platform_go v0.0.0-20140908184405-b21fdbd4370f/go.mod h1:GlGEuHIJweS1mbCqG+7vt2nvWLzLLnRHbXz5JKd/Qbg= github.com/ziutek/mymysql v1.5.4/go.mod 
h1:LMSpPZ6DbqWFxNCHW77HeMg9I646SAhApZ/wKdgO/C0= gitlab.com/nyarla/go-crypt v0.0.0-20160106005555-d9a5dc2b789b/go.mod h1:T3BPAOm2cqquPa0MKWeNkmOM5RQsRhkrwMWonFMN7fE= go.elastic.co/apm v1.5.0/go.mod h1:OdB9sPtM6Vt7oz3VXt7+KR96i9li74qrxBGHTQygFvk= go.elastic.co/apm/module/apmhttp v1.5.0/go.mod h1:1FbmNuyD3ddauwzgVwFB0fqY6KbZt3JkV187tGCYYhY= go.elastic.co/apm/module/apmot v1.5.0/go.mod h1:d2KYwhJParTpyw2WnTNy8geNlHKKFX+4oK3YLlsesWE= go.elastic.co/fastjson v1.0.0/go.mod h1:PmeUOMMtLHQr9ZS9J9owrAVg0FkaZDRZJEFTTGHtchs= go.etcd.io/bbolt v1.3.2/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= go.etcd.io/bbolt v1.3.3/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= go.etcd.io/bbolt v1.3.5/go.mod h1:G5EMThwa9y8QZGBClrRx5EY+Yw9kAhnjy3bSjsnlVTQ= go.etcd.io/etcd v0.0.0-20191023171146-3cf2f69b5738/go.mod h1:dnLIgRNXwCJa5e+c6mIZCrds/GIG4ncV9HhK5PX7jPg= go.etcd.io/etcd v0.5.0-alpha.5.0.20200910180754-dd1b699fc489/go.mod h1:yVHk9ub3CSBatqGNg7GRmsnfLWtoW60w4eDYfh7vHDg= go.mongodb.org/mongo-driver v1.0.3/go.mod h1:u7ryQJ+DOzQmeO7zB6MHyr8jkEQvC8vH7qLUO4lqsUM= go.mongodb.org/mongo-driver v1.1.0/go.mod h1:u7ryQJ+DOzQmeO7zB6MHyr8jkEQvC8vH7qLUO4lqsUM= go.mongodb.org/mongo-driver v1.1.1/go.mod h1:u7ryQJ+DOzQmeO7zB6MHyr8jkEQvC8vH7qLUO4lqsUM= go.mongodb.org/mongo-driver v1.1.2/go.mod h1:u7ryQJ+DOzQmeO7zB6MHyr8jkEQvC8vH7qLUO4lqsUM= go.mozilla.org/pkcs7 v0.0.0-20200128120323-432b2356ecb1/go.mod h1:SNgMg+EgDFwmvSmLRTNKC5fegJjB7v23qTQ0XLGUNHk= go.opencensus.io v0.20.1/go.mod h1:6WKK9ahsWS3RSO+PY9ZHZUfv2irvY6gN279GOPZjmmk= go.opencensus.io v0.20.2/go.mod h1:6WKK9ahsWS3RSO+PY9ZHZUfv2irvY6gN279GOPZjmmk= go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8= go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.opencensus.io v0.22.3 h1:8sGtKOrtQqkN1bp2AtX+misvLIlOmsEsNd+9NIcPEm8= go.opencensus.io v0.22.3/go.mod 
h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.opentelemetry.io/auto/sdk v1.2.1 h1:jXsnJ4Lmnqd11kwkBV2LgLoFMZKizbCi5fNZ/ipaZ64= go.opentelemetry.io/auto/sdk v1.2.1/go.mod h1:KRTj+aOaElaLi+wW1kO/DZRXwkF4C5xPbEe3ZiIhN7Y= go.opentelemetry.io/otel v1.39.0 h1:8yPrr/S0ND9QEfTfdP9V+SiwT4E0G7Y5MO7p85nis48= go.opentelemetry.io/otel v1.39.0/go.mod h1:kLlFTywNWrFyEdH0oj2xK0bFYZtHRYUdv1NklR/tgc8= go.opentelemetry.io/otel/metric v1.39.0 h1:d1UzonvEZriVfpNKEVmHXbdf909uGTOQjA0HF0Ls5Q0= go.opentelemetry.io/otel/metric v1.39.0/go.mod h1:jrZSWL33sD7bBxg1xjrqyDjnuzTUB0x1nBERXd7Ftcs= go.opentelemetry.io/otel/sdk v1.39.0 h1:nMLYcjVsvdui1B/4FRkwjzoRVsMK8uL/cj0OyhKzt18= go.opentelemetry.io/otel/sdk v1.39.0/go.mod h1:vDojkC4/jsTJsE+kh+LXYQlbL8CgrEcwmt1ENZszdJE= go.opentelemetry.io/otel/sdk/metric v1.39.0 h1:cXMVVFVgsIf2YL6QkRF4Urbr/aMInf+2WKg+sEJTtB8= go.opentelemetry.io/otel/sdk/metric v1.39.0/go.mod h1:xq9HEVH7qeX69/JnwEfp6fVq5wosJsY1mt4lLfYdVew= go.opentelemetry.io/otel/trace v1.39.0 h1:2d2vfpEDmCJ5zVYz7ijaJdOF59xLomrvj7bjt6/qCJI= go.opentelemetry.io/otel/trace v1.39.0/go.mod h1:88w4/PnZSazkGzz/w84VHpQafiU4EtqqlVdxWy+rNOA= go.uber.org/atomic v0.0.0-20181018215023-8dc6146f7569/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= go.uber.org/atomic v1.5.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ= go.uber.org/atomic v1.6.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ= go.uber.org/automaxprocs v1.2.0/go.mod h1:YfO3fm683kQpzETxlTGZhGIVmXAhaw3gxeBADbpZtnU= go.uber.org/automaxprocs v1.6.0 h1:O3y2/QNTOdbF+e/dpXNNW7Rx2hZ4sTIPyybbxyNqTUs= go.uber.org/automaxprocs v1.6.0/go.mod h1:ifeIMSnPZuznNm6jmdzmU3/bfk01Fe2fotchwEFJ8r8= go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto= go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE= go.uber.org/multierr 
v0.0.0-20180122172545-ddea229ff1df/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0= go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0= go.uber.org/multierr v1.5.0/go.mod h1:FeouvMocqHpRaaGuG9EjoKcStLC43Zu/fmqdUMPcKYU= go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0= go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y= go.uber.org/tools v0.0.0-20190618225709-2cfd321de3ee/go.mod h1:vJERXedbb3MVM5f9Ejo0C68/HhF8uaILCdgjnY+goOA= go.uber.org/zap v0.0.0-20180814183419-67bc79d13d15/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= go.uber.org/zap v1.14.1/go.mod h1:Mb2vm2krFEG5DV0W9qcHBYFtp/Wku1cvYaqPsS/WYfc= go.uber.org/zap v1.26.0 h1:sI7k6L95XOKS281NhVKOFCUNIvv9e0w4BF8N3u+tCRo= go.uber.org/zap v1.26.0/go.mod h1:dtElttAiwGvoJ/vj4IwHBS/gXsEu/pZ50mUIRWuG0so= golang.org/x/crypto v0.0.0-20171113213409-9f005a07e0d3/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20181009213950-7c1a557ab941/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20181029021203-45a5f77698d3/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20181203042331-505ab145d0a9/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20190211182817-74369b46fc67/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20190320223903-b7391e95e576/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20190325154230-a5d413f7728c/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20190426145343-a29dc8fdc734/go.mod 
h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190513172903-22d7a77e9e5f/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190611184440-5c40567a22f8/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190617133340-57b3e21c3d56/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190621222207-cc06ce4a13d4/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190701094942-4def268fd1a4/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190820162420-60c769a6c586/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190911031432-227b76d455e7/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190923035154-9ee001bba392/go.mod h1:/lpIB1dKB+9EgE3H3cr1v9wB50oz8l4C4h62xy7jSTY= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20191112222119-e1110fd1c708/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200128174031-69ecbb4d6d5d/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200220183623-bac4c82f6975/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200728195943-123391ffb6de/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20201002170205-7f63de1d35b0/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20210322153248-0c34fe9e7dc2/go.mod 
h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4= golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/crypto v0.1.0/go.mod h1:RecgLatLF4+eUMCP1PoPZQb+cVrJcOPbHkTkbkB9sbw= golang.org/x/crypto v0.11.0/go.mod h1:xgJhtzW8F9jGdVFWZESrid1U1bjeNy4zgy5cRr/CIio= golang.org/x/crypto v0.12.0/go.mod h1:NF0Gs7EO5K4qLn+Ylc+fih8BSTeIjAP05siRnAh98yw= golang.org/x/crypto v0.13.0/go.mod h1:y6Z2r+Rw4iayiXXAIxJIDAJ1zMW4yaTpebo8fPOliYc= golang.org/x/crypto v0.14.0/go.mod h1:MVFd36DqK4CsrnJYDkBA3VC4m2GkXAM0PvzMCn4JQf4= golang.org/x/crypto v0.16.0/go.mod h1:gCAAfMLgwOJRpTjQ2zCCt2OcSfYMTeZVSRtQlPC7Nq4= golang.org/x/crypto v0.18.0/go.mod h1:R0j02AL6hcrfOiy9T4ZYp/rcWeMxM3L6QYxlOuEG1mg= golang.org/x/crypto v0.19.0/go.mod h1:Iy9bg/ha4yyC70EfRS8jz+B6ybOBKMaSxLj6P6oBDfU= golang.org/x/crypto v0.21.0/go.mod h1:0BP7YvVV9gBbVKyeTG0Gyn+gZm94bibOW5BjDEYAOMs= golang.org/x/crypto v0.22.0/go.mod h1:vr6Su+7cTlO45qkww3VDJlzDn0ctJvRgYbC2NvXHt+M= golang.org/x/crypto v0.23.0/go.mod h1:CKFgDieR+mRhux2Lsu27y0fO304Db0wZe70UKqHu0v8= golang.org/x/crypto v0.24.0/go.mod h1:Z1PMYSOR5nyMcyAVAIQSKCDwalqy85Aqn1x3Ws4L5DM= golang.org/x/crypto v0.46.0 h1:cKRW/pmt1pKAfetfu+RCEvjvZkA9RimPbh7bhFjGVBU= golang.org/x/crypto v0.46.0/go.mod h1:Evb/oLKmMraqjZ2iQTwDwvCtJkczlDuTmdJXoZVzqU0= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190125153040-c74c464bbbf2/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190312203227-4b39c73a6495/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= golang.org/x/exp v0.0.0-20190829153037-c13cbed26979/go.mod h1:86+5VVa7VpoJ4kLfm080zCjGlMRFzhUhsZKEZO7MGek= golang.org/x/exp 
v0.0.0-20191030013958-a1ab85dbe136/go.mod h1:JXzH8nQsPlswgeRAPE3MuO9GYsAcnJvJ4vnMwN/5qkY= golang.org/x/exp v0.0.0-20230515195305-f3d0a9c9a5cc h1:mCRnTeVUjcrhlRmO0VK8a6k6Rrf6TF9htwo2pJVSjIU= golang.org/x/exp v0.0.0-20230515195305-f3d0a9c9a5cc/go.mod h1:V1LtkGg67GoY2N1AnLN78QLrzxkLyJw7RJb1gzOOz9w= golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js= golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= golang.org/x/lint v0.0.0-20190409202823-959b441ac422/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= golang.org/x/lint v0.0.0-20190909230951-414d861bb4ac/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE= golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o= golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc= golang.org/x/mod v0.1.0/go.mod h1:0QHyrYULN0/3qlju5TqG8bIK38QM8yzMo5ekMj3DlcY= golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod 
v0.6.0-dev.0.20220106191415-9b9b3d81d5e3/go.mod h1:3p9vT2HGsQu2K1YbXdKPJLVgG5VJdoTa1poYQBtP1AY= golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= golang.org/x/mod v0.6.0/go.mod h1:4mET923SAdbXp2ki8ey+zGs1SLqsuM2Y0uvdZR/fUNI= golang.org/x/mod v0.7.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= golang.org/x/mod v0.9.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= golang.org/x/mod v0.10.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= golang.org/x/mod v0.12.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= golang.org/x/mod v0.14.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= golang.org/x/mod v0.15.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= golang.org/x/mod v0.17.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= golang.org/x/net v0.0.0-20170114055629-f2499483f923/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181005035420-146acd28ed58/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181011144130-49bb7cea24b1/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181023162649-9b4f9f5ad519/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181108082009-03003ca0c849/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181201002055-351d144fa1fc/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= 
golang.org/x/net v0.0.0-20181220203305-927f97764cc3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190125091013-d26f9f9a57f3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190206173232-65e2d4e15006/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190320064053-1272bf9dcd53/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190424112056-4829fb13d2c6/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190522155817-f3200d17e092/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190619014844-b5b0513f8c1b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190628185345-da137c7871d7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190812203447-cdfb69ac37fc/go.mod 
h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190813141303-74dc4d7220e7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190827160401-ba9fcec4b297/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190923162816-aa69164e4478/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20191002035440-2ec189313ef0/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20191004110552-13f9640d40b9/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20191028085509-fe3aa8a45271/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20191112182307-2180aed22343/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= golang.org/x/net v0.0.0-20200520004742-59133d7f0dd7/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= golang.org/x/net v0.0.0-20201006153459-a7d1128ccaa0/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20201224014010-6772e930b67b/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20210428140749-89ef3d95e781/go.mod h1:OJAsFXCWl8Ukc7SiCT/9KSuxbyM7479/AVlXFRxuMCk= golang.org/x/net 
v0.0.0-20211015210444-4f30a5c0130f/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20220225172249-27dd8689420f/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= golang.org/x/net v0.0.0-20220425223048-2871e0cb64e4/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= golang.org/x/net v0.1.0/go.mod h1:Cx3nUiGt4eDBEyega/BKRp+/AlGL8hYe7U9odMt2Cco= golang.org/x/net v0.2.0/go.mod h1:KqCZLdyyvdV855qA2rE3GC2aiw5xGR5TEjj8smXukLY= golang.org/x/net v0.3.0/go.mod h1:MBQ8lrhLObU/6UmLb4fmbmk5OcyYmqtbGd/9yIeKjEE= golang.org/x/net v0.5.0/go.mod h1:DivGGAXEgPSlEBzxGzZI+ZLohi+xUj054jfeKui00ws= golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= golang.org/x/net v0.7.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= golang.org/x/net v0.8.0/go.mod h1:QVkue5JL9kW//ek3r6jTKnTFis1tRmNAW2P1shuFdJc= golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg= golang.org/x/net v0.12.0/go.mod h1:zEVYFnQC7m/vmpQFELhcD1EWkZlX69l4oqgmer6hfKA= golang.org/x/net v0.14.0/go.mod h1:PpSgVXXLK0OxS0F31C1/tv6XNguvCrnXIDrFMspZIUI= golang.org/x/net v0.15.0/go.mod h1:idbUs1IY1+zTqbi8yxTbhexhEEk5ur9LInksu6HrEpk= golang.org/x/net v0.17.0/go.mod h1:NxSsAGuq816PNPmqtQdLE42eU2Fs7NoRIZrHJAlaCOE= golang.org/x/net v0.19.0/go.mod h1:CfAk/cbD4CthTvqiEl8NpboMuiuOYsAr/7NOjZJtv1U= golang.org/x/net v0.20.0/go.mod h1:z8BVo6PvndSri0LbOE3hAn0apkU+1YvI6E70E9jsnvY= golang.org/x/net v0.21.0/go.mod h1:bIjVDfnllIU7BJ2DNgfnXvpSvtn8VRwhlsaeUTyUS44= golang.org/x/net v0.23.0/go.mod h1:JKghWKKOSdJwpW2GEx0Ja7fmaKnMsbu+MWVZTokSYmg= golang.org/x/net v0.24.0/go.mod h1:2Q7sJY5mzlzWjKtYUEXSlBWCdyaioyXzRB2RtU8KVE8= golang.org/x/net v0.25.0/go.mod h1:JkAGAh7GEvH74S6FOH42FLoXpXbE/aqXSrIQjXgsiwM= golang.org/x/net v0.26.0/go.mod h1:5YKkiSynbBIh3p6iOc/vibscux0x38BZDkn8sCUPxHE= golang.org/x/net v0.48.0 h1:zyQRTTrjc33Lhh0fBgT/H3oZq9WuvRR5gPC70xpDiQU= 
golang.org/x/net v0.48.0/go.mod h1:+ndRgGjkh8FGtu1w1FGbEC31if4VrNVMuKTgcAAnQRY= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20181106182150-f42d05182288/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20190402181905-9f3314589c9a/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.21.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI= golang.org/x/oauth2 v0.34.0 h1:hqK/t4AKgbqWkdkcAeI8XLmbK+4m4G5YeQRrmiotGlw= golang.org/x/oauth2 v0.34.0/go.mod h1:lzm5WQJQwKZ3nwavOZ3IS5Aulzxi68dUSgRHujetwEA= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync 
v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.2.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.3.0/go.mod h1:FU7BRWz2tNW+3quACPkgCx/L+uEAv1htQ0V83Z9Rj+Y= golang.org/x/sync v0.5.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= golang.org/x/sync v0.6.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= golang.org/x/sync v0.7.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= golang.org/x/sync v0.19.0 h1:vV+1eWNmZ5geRlYjzm2adRgW2/mcpevXNg50YZtPCE4= golang.org/x/sync v0.19.0/go.mod h1:9KTHXmSnoGruLpwFjVSX0lNNA75CykiMECbovNTZqGI= golang.org/x/sys v0.0.0-20170830134202-bb24a47a89ea/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20181026203630-95b1ffbd15a5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20181107165924-66b7b1311ac8/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20181122145206-62eef0e2fa9b/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20181205085412-a5c9d58dba9a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190102155601-82a175fd1598/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190209173611-3b5209105503/go.mod 
h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190310054646-10058d7d4faa/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190321052220-f7bb7a8bee54/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190425145619-16072639606e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190426135247-a129542de9ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190514135907-3a4b5fb9f71f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190515120540-06a5c4944438/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190522044717-8097e1b27ff5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190602015325-4c4f7f33c9ed/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190606203320-7fc4e5ec1444/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190616124812-15dcb6c0061f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys 
v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190626221950-04f50cda93cb/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190712062909-fae7ac547cb7/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190801041406-cbf593c0f2f3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190812073006-9eafafc0a87e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190813064441-fde4db37ae7a/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190826190057-c7b8b68b1456/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190904154756-749cb33beabd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190916202348-b4ddaad3f8a3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190922100055-0a153f010e69/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190924154521-2837fb4f24fe/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191010194322-b09406accb47/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191022100944-742c48ecaeb7/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191025021431-6c3a3bfe00ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191112214154-59a1497f0cea/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191113165036-4c7a9d0fe056/go.mod 
h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191115151921-52ab43148777/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191210023423-ac6580df4449/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200106162015-b016eb3dc98e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200120151820-655fe14d7479/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200124204421-9fbb57f87de9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200217220822-9197077df867/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200302150141-5c8b2ff67527/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200615200032-f1bc736245b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200622214017-ed371f2e16b4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200728102440-3e129f6d46b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200817155316-9781c653f443/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200909081042-eff7692f9009/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys 
v0.0.0-20200916030750-2334cc1a136f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200922070232-aee5d888a860/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201112073958-5cba982894dd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201117170446-d9b008d0a637/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201201145000-ef89a241ccb3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201202213521-69691e467435/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210112080510-489259a85091/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210324051608-47abb6519492/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210426230700-d19ff857e887/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20211019181941-9d821ace8654/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220310020820-b874c991c1a5/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220319134239-a9b59b0215f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220422013727-9388b58f7150/go.mod 
h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.2.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.3.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.4.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.9.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.10.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.11.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.12.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.13.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.15.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/sys v0.16.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/sys v0.17.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/sys v0.18.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/sys v0.19.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/sys v0.20.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/sys v0.21.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/sys v0.39.0 h1:CvCKL8MeisomCi6qNZ+wbb0DN9E5AATixKsvNtMoMFk= golang.org/x/sys v0.39.0/go.mod 
h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks= golang.org/x/telemetry v0.0.0-20240208230135-b75ee8823808/go.mod h1:KG1lNk5ZFNssSZLrpVb4sMXKMpGwGXOxSG3rnu2gZQQ= golang.org/x/telemetry v0.0.0-20240228155512-f48c80bd79b2/go.mod h1:TeRTkGYfJXctD9OcfyVLyj2J3IxLnKwHJR8f4D8a3YE= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term v0.1.0/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term v0.2.0/go.mod h1:TVmDHMZPmdnySmBfhjOoOdhjzdE1h4u1VwSiw2l1Nuc= golang.org/x/term v0.3.0/go.mod h1:q750SLmJuPmVoN1blW3UFBPREJfb1KmY3vwxfr+nFDA= golang.org/x/term v0.4.0/go.mod h1:9P2UbLfCdcvo3p/nzKvsmas4TnlujnuoV9hGgYzW1lQ= golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k= golang.org/x/term v0.6.0/go.mod h1:m6U89DPEgQRMq3DNkDClhWw02AUbt2daBVO4cn4Hv9U= golang.org/x/term v0.8.0/go.mod h1:xPskH00ivmX89bAKVGSKKtLOWNx2+17Eiy94tnKShWo= golang.org/x/term v0.10.0/go.mod h1:lpqdcUyK/oCiQxvxVrppt5ggO2KCZ5QblwqPnfZ6d5o= golang.org/x/term v0.11.0/go.mod h1:zC9APTIj3jG3FdV/Ons+XE1riIZXG4aZ4GTHiPZJPIU= golang.org/x/term v0.12.0/go.mod h1:owVbMEjm3cBLCHdkQu9b1opXd4ETQWc3BhuQGKgXgvU= golang.org/x/term v0.13.0/go.mod h1:LTmsnFJwVN6bCy1rVCoS+qHT1HhALEFxKncY3WNNh4U= golang.org/x/term v0.15.0/go.mod h1:BDl952bC7+uMoWR75FIrCDx79TPU9oHkTZ9yRbYOrX0= golang.org/x/term v0.16.0/go.mod h1:yn7UURbUtPyrVJPGPq404EukNFxcm/foM+bV/bfcDsY= golang.org/x/term v0.17.0/go.mod h1:lLRBjIVuehSbZlaOtGMbcMncT+aqLLLmKrsjNrUguwk= golang.org/x/term v0.18.0/go.mod h1:ILwASektA3OnRv7amZ1xhE/KTR+u50pbXfZ03+6Nx58= golang.org/x/term v0.19.0/go.mod h1:2CuTdWZ7KHSQwUzKva0cbMg6q2DMI3Mmxp+gKJbskEk= golang.org/x/term v0.20.0/go.mod h1:8UkIAJTvZgivsXaD6/pH6U9ecQzZ45awqEOzuCvwpFY= golang.org/x/term v0.21.0/go.mod h1:ooXLefLobQVslOqselCNF4SxFAaoS6KujMbsGzSDmX0= golang.org/x/term v0.38.0 
h1:PQ5pkm/rLO6HnxFR7N2lJHOZX6Kez5Y1gDSJla6jo7Q= golang.org/x/term v0.38.0/go.mod h1:bSEAKrOT1W+VSu9TSCMtoGEOUcKxOKgl3LE5QEF/xVg= golang.org/x/text v0.0.0-20160726164857-2910a502d2bf/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20180805044716-cb6730876b98/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20181227161524-e6919f6577db/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= golang.org/x/text v0.5.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= golang.org/x/text v0.6.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= golang.org/x/text v0.8.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= golang.org/x/text v0.11.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= golang.org/x/text v0.12.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= golang.org/x/text v0.13.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= golang.org/x/text v0.15.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= golang.org/x/text v0.16.0/go.mod 
h1:GhwF1Be+LQoKShO3cGOHzqOgRrGaYc9AvblQOmPVHnI= golang.org/x/text v0.32.0 h1:ZD01bjUt1FQ9WJ0ClOL5vxgxOI/sVCNgX1YtKwcY0mU= golang.org/x/text v0.32.0/go.mod h1:o/rUWzghvpD5TXrTIBuJU77MTaN0ljMWE47kxGJQ7jY= golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.3.0 h1:rg5rLMjNzMS1RkNLzCG38eapWhnYLFYXDXj2gOlr8j4= golang.org/x/time v0.3.0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20180828015842-6cd1fcedba52/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20181011042414-1f849cf54d09/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20181030221726-6c7e314b6563/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190118193359-16909d206f00/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190125232054-d66bd3c5d5a6/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190206041539-40960b6deb8e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= 
golang.org/x/tools v0.0.0-20190312151545-0bb0c0a6e846/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= golang.org/x/tools v0.0.0-20190328211700-ab21143f2384/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= golang.org/x/tools v0.0.0-20190425222832-ad9eeb80039a/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= golang.org/x/tools v0.0.0-20190614205625-5aca471b1d59/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= golang.org/x/tools v0.0.0-20190617190820-da514acc4774/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= golang.org/x/tools v0.0.0-20190624180213-70d37148ca0c/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= golang.org/x/tools v0.0.0-20190624222133-a101b041ded4/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= golang.org/x/tools v0.0.0-20190706070813-72ffa07ba3db/go.mod h1:jcCCGcm9btYwXyDqrUWc6MKQKKGJCWEQ3AfLSRIbEuI= golang.org/x/tools v0.0.0-20190813034749-528a2984e271/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20190907020128-2ca718005c18/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools 
v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20190918214516-5a1a30219888/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20190920225731-5eefd052ad72/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191029041327-9cc4af7d6b2c/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191029190741-b9c20aec41a5/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191030203535-5e247c9ad0a0/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191111182352-50fa39b762bc/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191113191852-77e3bb0ad9e7/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191115202509-3a792d9c32b2/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20200115044656-831fdb1e1868/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= golang.org/x/tools v0.0.0-20200327195553-82bb89366a1e/go.mod h1:Sl4aGygMT6LrqrWclx+PTx3U+LnKx/seiNR+3G19Ar8= golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20201224043029-2b0845dc783e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.1.10/go.mod h1:Uh6Zz+xoGYZom868N8YTex3t7RhtHDBrE8Gzo9bV56E= golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= golang.org/x/tools v0.2.0/go.mod h1:y4OqIKeOV/fWJetJ8bXPU1sEVniLMIyDAZWeHdV+NTA= golang.org/x/tools 
v0.4.0/go.mod h1:UE5sM2OK9E/d67R0ANs2xJizIymRP5gJU295PvKXxjQ= golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU= golang.org/x/tools v0.7.0/go.mod h1:4pg6aUX35JBAogB10C9AtvVL+qowtN4pT3CGSQex14s= golang.org/x/tools v0.9.1/go.mod h1:owI94Op576fPu3cIGQeHs3joujW/2Oc6MtlxbF5dfNc= golang.org/x/tools v0.9.3/go.mod h1:owI94Op576fPu3cIGQeHs3joujW/2Oc6MtlxbF5dfNc= golang.org/x/tools v0.12.0/go.mod h1:Sc0INKfu04TlqNoRA1hgpFZbhYXHPr4V5DzpSBTPqQM= golang.org/x/tools v0.13.0/go.mod h1:HvlwmtVNQAhOuCjW7xxvovg8wbNq7LwfXh/k7wXUl58= golang.org/x/tools v0.16.1/go.mod h1:kYVVN6I1mBNoB1OX+noeBjbRk4IUEPa7JJ+TJMEooJ0= golang.org/x/tools v0.17.0/go.mod h1:xsh6VxdV005rRVaS6SSAf9oiAqljS7UZUacMZ8Bnsps= golang.org/x/tools v0.18.0/go.mod h1:GL7B4CwcLLeo59yx/9UWWuNOW1n3VZ4f5axWfML7Lcg= golang.org/x/tools v0.20.0/go.mod h1:WvitBU7JJf6A4jOdg4S1tviW9bhUxkgeCui/0JHctQg= golang.org/x/tools v0.21.0/go.mod h1:aiJjzUbINMkxbQROHiO6hDPo2LHcIPhhQsa9DLh0yGk= golang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d/go.mod h1:aiJjzUbINMkxbQROHiO6hDPo2LHcIPhhQsa9DLh0yGk= golang.org/x/tools v0.39.0 h1:ik4ho21kwuQln40uelmciQPp9SipgNDdrafrYA4TmQQ= golang.org/x/tools v0.39.0/go.mod h1:JnefbkDPyD8UU2kI5fuf8ZX4/yUeh9W877ZeBONxUqQ= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20220907171357-04be3eba64a2/go.mod h1:K8+ghG5WaK9qNqU5K3HdILfMLy1f3aNYFI/wnl100a8= gomodules.xyz/jsonpatch/v2 v2.0.1/go.mod h1:IhYNNY4jnS53ZnfE4PAmpKtDpTCj1JFXc+3mwe7XcUU= gomodules.xyz/jsonpatch/v2 v2.4.0 h1:Ci3iUJyx9UeRx7CeFN8ARgGbkESwJK+KB9lLcWxY/Zw= gomodules.xyz/jsonpatch/v2 v2.4.0/go.mod 
h1:AH3dM2RI6uoBZxn3LVrfvJ3E0/9dG4cSrbuBJT4moAY= gomodules.xyz/jsonpatch/v3 v3.0.1/go.mod h1:CBhndykehEwTOlEfnsfJwvkFQbSN8YZFr9M+cIHAJto= gomodules.xyz/orderedmap v0.1.0/go.mod h1:g9/TPUCm1t2gwD3j3zfV8uylyYhVdCNSi+xCEIu7yTU= gonum.org/v1/gonum v0.0.0-20190331200053-3d26580ed485/go.mod h1:2ltnJ7xHfj0zHS40VVPYEAAMTa3ZGguvHGBSJeRWqE0= gonum.org/v1/gonum v0.16.0 h1:5+ul4Swaf3ESvrOnidPp4GZbzf0mxVQpDCYUQE7OJfk= gonum.org/v1/gonum v0.16.0/go.mod h1:fef3am4MQ93R2HHpKnLk4/Tbh/s0+wqD5nfa6Pnwy4E= gonum.org/v1/netlib v0.0.0-20190313105609-8cb42192e0e0/go.mod h1:wa6Ws7BG/ESfp6dHfk7C6KdzKA7wR7u/rKwOGE66zvw= gonum.org/v1/netlib v0.0.0-20190331212654-76723241ea4e/go.mod h1:kS+toOQn6AQKjmKJ7gzohV1XkqsFehRA2FbsbkopSuQ= google.golang.org/api v0.0.0-20160322025152-9bf6e6e569ff/go.mod h1:4mhQ8q/RsB7i+udVvVy5NUi08OU8ZlA0gRVgrF7VFY0= google.golang.org/api v0.3.1/go.mod h1:6wY9I6uQWHQ8EM57III9mq/AjF+i8G65rmVagqKMtkk= google.golang.org/api v0.3.2/go.mod h1:6wY9I6uQWHQ8EM57III9mq/AjF+i8G65rmVagqKMtkk= google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE= google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M= google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= google.golang.org/api v0.9.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= google.golang.org/api v0.13.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= google.golang.org/api v0.14.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= google.golang.org/appengine v1.3.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0= google.golang.org/appengine v1.6.5/go.mod 
h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= google.golang.org/cloud v0.0.0-20151119220103-975617b05ea8/go.mod h1:0H1ncTHf11KCFhTc/+EFRbzSCOZx+VUbRMk55Yv5MYk= google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= google.golang.org/genproto v0.0.0-20190404172233-64821d5d2107/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= google.golang.org/genproto v0.0.0-20190522204451-c2c4e71fbf69/go.mod h1:z3L6/3dTEVtUr6QSP8miRzeRqwQOioJ9I66odjN4I7s= google.golang.org/genproto v0.0.0-20190530194941-fb225487d101/go.mod h1:z3L6/3dTEVtUr6QSP8miRzeRqwQOioJ9I66odjN4I7s= google.golang.org/genproto v0.0.0-20190716160619-c506a9f90610/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= google.golang.org/genproto v0.0.0-20190801165951-fa694d86fc64/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= google.golang.org/genproto v0.0.0-20190911173649-1774047e7e51/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8= google.golang.org/genproto v0.0.0-20190927181202-20e1ac93f88c/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8= google.golang.org/genproto v0.0.0-20191108220845-16a3f7862a1a/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= google.golang.org/genproto v0.0.0-20191115194625-c23dd37a84c9/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= google.golang.org/genproto v0.0.0-20200117163144-32f20d992d24/go.mod 
h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= google.golang.org/genproto v0.0.0-20201110150050-8816d57aaa9a h1:pOwg4OoaRYScjmR4LlLgdtnyoHYTSAVhhqe5uPdpII8= google.golang.org/genproto v0.0.0-20201110150050-8816d57aaa9a/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/grpc v0.0.0-20160317175043-d3ddb4469d5a/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw= google.golang.org/grpc v1.17.0/go.mod h1:6QZJwpn2B+Zp71q/5VxRsJ6NXXVCE5NRUHRo+f3cWCs= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= google.golang.org/grpc v1.21.0/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= google.golang.org/grpc v1.22.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= google.golang.org/grpc v1.22.1/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= google.golang.org/grpc v1.23.1/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= google.golang.org/grpc v1.24.0/go.mod h1:XDChyiUovWa60DnaeDeZmSW86xtLtjtZbwvSiRnRtcA= google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= google.golang.org/grpc v1.26.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= google.golang.org/grpc v1.27.1/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= google.golang.org/grpc v1.30.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc= google.golang.org/grpc v1.79.3 h1:sybAEdRIEtvcD68Gx7dmnwjZKlyfuc61Dyo9pGXXkKE= google.golang.org/grpc v1.79.3/go.mod 
h1:KmT0Kjez+0dde/v2j9vzwoAScgEPx/Bw1CYChhHLrHQ= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE= google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo= google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGjtUeSXeh4= google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= google.golang.org/protobuf v1.28.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= google.golang.org/protobuf v1.33.0/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos= google.golang.org/protobuf v1.34.2/go.mod h1:qYOHts0dSfpeUzUFpOMr/WGzszTmLH+DiWniOlNbLDw= google.golang.org/protobuf v1.36.10 h1:AYd7cD/uASjIL6Q9LiTjz8JLcrh/88q5UObnmY3aOOE= google.golang.org/protobuf v1.36.10/go.mod h1:HTf+CrKn2C3g5S8VImy6tdcUvCska2kB7j23XfzDpco= gopkg.in/airbrake/gobrake.v2 v2.0.9/go.mod h1:/h5ZAUhDkGaJfjzjKLSjv6zCL6O0LLBxU4K+aSYdM/U= gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= 
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20141024133853-64131543e789/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= gopkg.in/cheggaaa/pb.v1 v1.0.25/go.mod h1:V/YB90LKu/1FcN3WVnfiiE5oMCibMjukxqG/qStrOgw= gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= gopkg.in/evanphx/json-patch.v4 v4.12.0 h1:n6jtcsulIzXPJaxegRbvFNNrZDjbij7ny3gmSPG+6V4= gopkg.in/evanphx/json-patch.v4 v4.12.0/go.mod h1:p8EYWUEYMpynmqDbY58zCKCFZw8pRWMG4EsWvDvM72M= gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= gopkg.in/fsnotify/fsnotify.v1 v1.4.7/go.mod h1:Fyux9zXlo4rWoMSIzpn9fDAYjalPqJ/K1qJ27s+7ltE= gopkg.in/gemnasium/logrus-airbrake-hook.v2 v2.1.2/go.mod h1:Xk6kEKp8OKb+X14hQBKWaSkCsqBpgog8nAV2xsGOxlo= gopkg.in/gorp.v1 v1.7.2/go.mod h1:Wo3h+DBQZIxATwftsglhdD/62zRFPhGhTiu5jUJmCaw= gopkg.in/imdario/mergo.v0 v0.3.7/go.mod h1:9qPP6AGrlC1G2PTNXko614FwGZvorN7MiBU0Eppok+U= gopkg.in/inf.v0 v0.9.0/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc= gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= gopkg.in/ini.v1 v1.42.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= gopkg.in/ini.v1 v1.51.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= gopkg.in/natefinch/lumberjack.v2 v2.0.0/go.mod h1:l0ndWWf7gzL7RNwBG7wST/UCcT4T24xpD6X8LsfU/+k= gopkg.in/natefinch/lumberjack.v2 v2.2.1 
h1:bBRl1b0OH9s/DuPhuXpNl+VtCaJXFZ5/uEFST95x9zc= gopkg.in/natefinch/lumberjack.v2 v2.2.1/go.mod h1:YD8tP3GAjkrDg1eZH7EGmyESg/lsYskCTPBJVb9jqSc= gopkg.in/op/go-logging.v1 v1.0.0-20160211212156-b2cb9fa56473/go.mod h1:N1eN2tsCx0Ydtgjl4cqmbRCsY4/+z4cYDeqwZTk6zog= gopkg.in/resty.v1 v1.12.0/go.mod h1:mDo4pnntr5jdWRML875a/NmxYqAlA73dVijT2AXvQQo= gopkg.in/square/go-jose.v2 v2.2.2/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI= gopkg.in/square/go-jose.v2 v2.3.1/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI= gopkg.in/square/go-jose.v2 v2.5.1/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI= gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= gopkg.in/yaml.v2 v2.0.0-20170812160011-eb3733d160e7/go.mod h1:JAlM8MvJe8wmxCU4Bli9HhUf9+ttbYbLASfIpnQbh74= gopkg.in/yaml.v2 v2.1.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.5/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.7/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= gopkg.in/yaml.v3 v3.0.0-20190905181640-827449938966/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod 
h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gotest.tools v2.2.0+incompatible h1:VsBPFP1AI068pPrMxtb/S8Zkgf9xEmTLJjfM+P5UIEo= gotest.tools v2.2.0+incompatible/go.mod h1:DsYFclhRJ6vuDpmuTbkuFWG+y2sxOXAzmJt81HFBacw= gotest.tools/v3 v3.0.2/go.mod h1:3SzNCllyD9/Y+b5r9JIKQ474KzkZyqLqEfYqMsX94Bk= gotest.tools/v3 v3.0.3 h1:4AuOwCGf4lLR9u3YOe2awrHygurzhO/HeQ6laiA6Sx0= gotest.tools/v3 v3.0.3/go.mod h1:Z7Lb0S5l+klDB31fvDQX8ss/FlKDxtlFlw3Oa8Ymbl8= helm.sh/helm/v3 v3.1.0/go.mod h1:WYsFJuMASa/4XUqLyv54s0U/f3mlAaRErGmyy4z921g= helm.sh/helm/v3 v3.1.2/go.mod h1:WYsFJuMASa/4XUqLyv54s0U/f3mlAaRErGmyy4z921g= honnef.co/go/tools v0.0.0-20180728063816-88497007e858/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= howett.net/plist v0.0.0-20181124034731-591f970eefbb/go.mod h1:vMygbs4qMhSZSc4lCUl2OEE+rDiIIJAIdR4m7MiMcm0= k8s.io/api v0.0.0-20190620084959-7cf5895f2711/go.mod h1:TBhBqb1AWbBQbW3XRusr7n7E4v2+5ZY8r8sAMnyFC5A= k8s.io/api v0.0.0-20190813020757-36bff7324fb7/go.mod h1:3Iy+myeAORNCLgjd/Xu9ebwN7Vh59Bw0vh9jhoX+V58= k8s.io/api v0.0.0-20190918155943-95b840bb6a1f/go.mod h1:uWuOHnjmNrtQomJrvEBg0c0HRNyQ+8KTEERVsK0PW48= k8s.io/api v0.0.0-20191115095533-47f6de673b26/go.mod h1:iA/8arsvelvo4IDqIhX4IbjTEKBGgvsf2OraTuRtLFU= k8s.io/api v0.16.7/go.mod h1:oUAiGRgo4t+5yqcxjOu5LoHT3wJ8JSbgczkaFYS5L7I= k8s.io/api v0.17.0/go.mod 
h1:npsyOePkeP0CPwyGfXDHxvypiYMJxBWAMpQxCaJ4ZxI= k8s.io/api v0.17.1/go.mod h1:zxiAc5y8Ngn4fmhWUtSxuUlkfz1ixT7j9wESokELzOg= k8s.io/api v0.17.2/go.mod h1:BS9fjjLc4CMuqfSO8vgbHPKMt5+SF0ET6u/RVDihTo4= k8s.io/api v0.17.3/go.mod h1:YZ0OTkuw7ipbe305fMpIdf3GLXZKRigjtZaV5gzC2J0= k8s.io/api v0.17.4/go.mod h1:5qxx6vjmwUVG2nHQTKGlLts8Tbok8PzHl4vHtVFuZCA= k8s.io/api v0.20.1/go.mod h1:KqwcCVogGxQY3nBlRpwt+wpAMF/KjaCc7RpywacvqUo= k8s.io/api v0.20.4/go.mod h1:++lNL1AJMkDymriNniQsWRkMDzRaX2Y/POTUi8yvqYQ= k8s.io/api v0.20.6/go.mod h1:X9e8Qag6JV/bL5G6bU8sdVRltWKmdHsFUGS3eVndqE8= k8s.io/api v0.31.0 h1:b9LiSjR2ym/SzTOlfMHm1tr7/21aD7fSkqgD/CVJBCo= k8s.io/api v0.31.0/go.mod h1:0YiFF+JfFxMM6+1hQei8FY8M7s1Mth+z/q7eF1aJkTE= k8s.io/apiextensions-apiserver v0.0.0-20190918161926-8f644eb6e783/go.mod h1:xvae1SZB3E17UpV59AWc271W/Ph25N+bjPyR63X6tPY= k8s.io/apiextensions-apiserver v0.16.7/go.mod h1:6xYRp4trGp6eT5WZ6tPi/TB2nfWQCzwUvBlpg8iswe0= k8s.io/apiextensions-apiserver v0.17.0/go.mod h1:XiIFUakZywkUl54fVXa7QTEHcqQz9HG55nHd1DCoHj8= k8s.io/apiextensions-apiserver v0.17.2/go.mod h1:4KdMpjkEjjDI2pPfBA15OscyNldHWdBCfsWMDWAmSTs= k8s.io/apiextensions-apiserver v0.17.3/go.mod h1:CJbCyMfkKftAd/X/V6OTHYhVn7zXnDdnkUjS1h0GTeY= k8s.io/apiextensions-apiserver v0.17.4/go.mod h1:rCbbbaFS/s3Qau3/1HbPlHblrWpFivoaLYccCffvQGI= k8s.io/apiextensions-apiserver v0.31.0 h1:fZgCVhGwsclj3qCw1buVXCV6khjRzKC5eCFt24kyLSk= k8s.io/apiextensions-apiserver v0.31.0/go.mod h1:b9aMDEYaEe5sdK+1T0KU78ApR/5ZVp4i56VacZYEHxk= k8s.io/apimachinery v0.0.0-20190612205821-1799e75a0719/go.mod h1:I4A+glKBHiTgiEjQiCCQfCAIcIMFGt291SmsvcrFzJA= k8s.io/apimachinery v0.0.0-20190809020650-423f5d784010/go.mod h1:Waf/xTS2FGRrgXCkO5FP3XxTOWh0qLf2QhL1qFZZ/R8= k8s.io/apimachinery v0.0.0-20190913080033-27d36303b655/go.mod h1:nL6pwRT8NgfF8TT68DBI8uEePRt89cSvoXUVqbkWHq4= k8s.io/apimachinery v0.0.0-20191115015347-3c7067801da2/go.mod h1:dXFS2zaQR8fyzuvRdJDHw2Aerij/yVGJSre0bZQSVJA= k8s.io/apimachinery v0.16.7/go.mod 
h1:Xk2vD2TRRpuWYLQNM6lT9R7DSFZUYG03SarNkbGrnKE= k8s.io/apimachinery v0.17.0/go.mod h1:b9qmWdKlLuU9EBh+06BtLcSf/Mu89rWL33naRxs1uZg= k8s.io/apimachinery v0.17.1/go.mod h1:b9qmWdKlLuU9EBh+06BtLcSf/Mu89rWL33naRxs1uZg= k8s.io/apimachinery v0.17.2/go.mod h1:b9qmWdKlLuU9EBh+06BtLcSf/Mu89rWL33naRxs1uZg= k8s.io/apimachinery v0.17.3/go.mod h1:gxLnyZcGNdZTCLnq3fgzyg2A5BVCHTNDFrw8AmuJ+0g= k8s.io/apimachinery v0.17.4/go.mod h1:gxLnyZcGNdZTCLnq3fgzyg2A5BVCHTNDFrw8AmuJ+0g= k8s.io/apimachinery v0.20.1/go.mod h1:WlLqWAHZGg07AeltaI0MV5uk1Omp8xaN0JGLY6gkRpU= k8s.io/apimachinery v0.20.4/go.mod h1:WlLqWAHZGg07AeltaI0MV5uk1Omp8xaN0JGLY6gkRpU= k8s.io/apimachinery v0.20.6/go.mod h1:ejZXtW1Ra6V1O5H8xPBGz+T3+4gfkTCeExAHKU57MAc= k8s.io/apimachinery v0.31.0 h1:m9jOiSr3FoSSL5WO9bjm1n6B9KROYYgNZOb4tyZ1lBc= k8s.io/apimachinery v0.31.0/go.mod h1:rsPdaZJfTfLsNJSQzNHQvYoTmxhoOEofxtOsF3rtsMo= k8s.io/apiserver v0.0.0-20190918160949-bfa5e2e684ad/go.mod h1:XPCXEwhjaFN29a8NldXA901ElnKeKLrLtREO9ZhFyhg= k8s.io/apiserver v0.16.7/go.mod h1:/5zSatF30/L9zYfMTl55jzzOnx7r/gGv5a5wtRp8yAw= k8s.io/apiserver v0.17.0/go.mod h1:ABM+9x/prjINN6iiffRVNCBR2Wk7uY4z+EtEGZD48cg= k8s.io/apiserver v0.17.2/go.mod h1:lBmw/TtQdtxvrTk0e2cgtOxHizXI+d0mmGQURIHQZlo= k8s.io/apiserver v0.17.3/go.mod h1:iJtsPpu1ZpEnHaNawpSV0nYTGBhhX2dUlnn7/QS7QiY= k8s.io/apiserver v0.17.4/go.mod h1:5ZDQ6Xr5MNBxyi3iUZXS84QOhZl+W7Oq2us/29c0j9I= k8s.io/apiserver v0.20.1/go.mod h1:ro5QHeQkgMS7ZGpvf4tSMx6bBOgPfE+f52KwvXfScaU= k8s.io/apiserver v0.20.4/go.mod h1:Mc80thBKOyy7tbvFtB4kJv1kbdD0eIH8k8vianJcbFM= k8s.io/apiserver v0.20.6/go.mod h1:QIJXNt6i6JB+0YQRNcS0hdRHJlMhflFmsBDeSgT1r8Q= k8s.io/autoscaler v0.0.0-20190607113959-1b4f1855cb8e/go.mod h1:QEXezc9uKPT91dwqhSJq3GNI3B1HxFRQHiku9kmrsSA= k8s.io/cli-runtime v0.17.2/go.mod h1:aa8t9ziyQdbkuizkNLAw3qe3srSyWh9zlSB7zTqRNPI= k8s.io/cli-runtime v0.17.3/go.mod h1:X7idckYphH4SZflgNpOOViSxetiMj6xI0viMAjM81TA= k8s.io/cli-runtime v0.17.4/go.mod h1:IVW4zrKKx/8gBgNNkhiUIc7nZbVVNhc1+HcQh+PiNHc= k8s.io/client-go v0.31.0 
h1:QqEJzNjbN2Yv1H79SsS+SWnXkBgVu4Pj3CJQgbx0gI8= k8s.io/client-go v0.31.0/go.mod h1:Y9wvC76g4fLjmU0BA+rV+h2cncoadjvjjkkIGoTLcGU= k8s.io/code-generator v0.0.0-20190912054826-cd179ad6a269/go.mod h1:V5BD6M4CyaN5m+VthcclXWsVcT1Hu+glwa1bi3MIsyE= k8s.io/code-generator v0.16.7/go.mod h1:wFdrXdVi/UC+xIfLi+4l9elsTT/uEF61IfcN2wOLULQ= k8s.io/code-generator v0.17.0/go.mod h1:DVmfPQgxQENqDIzVR2ddLXMH34qeszkKSdH/N+s+38s= k8s.io/code-generator v0.17.1/go.mod h1:DVmfPQgxQENqDIzVR2ddLXMH34qeszkKSdH/N+s+38s= k8s.io/code-generator v0.17.2/go.mod h1:DVmfPQgxQENqDIzVR2ddLXMH34qeszkKSdH/N+s+38s= k8s.io/code-generator v0.17.3/go.mod h1:l8BLVwASXQZTo2xamW5mQNFCe1XPiAesVq7Y1t7PiQQ= k8s.io/code-generator v0.17.4/go.mod h1:l8BLVwASXQZTo2xamW5mQNFCe1XPiAesVq7Y1t7PiQQ= k8s.io/component-base v0.0.0-20190918160511-547f6c5d7090/go.mod h1:933PBGtQFJky3TEwYx4aEPZ4IxqhWh3R6DCmzqIn1hA= k8s.io/component-base v0.16.7/go.mod h1:ikdyfezOFMu5O0qJjy/Y9eXwj+fV3pVwdmt0ulVcIR0= k8s.io/component-base v0.17.0/go.mod h1:rKuRAokNMY2nn2A6LP/MiwpoaMRHpfRnrPaUJJj1Yoc= k8s.io/component-base v0.17.2/go.mod h1:zMPW3g5aH7cHJpKYQ/ZsGMcgbsA/VyhEugF3QT1awLs= k8s.io/component-base v0.17.3/go.mod h1:GeQf4BrgelWm64PXkIXiPh/XS0hnO42d9gx9BtbZRp8= k8s.io/component-base v0.17.4/go.mod h1:5BRqHMbbQPm2kKu35v3G+CpVq4K0RJKC7TRioF0I9lE= k8s.io/component-base v0.20.1/go.mod h1:guxkoJnNoh8LNrbtiQOlyp2Y2XFCZQmrcg2n/DeYNLk= k8s.io/component-base v0.20.4/go.mod h1:t4p9EdiagbVCJKrQ1RsA5/V4rFQNDfRlevJajlGwgjI= k8s.io/component-base v0.20.6/go.mod h1:6f1MPBAeI+mvuts3sIdtpjljHWBQ2cIy38oBIWMYnrM= k8s.io/cri-api v0.17.3/go.mod h1:X1sbHmuXhwaHs9xxYffLqJogVsnI+f6cPRcgPel7ywM= k8s.io/cri-api v0.20.1/go.mod h1:2JRbKt+BFLTjtrILYVqQK5jqhI+XNdF6UiGMgczeBCI= k8s.io/cri-api v0.20.4/go.mod h1:2JRbKt+BFLTjtrILYVqQK5jqhI+XNdF6UiGMgczeBCI= k8s.io/cri-api v0.20.6/go.mod h1:ew44AjNXwyn1s0U4xCKGodU7J1HzBeZ1MpGrpa5r8Yc= k8s.io/gengo v0.0.0-20190128074634-0689ccc1d7d6/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0= k8s.io/gengo 
v0.0.0-20190822140433-26a664648505/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0= k8s.io/gengo v0.0.0-20191010091904-7fa3014cb28f/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0= k8s.io/gengo v0.0.0-20200413195148-3a45101e95ac/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0= k8s.io/gengo/v2 v2.0.0-20240228010128-51d4e06bde70/go.mod h1:VH3AT8AaQOqiGjMF9p0/IM1Dj+82ZwjfxUP1IxaHE+8= k8s.io/helm v2.16.3+incompatible/go.mod h1:LZzlS4LQBHfciFOurYBFkCMTaZ0D1l+p0teMg7TSULI= k8s.io/klog v0.0.0-20181102134211-b9b56d5dfc92/go.mod h1:Gq+BEi5rUBO/HRz0bTSXDUcqjScdoY3a9IHpCEIOOfk= k8s.io/klog v0.3.0/go.mod h1:Gq+BEi5rUBO/HRz0bTSXDUcqjScdoY3a9IHpCEIOOfk= k8s.io/klog v0.3.1/go.mod h1:Gq+BEi5rUBO/HRz0bTSXDUcqjScdoY3a9IHpCEIOOfk= k8s.io/klog v0.3.3/go.mod h1:Gq+BEi5rUBO/HRz0bTSXDUcqjScdoY3a9IHpCEIOOfk= k8s.io/klog v0.4.0/go.mod h1:4Bi6QPql/J/LkTDqv7R/cd3hPo4k2DG6Ptcz060Ez5I= k8s.io/klog v1.0.0 h1:Pt+yjF5aB1xDSVbau4VsWe+dQNzA0qv1LlXdC2dF6Q8= k8s.io/klog v1.0.0/go.mod h1:4Bi6QPql/J/LkTDqv7R/cd3hPo4k2DG6Ptcz060Ez5I= k8s.io/klog/v2 v2.0.0/go.mod h1:PBfzABfn139FHAV07az/IF9Wp1bkk3vpT2XSJ76fSDE= k8s.io/klog/v2 v2.2.0/go.mod h1:Od+F08eJP+W3HUb4pSrPpgp9DGU4GzlpG/TmITuYh/Y= k8s.io/klog/v2 v2.4.0/go.mod h1:Od+F08eJP+W3HUb4pSrPpgp9DGU4GzlpG/TmITuYh/Y= k8s.io/klog/v2 v2.80.1/go.mod h1:y1WjHnz7Dj687irZUWR/WLkLc5N1YHtjLdmgWjndZn0= k8s.io/klog/v2 v2.120.1/go.mod h1:3Jpz1GvMt720eyJH1ckRHK1EDfpxISzJ7I9OYgaDtPE= k8s.io/klog/v2 v2.130.1 h1:n9Xl7H1Xvksem4KFG4PYbdQCQxqc/tTUyrgXaOhHSzk= k8s.io/klog/v2 v2.130.1/go.mod h1:3Jpz1GvMt720eyJH1ckRHK1EDfpxISzJ7I9OYgaDtPE= k8s.io/kube-aggregator v0.17.3/go.mod h1:1dMwMFQbmH76RKF0614L7dNenMl3dwnUJuOOyZ3GMXA= k8s.io/kube-openapi v0.0.0-20190228160746-b3a7cee44a30/go.mod h1:BXM9ceUBTj2QnfH2MK1odQs778ajze1RxcmP6S8RVVc= k8s.io/kube-openapi v0.0.0-20190320154901-5e45bb682580/go.mod h1:BXM9ceUBTj2QnfH2MK1odQs778ajze1RxcmP6S8RVVc= k8s.io/kube-openapi v0.0.0-20190709113604-33be087ad058/go.mod h1:nfDlWeOsu3pUf4yWGL+ERqohP4YsZcBJXWMK+gkzOA4= 
k8s.io/kube-openapi v0.0.0-20190816220812-743ec37842bf/go.mod h1:1TqjTSzOxsLGIKfj0lK8EeCP7K1iUG65v09OM0/WG5E= k8s.io/kube-openapi v0.0.0-20191107075043-30be4d16710a/go.mod h1:1TqjTSzOxsLGIKfj0lK8EeCP7K1iUG65v09OM0/WG5E= k8s.io/kube-openapi v0.0.0-20201113171705-d219536bb9fd/go.mod h1:WOJ3KddDSol4tAGcJo0Tvi+dK12EcqSLqcWsryKMpfM= k8s.io/kube-openapi v0.0.0-20240228011516-70dd3763d340 h1:BZqlfIlq5YbRMFko6/PM7FjZpUb45WallggurYhKGag= k8s.io/kube-openapi v0.0.0-20240228011516-70dd3763d340/go.mod h1:yD4MZYeKMBwQKVht279WycxKyM84kkAx2DPrTXaeb98= k8s.io/kube-state-metrics v1.7.2/go.mod h1:U2Y6DRi07sS85rmVPmBFlmv+2peBcL8IWGjM+IjYA/E= k8s.io/kubectl v0.17.2/go.mod h1:y4rfLV0n6aPmvbRCqZQjvOp3ezxsFgpqL+zF5jH/lxk= k8s.io/kubectl v0.17.3/go.mod h1:NUn4IBY7f7yCMwSop2HCXlw/MVYP4HJBiUmOR3n9w28= k8s.io/kubectl v0.17.4/go.mod h1:im5QWmh6fvtmJkkNm4HToLe8z9aM3jihYK5X/wOybcY= k8s.io/kubernetes v1.13.0/go.mod h1:ocZa8+6APFNC2tX1DZASIbocyYT5jHzqFVsY5aoB7Jk= k8s.io/metrics v0.17.2/go.mod h1:3TkNHET4ROd+NfzNxkjoVfQ0Ob4iZnaHmSEA4vYpwLw= k8s.io/metrics v0.17.3/go.mod h1:HEJGy1fhHOjHggW9rMDBJBD3YuGroH3Y1pnIRw9FFaI= k8s.io/metrics v0.17.4/go.mod h1:6rylW2iD3M9VppnEAAtJASY1XS8Pt9tcYh+tHxBeV3I= k8s.io/utils v0.0.0-20190308190857-21c4ce38f2a7/go.mod h1:8k8uAuAQ0rXslZKaEWd0c3oVhZz7sSzSiPnVZayjIX0= k8s.io/utils v0.0.0-20190801114015-581e00157fb1/go.mod h1:sZAwmy6armz5eXlNoLmJcl4F1QuKu7sr+mFQ0byX7Ew= k8s.io/utils v0.0.0-20191114184206-e782cd3c129f/go.mod h1:sZAwmy6armz5eXlNoLmJcl4F1QuKu7sr+mFQ0byX7Ew= k8s.io/utils v0.0.0-20191114200735-6ca3b61696b6/go.mod h1:sZAwmy6armz5eXlNoLmJcl4F1QuKu7sr+mFQ0byX7Ew= k8s.io/utils v0.0.0-20201110183641-67b214c5f920/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= k8s.io/utils v0.0.0-20230726121419-3b25d923346b/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= k8s.io/utils v0.0.0-20240711033017-18e509b52bc8 h1:pUdcCO1Lk/tbT5ztQWOBi5HBgbBP1J8+AsQnQCKsi8A= k8s.io/utils v0.0.0-20240711033017-18e509b52bc8/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= 
modernc.org/cc v1.0.0/go.mod h1:1Sk4//wdnYJiUIxnW8ddKpaOJCF37yAdqYnkxUpaYxw= modernc.org/golex v1.0.0/go.mod h1:b/QX9oBD/LhixY6NDh+IdGv17hgB+51fET1i2kPSmvk= modernc.org/mathutil v1.0.0/go.mod h1:wU0vUrJsVWBZ4P6e7xtFJEhFSNsfRLJ8H458uRjg03k= modernc.org/strutil v1.0.0/go.mod h1:lstksw84oURvj9y3tn8lGvRxyRC1S2+g5uuIzNfIOBs= modernc.org/xc v1.0.0/go.mod h1:mRNCo0bvLjGhHO9WsyuKVU4q0ceiDDDoEeWDJHrNx8I= rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= rsc.io/letsencrypt v0.0.3/go.mod h1:buyQKZ6IXrRnB7TdkHP0RyEybLx18HHyOSoTyoOLqNY= sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.14/go.mod h1:LEScyzhFmoF5pso/YSeBstl57mOzx9xlU9n85RGrDQg= sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.15/go.mod h1:LEScyzhFmoF5pso/YSeBstl57mOzx9xlU9n85RGrDQg= sigs.k8s.io/controller-runtime v0.5.2/go.mod h1:JZUwSMVbxDupo0lTJSSFP5pimEyxGynROImSsqIOx1A= sigs.k8s.io/controller-runtime v0.19.4 h1:SUmheabttt0nx8uJtoII4oIP27BVVvAKFvdvGFwV/Qo= sigs.k8s.io/controller-runtime v0.19.4/go.mod h1:iRmWllt8IlaLjvTTDLhRBXIEtkCK6hwVBJJsYS9Ajf4= sigs.k8s.io/controller-tools v0.2.4/go.mod h1:m/ztfQNocGYBgTTCmFdnK94uVvgxeZeE3LtJvd/jIzA= sigs.k8s.io/controller-tools v0.2.8/go.mod h1:9VKHPszmf2DHz/QmHkcfZoewO6BL7pPs9uAiBVsaJSE= sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd h1:EDPBXCAspyGV4jQlpZSudPeMmr1bNJefnuqLsRAsHZo= sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd/go.mod h1:B8JuhiUyNFVKdsE8h686QcCxMaH6HrOAZj4vswFpcB0= sigs.k8s.io/kustomize v2.0.3+incompatible/go.mod h1:MkjgH3RdOWrievjo6c9T245dYlB5QeXV4WCbnt/PEpU= sigs.k8s.io/structured-merge-diff v0.0.0-20190525122527-15d366b2352e/go.mod h1:wWxsB5ozmmv/SG7nM11ayaAW51xMvak/t1r0CSlcokI= sigs.k8s.io/structured-merge-diff v0.0.0-20190817042607-6149e4549fca/go.mod h1:IIgPezJWb76P0hotTxzDbWsMYB8APh18qZnxkomBpxA= sigs.k8s.io/structured-merge-diff v1.0.1-0.20191108220359-b1b620dd3f06/go.mod h1:/ULNhyfzRopfcjskuui0cTITekDduZ7ycKN3oUT9R18= sigs.k8s.io/structured-merge-diff v1.0.2/go.mod 
h1:IIgPezJWb76P0hotTxzDbWsMYB8APh18qZnxkomBpxA= sigs.k8s.io/structured-merge-diff/v4 v4.0.2/go.mod h1:bJZC9H9iH24zzfZ/41RGcq60oK1F7G282QMXDPYydCw= sigs.k8s.io/structured-merge-diff/v4 v4.0.3/go.mod h1:bJZC9H9iH24zzfZ/41RGcq60oK1F7G282QMXDPYydCw= sigs.k8s.io/structured-merge-diff/v4 v4.2.3/go.mod h1:qjx8mGObPmV2aSZepjQjbmb2ihdVs8cGKBraizNC69E= sigs.k8s.io/structured-merge-diff/v4 v4.4.1 h1:150L+0vs/8DA78h1u02ooW1/fFq/Lwr+sGiqlzvrtq4= sigs.k8s.io/structured-merge-diff/v4 v4.4.1/go.mod h1:N8hJocpFajUSSeSJ9bOZ77VzejKZaXsTtZo4/u7Io08= sigs.k8s.io/yaml v1.1.0/go.mod h1:UJmg0vDUVViEyp3mgSv9WPwZCDxu4rQW1olrI1uml+o= sigs.k8s.io/yaml v1.2.0/go.mod h1:yfXDCHCao9+ENCvLSE62v9VSji2MKu5jeNfTrofGhJc= sigs.k8s.io/yaml v1.4.0 h1:Mk1wCc2gy/F0THH0TAp1QYyJNzRm2KCLy3o5ASXVI5E= sigs.k8s.io/yaml v1.4.0/go.mod h1:Ejl7/uTz7PSA4eKMyQCUTnhZYNmLIl+5c2lQPGR2BPY= vbom.ml/util v0.0.0-20160121211510-db5cfe13f5cc/go.mod h1:so/NYdZXCz+E3ZpW0uAoCj6uzU2+8OWDFv/HxUSs7kI= ================================================ FILE: hack/init.sh ================================================ #!/usr/bin/env bash # Copyright 2025 The ChaosBlade Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. function git_find() { # Similar to find but faster and easier to understand. We want to include # modified and untracked files because this might be running against code # which is not tracked by git yet. 
git ls-files -cmo --exclude-standard \ ':!:vendor/*' `# catches vendor/...` \ ':!:*/vendor/*' `# catches any subdir/vendor/...` \ ':!:third_party/*' `# catches third_party/...` \ ':!:*/third_party/*' `# catches third_party/...` \ ':!:*/testdata/*' `# catches any subdir/testdata/...` \ ':(glob)**/*.go' \ "$@" } ================================================ FILE: hack/update-gofmt.sh ================================================ #!/usr/bin/env bash # Copyright 2025 The ChaosBlade Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. set -o errexit set -o nounset set -o pipefail source "$(dirname "$0")/init.sh" go install mvdan.cc/gofumpt@v0.10.0 # Serially process each file to avoid concurrent write issues for f in $(git_find); do gofumpt -w "$f" done ================================================ FILE: hack/update-imports.sh ================================================ #!/usr/bin/env bash # Copyright 2025 The ChaosBlade Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
set -o errexit set -o nounset set -o pipefail source "$(dirname "$0")/init.sh" go install golang.org/x/tools/cmd/goimports@latest # Serially process each file to avoid concurrent write issues for f in $(git_find); do goimports -w -local github.com/chaosblade-io/chaosblade-operator -srcdir "$(dirname "$f")" "$f" done ================================================ FILE: hack/verify-gofmt.sh ================================================ #!/usr/bin/env bash # Copyright 2025 The ChaosBlade Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. set -o errexit set -o nounset set -o pipefail source "$(dirname "$0")/init.sh" go install mvdan.cc/gofumpt@v0.10.0 # gofmt exits with non-zero exit code if it finds a problem unrelated to # formatting (e.g., a file does not parse correctly). Without "|| true" this # would have led to no useful error message from gofmt, because the script would # have failed before getting to the "echo" in the block below. diff=$(git_find | xargs gofumpt -d 2>&1) || true if [[ -n "${diff}" ]]; then echo "${diff}" >&2 echo >&2 echo "Run ./hack/update-gofmt.sh" >&2 exit 1 fi ================================================ FILE: hack/verify-imports.sh ================================================ #!/usr/bin/env bash # Copyright 2025 The ChaosBlade Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. set -o errexit set -o nounset set -o pipefail source "$(dirname "$0")/init.sh" go install golang.org/x/tools/cmd/goimports@latest diff=$(git_find | xargs goimports -l -local github.com/chaosblade-io/chaosblade-operator 2>&1) || true if [[ -n "${diff}" ]]; then echo "The following files have incorrect import order. Please run ./hack/update-imports.sh to fix them:" >&2 echo "${diff}" >&2 exit 1 fi ================================================ FILE: licenserc.toml ================================================ # Copyright 2025 The ChaosBlade Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # Base directory for the whole execution. # All relative paths is based on this path. # default: current working directory baseDir = "." headerPath = "Apache-2.0.txt" # On enabled, check the license header matches exactly with whitespace. # Otherwise, strip the header in one line and check. # default: true strictCheck = true # Whether you use the default excludes. Check Default.EXCLUDES for the completed list. 
# To suppress part of the excludes in that list, declare exactly the same pattern in the `includes` list.
# default: true
useDefaultExcludes = true

excludes = [
    "*.txt",
]

# The supported patterns for includes and excludes follow the gitignore pattern format, with these differences:
# 1. `includes` does not support `!`
# 2. a backslash does not escape letters
# 3. whitespace and `#` are treated literally, since we configure line by line
# See also https://git-scm.com/docs/gitignore#_pattern_format

# Keywords that should occur in the header, case-insensitive.
# default: ["copyright"]
keywords = ["copyright", ]

# Whether to use the default mapping. Check DocumentType.defaultMapping() for the complete list.
# default: true
useDefaultMapping = true

# Properties used to fill in the template.
# For a defined key-value pair, you can use {{props["key"]}} in the header template, which will be
# substituted with the corresponding value.
[properties]
inceptionYear = 2025
copyrightOwner = "The ChaosBlade Authors"

# There are also preset attributes that can be used in the header template (no need to surround them with `props[]`):
# * 'attrs.filename' is the current file name, like: pom.xml.

# Options to configure Git features.
[git]
# If enabled, do not process files that are ignored by Git; possible values: ['auto', 'enable', 'disable']
# 'auto' means this feature tries to be enabled with:
# * gix - if `basedir` is in a Git repository.
# * the ignore crate's gitignore rules - if `basedir` is not in a Git repository.
# 'enable' means always enabled via gix; it fails if that is impossible.
# default: 'auto'
ignore = 'auto'

# If enabled, populate file attributes determined by Git; possible values: ['auto', 'enable', 'disable']
# The attributes include:
# * 'attrs.git_file_created_year'
# * 'attrs.git_file_modified_year'
# 'auto' means this feature tries to be enabled with:
# * gix - if `basedir` is in a Git repository.
# 'enable' means always enabled via gix; it fails if that is impossible.
# default: 'disable'
attrs = 'disable'


================================================
FILE: pkg/apis/addtoscheme_chaosblade_v1alpha1.go
================================================
/*
 * Copyright 2025 The ChaosBlade Authors
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package apis

import (
	"github.com/chaosblade-io/chaosblade-operator/pkg/apis/chaosblade/v1alpha1"
)

func init() {
	// Register the types with the Scheme so the components can map objects to GroupVersionKinds and back
	// The v1alpha1 SchemeBuilder carries every type registered via SchemeBuilder.Register in types.go.
	AddToSchemes = append(AddToSchemes, v1alpha1.SchemeBuilder.AddToScheme)
}


================================================
FILE: pkg/apis/apis.go
================================================
/*
 * Copyright 2025 The ChaosBlade Authors
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package apis

import (
	"k8s.io/apimachinery/pkg/runtime"
)

// AddToSchemes may be used to add all resources defined in the project to a Scheme
// Each package's init() (e.g. addtoscheme_chaosblade_v1alpha1.go) appends its AddToScheme func here.
var AddToSchemes runtime.SchemeBuilder

// AddToScheme adds all Resources to the Scheme
// It runs every registration function accumulated in AddToSchemes against s.
func AddToScheme(s *runtime.Scheme) error {
	return AddToSchemes.AddToScheme(s)
}


================================================
FILE: pkg/apis/chaosblade/group.go
================================================
/*
 * Copyright 2025 The ChaosBlade Authors
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

// Package chaosblade contains chaosblade API versions.
//
// This file ensures Go source parsers acknowledge the chaosblade package
// and any child packages. It can be removed if any other Go source files are
// added to this package.
package chaosblade


================================================
FILE: pkg/apis/chaosblade/v1alpha1/doc.go
================================================
/*
 * Copyright 2025 The ChaosBlade Authors
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

// Package v1alpha1 contains API Schema definitions for the chaosblade v1alpha1 API group
// +k8s:deepcopy-gen=package,register
// +groupName=chaosblade.io
package v1alpha1


================================================
FILE: pkg/apis/chaosblade/v1alpha1/register.go
================================================
/*
 * Copyright 2025 The ChaosBlade Authors
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

// NOTE: Boilerplate only. Ignore this file.

// Package v1alpha1 contains API Schema definitions for the chaosblade v1alpha1 API group
// +k8s:deepcopy-gen=package,register
// +groupName=chaosblade.io
package v1alpha1

import (
	"k8s.io/apimachinery/pkg/runtime/schema"
	"sigs.k8s.io/controller-runtime/pkg/scheme"
)

var (
	// SchemeGroupVersion is group version used to register these objects
	SchemeGroupVersion = schema.GroupVersion{Group: "chaosblade.io", Version: "v1alpha1"}

	// SchemeBuilder is used to add go types to the GroupVersionKind scheme
	SchemeBuilder = &scheme.Builder{GroupVersion: SchemeGroupVersion}
)


================================================
FILE: pkg/apis/chaosblade/v1alpha1/types.go
================================================
/*
 * Copyright 2025 The ChaosBlade Authors
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
* You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package v1alpha1 import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) // EDIT THIS FILE! THIS IS SCAFFOLDING FOR YOU TO OWN! // NOTE: json tags are required. Any new fields you add must have json tags for the fields to be serialized. type ClusterPhase string const ( ClusterPhaseInitial ClusterPhase = "" ClusterPhaseInitialized ClusterPhase = "Initialized" ClusterPhaseRunning ClusterPhase = "Running" ClusterPhaseUpdating ClusterPhase = "Updating" ClusterPhaseDestroying ClusterPhase = "Destroying" ClusterPhaseDestroyed ClusterPhase = "Destroyed" ClusterPhaseError ClusterPhase = "Error" ) // ChaosBladeSpec defines the desired state of ChaosBlade // +k8s:openapi-gen=true type ChaosBladeSpec struct { // INSERT ADDITIONAL SPEC FIELDS - desired state of cluster // Important: Run "operator-sdk generate k8s" to regenerate code after modifying this file // Add custom validation using kubebuilder tags: https://book-v1.book.kubebuilder.io/beyond_basics/generating_crd.html Experiments []ExperimentSpec `json:"experiments"` } type ExperimentSpec struct { // Scope is the area of the experiments, currently support node, pod and container Scope string `json:"scope"` // Target is the experiment target, such as cpu, network Target string `json:"target"` // Action is the experiment scenario of the target, such as delay, load Action string `json:"action"` // Desc is the experiment description Desc string `json:"desc,omitempty"` // Matchers is the experiment rules Matchers []FlagSpec `json:"matchers,omitempty"` } type FlagSpec struct { // Name is the name of flag Name string 
`json:"name"` // TODO: Temporarily defined as an array for all flags // Value is the value of flag Value []string `json:"value"` } // ChaosBladeStatus defines the observed state of ChaosBlade // +k8s:openapi-gen=true type ChaosBladeStatus struct { // Phase indicates the state of the experiment // Initial -> Running -> Updating -> Destroying -> Destroyed Phase ClusterPhase `json:"phase,omitempty"` // Important: Run "operator-sdk generate k8s" to regenerate code after modifying this file // Add custom validation using kubebuilder tags: https://book-v1.book.kubebuilder.io/beyond_basics/generating_crd.html ExpStatuses []ExperimentStatus `json:"expStatuses"` } func (in *ResourceStatus) CreateFailResourceStatus(err string, code int32) ResourceStatus { in.State = ErrorState in.Error = err in.Success = false in.Code = code return *in } func (in *ResourceStatus) CreateSuccessResourceStatus() ResourceStatus { in.State = SuccessState in.Success = true return *in } const ( PodKind = "pod" ContainerKind = "container" NodeKind = "node" ServiceKind = "service" ) type ResourceStatus struct { // experiment uid in chaosblade Id string `json:"id,omitempty"` // experiment state State string `json:"state"` // experiment code Code int32 `json:"code,omitempty"` // experiment error Error string `json:"error,omitempty"` // success Success bool `json:"success"` // Kind Kind string `json:"kind"` // Resource identifier, rules as following: // container: Namespace/NodeName/PodName/ContainerName // pod: Namespace/NodeName/PodName Identifier string `json:"identifier,omitempty"` } const ( SuccessState = "Success" ErrorState = "Error" DestroyedState = "Destroyed" ) func CreateFailExperimentStatus(err string, ResStatuses []ResourceStatus) ExperimentStatus { return ExperimentStatus{Success: false, State: ErrorState, Error: err, ResStatuses: ResStatuses} } func CreateSuccessExperimentStatus(ResStatuses []ResourceStatus) ExperimentStatus { return ExperimentStatus{Success: true, State: SuccessState, 
ResStatuses: ResStatuses} } func CreateDestroyedExperimentStatus(ResStatuses []ResourceStatus) ExperimentStatus { return ExperimentStatus{Success: true, State: DestroyedState, ResStatuses: ResStatuses} } func CreateFailResStatuses(code int32, err, uid string) []ResourceStatus { statuses := make([]ResourceStatus, 0) statuses = append(statuses, ResourceStatus{ Error: err, Code: code, Id: uid, Success: false, }) return statuses } type ExperimentStatus struct { // experiment scope for cache Scope string `json:"scope"` Target string `json:"target"` Action string `json:"action"` // Success is used to judge the experiment result Success bool `json:"success"` // State is used to describe the experiment result State string `json:"state"` Error string `json:"error,omitempty"` // ResStatuses is the details of the experiment ResStatuses []ResourceStatus `json:"resStatuses,omitempty"` } // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object // ChaosBlade is the Schema for the chaosblades API // +k8s:openapi-gen=true // +kubebuilder:subresource:status type ChaosBlade struct { metav1.TypeMeta `json:",inline"` metav1.ObjectMeta `json:"metadata,omitempty"` Spec ChaosBladeSpec `json:"spec,omitempty"` Status ChaosBladeStatus `json:"status,omitempty"` } // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object // ChaosBladeList contains a list of ChaosBlade type ChaosBladeList struct { metav1.TypeMeta `json:",inline"` metav1.ListMeta `json:"metadata,omitempty"` Items []ChaosBlade `json:"items"` } func init() { SchemeBuilder.Register(&ChaosBlade{}, &ChaosBladeList{}) } ================================================ FILE: pkg/apis/chaosblade/v1alpha1/zz_generated.deepcopy.go ================================================ //go:build !ignore_autogenerated // +build !ignore_autogenerated /* * Copyright 2025 The ChaosBlade Authors * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the 
 License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

// Code generated by operator-sdk. DO NOT EDIT.
// NOTE: do not hand-edit these deepcopy helpers; regenerate them with the operator-sdk.

package v1alpha1

import (
	runtime "k8s.io/apimachinery/pkg/runtime"
)

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ChaosBlade) DeepCopyInto(out *ChaosBlade) {
	*out = *in
	out.TypeMeta = in.TypeMeta
	in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
	in.Spec.DeepCopyInto(&out.Spec)
	in.Status.DeepCopyInto(&out.Status)
	return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ChaosBlade.
func (in *ChaosBlade) DeepCopy() *ChaosBlade {
	if in == nil {
		return nil
	}
	out := new(ChaosBlade)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *ChaosBlade) DeepCopyObject() runtime.Object {
	if c := in.DeepCopy(); c != nil {
		return c
	}
	return nil
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ChaosBladeList) DeepCopyInto(out *ChaosBladeList) {
	*out = *in
	out.TypeMeta = in.TypeMeta
	in.ListMeta.DeepCopyInto(&out.ListMeta)
	if in.Items != nil {
		in, out := &in.Items, &out.Items
		*out = make([]ChaosBlade, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
	return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ChaosBladeList.
func (in *ChaosBladeList) DeepCopy() *ChaosBladeList {
	if in == nil {
		return nil
	}
	out := new(ChaosBladeList)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *ChaosBladeList) DeepCopyObject() runtime.Object {
	if c := in.DeepCopy(); c != nil {
		return c
	}
	return nil
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ChaosBladeSpec) DeepCopyInto(out *ChaosBladeSpec) {
	*out = *in
	if in.Experiments != nil {
		in, out := &in.Experiments, &out.Experiments
		*out = make([]ExperimentSpec, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
	return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ChaosBladeSpec.
func (in *ChaosBladeSpec) DeepCopy() *ChaosBladeSpec {
	if in == nil {
		return nil
	}
	out := new(ChaosBladeSpec)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ChaosBladeStatus) DeepCopyInto(out *ChaosBladeStatus) {
	*out = *in
	if in.ExpStatuses != nil {
		in, out := &in.ExpStatuses, &out.ExpStatuses
		*out = make([]ExperimentStatus, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
	return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ChaosBladeStatus.
func (in *ChaosBladeStatus) DeepCopy() *ChaosBladeStatus {
	if in == nil {
		return nil
	}
	out := new(ChaosBladeStatus)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ExperimentSpec) DeepCopyInto(out *ExperimentSpec) {
	*out = *in
	if in.Matchers != nil {
		in, out := &in.Matchers, &out.Matchers
		*out = make([]FlagSpec, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
	return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExperimentSpec.
func (in *ExperimentSpec) DeepCopy() *ExperimentSpec {
	if in == nil {
		return nil
	}
	out := new(ExperimentSpec)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ExperimentStatus) DeepCopyInto(out *ExperimentStatus) {
	*out = *in
	if in.ResStatuses != nil {
		in, out := &in.ResStatuses, &out.ResStatuses
		*out = make([]ResourceStatus, len(*in))
		copy(*out, *in)
	}
	return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExperimentStatus.
func (in *ExperimentStatus) DeepCopy() *ExperimentStatus {
	if in == nil {
		return nil
	}
	out := new(ExperimentStatus)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *FlagSpec) DeepCopyInto(out *FlagSpec) {
	*out = *in
	if in.Value != nil {
		in, out := &in.Value, &out.Value
		*out = make([]string, len(*in))
		copy(*out, *in)
	}
	return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FlagSpec.
func (in *FlagSpec) DeepCopy() *FlagSpec {
	if in == nil {
		return nil
	}
	out := new(FlagSpec)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ResourceStatus) DeepCopyInto(out *ResourceStatus) {
	*out = *in
	return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceStatus.
func (in *ResourceStatus) DeepCopy() *ResourceStatus {
	if in == nil {
		return nil
	}
	out := new(ResourceStatus)
	in.DeepCopyInto(out)
	return out
}


================================================
FILE: pkg/controller/add_chaosblade.go
================================================
/*
 * Copyright 2025 The ChaosBlade Authors
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package controller

import (
	"github.com/chaosblade-io/chaosblade-operator/pkg/controller/chaosblade"
)

func init() {
	// AddToManagerFuncs is a list of functions to create controllers and add them to a manager.
	// chaosblade.Add wires the ChaosBlade controller into the manager at startup.
	AddToManagerFuncs = append(AddToManagerFuncs, chaosblade.Add)
}


================================================
FILE: pkg/controller/chaosblade/controller.go
================================================
/*
 * Copyright 2025 The ChaosBlade Authors
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package chaosblade

import (
	"context"
	"encoding/json"
	"fmt"
	"time"

	"github.com/sirupsen/logrus"
	apierrors "k8s.io/apimachinery/pkg/api/errors"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/apimachinery/pkg/types"
	"k8s.io/client-go/util/retry"
	"sigs.k8s.io/controller-runtime/pkg/client"
	"sigs.k8s.io/controller-runtime/pkg/controller"
	"sigs.k8s.io/controller-runtime/pkg/handler"
	"sigs.k8s.io/controller-runtime/pkg/manager"
	"sigs.k8s.io/controller-runtime/pkg/reconcile"
	"sigs.k8s.io/controller-runtime/pkg/source"

	"github.com/chaosblade-io/chaosblade-operator/channel"
	"github.com/chaosblade-io/chaosblade-operator/exec"
	"github.com/chaosblade-io/chaosblade-operator/exec/model"
	"github.com/chaosblade-io/chaosblade-operator/pkg/apis/chaosblade/v1alpha1"
	runtime2 "github.com/chaosblade-io/chaosblade-operator/pkg/runtime"
	"github.com/chaosblade-io/chaosblade-operator/pkg/runtime/chaosblade"
	"github.com/chaosblade-io/chaosblade-operator/version"
)

const chaosbladeFinalizer = "finalizer.chaosblade.io"

/**
 * USER ACTION REQUIRED: This is a scaffold file intended for the user to modify with their own Controller
 * business logic. Delete these comments after modifying this file.*
 */

// Add creates a new ChaosBlade Controller and adds it to the Manager. The Manager will set fields on the Controller
// and Start it when the Manager is Started.
func Add(mgr manager.Manager) error { if err := add(mgr, newReconciler(mgr)); err != nil { return err } // add periodically clean up blade ticker return mgr.Add(manager.RunnableFunc(func(ctx context.Context) error { startPeriodicallyCleanUpBlade(ctx, mgr) return nil })) } // newReconciler returns a new reconcile.Reconciler func newReconciler(mgr manager.Manager) *ReconcileChaosBlade { cbClient := mgr.GetClient().(*channel.Client) return &ReconcileChaosBlade{ client: cbClient, scheme: mgr.GetScheme(), Executor: exec.NewDispatcherExecutor(cbClient), } } // add adds a new Controller to mgr with r as the reconcile.Reconciler func add(mgr manager.Manager, rcb *ReconcileChaosBlade) error { // Create a new controller c, err := controller.New("chaosblade-controller", mgr, controller.Options{ Reconciler: rcb, MaxConcurrentReconciles: runtime2.MaxConcurrentReconciles, }) if err != nil { return err } // Watch for changes to primary resource ChaosBlade cb := v1alpha1.ChaosBlade{} err = c.Watch(source.Kind( mgr.GetCache(), &cb, &handler.TypedEnqueueRequestForObject[*v1alpha1.ChaosBlade]{}, &SpecUpdatedPredicateForRunningPhase{}, )) if err != nil { return err } if chaosblade.DaemonsetEnable { //namespace, err := k8sutil.GetOperatorNamespace() //if err != nil { // return err //} //chaosblade.DaemonsetPodNamespace = namespace //// deploy chaosblade tool //if err := deployChaosBladeTool(rcb); err != nil { // logrus.WithField("product", version.Product).WithError(err).Errorln("Failed to deploy chaosblade tool") // return err //} logrus.WithField("product", version.Product).WithField("daemonset.enable", chaosblade.DaemonsetEnable). 
Infoln("enable chaosblade-tool deamonset") } return nil } // if blade status is destroying func startPeriodicallyCleanUpBlade(ctx context.Context, mgr manager.Manager) { go func() { cli := mgr.GetClient() duration, err := time.ParseDuration(chaosblade.RemoveBladeInterval) if err != nil { logrus.Errorf("parse interval error: %v, use default interval: %s", err, chaosblade.DefaultRemoveBladeInterval) duration, err = time.ParseDuration(chaosblade.DefaultRemoveBladeInterval) chaosblade.RemoveBladeInterval = chaosblade.DefaultRemoveBladeInterval if err != nil { logrus.Fatalf("start periodically clean up blade, ticker error: %v", err) } } // first clean up periodicallyCleanUpBlade(ctx, cli, duration) // ticker clean up ticker := time.NewTicker(time.Second * time.Duration(duration.Seconds())) logrus.Infof("start periodically clean up blade ticker, interval: %s", chaosblade.RemoveBladeInterval) for range ticker.C { periodicallyCleanUpBlade(ctx, cli, duration) } }() } func periodicallyCleanUpBlade(ctx context.Context, cli client.Client, interval time.Duration) { results := &v1alpha1.ChaosBladeList{} if err := cli.List(ctx, results, &client.ListOptions{}); err != nil { logrus.Errorf("periodically clean up, list blade error: %v", err) } logrus.Infof("periodically clean up blade, blade size: %d", len(results.Items)) for _, item := range results.Items { if item.DeletionTimestamp == nil { continue } sub := time.Now().Sub(item.DeletionTimestamp.Time) if item.Status.Phase == v1alpha1.ClusterPhaseDestroying && sub.Seconds() > interval.Seconds() { logrus.Infof("periodically clean up blade %s, deletion time: %s", item.Name, item.DeletionTimestamp.String()) // patch blade if err := cli.Patch( ctx, &v1alpha1.ChaosBlade{ TypeMeta: metav1.TypeMeta{ APIVersion: "chaosblade.io/v1alpha1", Kind: "ChaosBlade", }, ObjectMeta: metav1.ObjectMeta{Name: item.Name}, }, client.RawPatch(types.MergePatchType, []byte(`{"metadata":{"finalizers":[]}}`)), ); err != nil { logrus.Errorf("patch blade: %s, 
error: %v", item.Name, err) } } } } // blank assignment to verify that ReconcileChaosBlade implements reconcile.Reconciler var _ reconcile.Reconciler = &ReconcileChaosBlade{} // ReconcileChaosBlade reconciles a ChaosBlade object type ReconcileChaosBlade struct { // This client, initialized using mgr.Client() above, is a split client // that reads objects from the cache and writes to the apiserver client *channel.Client scheme *runtime.Scheme Executor model.ExpController } // Reconcile reads that state of the cluster for a ChaosBlade object and makes changes based on the state read // and what is in the ChaosBlade.Spec // Note: // The Controller will requeue the Request to be processed again if the returned error is non-nil or // Result.Requeue is true, otherwise upon completion it will remove the work from the queue. func (r *ReconcileChaosBlade) Reconcile(ctx context.Context, request reconcile.Request) (reconcile.Result, error) { reqLogger := logrus.WithField("Request.Name", request.Name) forget := reconcile.Result{} // Fetch the RC instance cb := &v1alpha1.ChaosBlade{} err := r.client.Get(ctx, request.NamespacedName, cb) if err != nil { return forget, nil } if len(cb.Spec.Experiments) == 0 { return forget, nil } // reqLogger.Info(fmt.Sprintf("chaosblade obj: %+v", cb)) // Destroyed->delete // Remove the Finalizer if the CR object status is destroyed to delete it if cb.Status.Phase == v1alpha1.ClusterPhaseDestroyed { // Re-fetch the CR to avoid conflict latestCb := &v1alpha1.ChaosBlade{} if err := r.client.Get(ctx, request.NamespacedName, latestCb); err != nil { reqLogger.WithError(err).Errorln("re-fetch chaosblade failed before removing finalizer") return reconcile.Result{}, err } latestCb.SetFinalizers(remove(latestCb.GetFinalizers(), chaosbladeFinalizer)) err := r.client.Update(ctx, latestCb) if err != nil { reqLogger.WithError(err).Errorln("remove chaosblade finalizer failed at destroyed phase") return reconcile.Result{}, err } return forget, nil } if 
cb.Status.Phase == v1alpha1.ClusterPhaseDestroying || cb.GetDeletionTimestamp() != nil { err := r.finalizeChaosBlade(ctx, reqLogger, cb, request.NamespacedName) if err != nil { reqLogger.WithError(err).Errorln("finalize chaosblade failed at destroying phase") return reconcile.Result{}, err } return forget, nil } // Initial->Initialized if cb.Status.Phase == v1alpha1.ClusterPhaseInitial { if contains(cb.GetFinalizers(), chaosbladeFinalizer) { cb.Status.Phase = v1alpha1.ClusterPhaseInitialized cb.Status.ExpStatuses = make([]v1alpha1.ExperimentStatus, 0) if err := r.client.Status().Update(ctx, cb); err != nil { reqLogger.WithError(err).Errorln("update chaosblade phase to Initialized failed") return reconcile.Result{}, err } } else { cb.SetFinalizers(append(cb.GetFinalizers(), chaosbladeFinalizer)) // Update CR if err := r.client.Update(ctx, cb); err != nil { reqLogger.WithError(err).Errorln("add finalizer to chaosblade failed") return reconcile.Result{}, err } } return forget, nil } // Initialized->Running/Error // TODO When all the master nodes are inaccessible, there is the possibility of re-execution. 
if cb.Status.Phase == v1alpha1.ClusterPhaseInitialized || cb.Status.Phase == v1alpha1.ClusterPhaseUpdating { originalPhase := cb.Status.Phase expStatusList := make([]v1alpha1.ExperimentStatus, 0) phase := v1alpha1.ClusterPhaseError for _, exp := range cb.Spec.Experiments { experimentStatus := r.Executor.Create(cb.Name, exp) if experimentStatus.Success { phase = v1alpha1.ClusterPhaseRunning } expStatusList = append(expStatusList, experimentStatus) } // Retry status update on conflict to avoid re-executing Create side effects if err := retry.RetryOnConflict(retry.DefaultRetry, func() error { latestCb := &v1alpha1.ChaosBlade{} if err := r.client.Get(ctx, request.NamespacedName, latestCb); err != nil { return err } latestCb.Status.ExpStatuses = expStatusList latestCb.Status.Phase = phase return r.client.Status().Update(ctx, latestCb) }); err != nil { reqLogger.WithError(err).Errorf("update phase from %s to %s failed", originalPhase, phase) return reconcile.Result{}, err } return forget, nil } // Running/Error->Updating/Destroying if cb.Status.Phase == v1alpha1.ClusterPhaseRunning || cb.Status.Phase == v1alpha1.ClusterPhaseError { // Update CR, firstly destroy it and re-create the new CR phase := v1alpha1.ClusterPhaseUpdating originalPhase := cb.Status.Phase logrus.Infof("update cb: %+v", *cb) matchersString := cb.GetAnnotations()["preSpec"] if matchersString != "" { var oldSpec v1alpha1.ChaosBladeSpec err := json.Unmarshal([]byte(matchersString), &oldSpec) if err != nil { reqLogger.WithError(err).Errorf("unmarshal old spec failed, %s", matchersString) return forget, nil } // update annotation to cb if err = r.client.Update(ctx, cb); err != nil { reqLogger.WithError(err).Errorln("add annotation to chaosblade failed") } if cb.Status.ExpStatuses != nil { for idx, expStatus := range cb.Status.ExpStatuses { experimentStatus := r.Executor.Destroy(cb.Name, oldSpec.Experiments[idx], expStatus) if !experimentStatus.Success { phase = v1alpha1.ClusterPhaseDestroying } 
cb.Status.ExpStatuses[idx] = experimentStatus } } // Retry status update on conflict to avoid re-executing Destroy side effects updatedExpStatuses := make([]v1alpha1.ExperimentStatus, len(cb.Status.ExpStatuses)) copy(updatedExpStatuses, cb.Status.ExpStatuses) cb.Status.Phase = phase if err := retry.RetryOnConflict(retry.DefaultRetry, func() error { latestCb := &v1alpha1.ChaosBlade{} if err := r.client.Get(ctx, request.NamespacedName, latestCb); err != nil { return err } latestCb.Status.ExpStatuses = updatedExpStatuses latestCb.Status.Phase = phase return r.client.Status().Update(ctx, latestCb) }); err != nil { reqLogger.WithError(err).Errorf("update phase from %s to %s failed", originalPhase, phase) } return forget, nil } reqLogger.Errorln("can not found matchers in annotations field") } return forget, nil } // finalizeChaosBlade func (r *ReconcileChaosBlade) finalizeChaosBlade(ctx context.Context, reqLogger *logrus.Entry, cb *v1alpha1.ChaosBlade, namespacedName types.NamespacedName) error { phase := v1alpha1.ClusterPhaseDestroyed reqLogger.Infoln("Finalize the chaosblade") if cb.Status.ExpStatuses != nil && len(cb.Spec.Experiments) == len(cb.Status.ExpStatuses) { for idx, exp := range cb.Spec.Experiments { oldExpStatus := cb.Status.ExpStatuses[idx] oldExpStatus = r.Executor.Destroy(cb.Name, exp, oldExpStatus) if !oldExpStatus.Success { phase = v1alpha1.ClusterPhaseDestroying } cb.Status.ExpStatuses[idx] = oldExpStatus } } // Retry status update on conflict to avoid re-executing Destroy side effects if err := retry.RetryOnConflict(retry.DefaultRetry, func() error { latestCb := &v1alpha1.ChaosBlade{} if err := r.client.Get(ctx, namespacedName, latestCb); err != nil { return err } latestCb.Status.ExpStatuses = cb.Status.ExpStatuses latestCb.Status.Phase = phase return r.client.Status().Update(ctx, latestCb) }); err != nil { return fmt.Errorf("update chaosblade status failed in finalize phase, %v", err) } if phase == v1alpha1.ClusterPhaseDestroying { return 
fmt.Errorf("failed to destroy, please see the experiment status") } reqLogger.Info("Successfully finalized chaosblade") return nil } func contains(list []string, s string) bool { for _, v := range list { if v == s { return true } } return false } func remove(list []string, s string) []string { for i, v := range list { if v == s { list = append(list[:i], list[i+1:]...) } } return list } ================================================ FILE: pkg/controller/chaosblade/daemonset.go ================================================ /* * Copyright 2025 The ChaosBlade Authors * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. 
*/ package chaosblade import ( "context" "fmt" "github.com/operator-framework/operator-sdk/pkg/k8sutil" "github.com/sirupsen/logrus" appsv1 "k8s.io/api/apps/v1" corev1 "k8s.io/api/core/v1" apierrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/apimachinery/pkg/types" "github.com/chaosblade-io/chaosblade-operator/pkg/runtime/chaosblade" ) // Deploy the chaosblade tool with daemonset mode func deployChaosBladeTool(rcb *ReconcileChaosBlade) error { references, err := createOwnerReferences(rcb) if err != nil { return err } daemonSet := &appsv1.DaemonSet{ ObjectMeta: metav1.ObjectMeta{ Name: chaosblade.DaemonsetPodName, Namespace: chaosblade.DaemonsetPodNamespace, Labels: chaosblade.DaemonsetPodLabels, OwnerReferences: references, }, Spec: createDaemonsetSpec(), } if err := rcb.client.Create(context.TODO(), daemonSet); err != nil { if apierrors.IsAlreadyExists(err) { logrus.Info("chaosblade tool exits, skip to deploy") return nil } return err } return nil } func createOwnerReferences(rcb *ReconcileChaosBlade) ([]metav1.OwnerReference, error) { // get chaosblade operator deployment object // Using a unstructured object. 
u := &unstructured.Unstructured{} u.SetGroupVersionKind(schema.GroupVersionKind{ Group: "apps", Kind: "Deployment", Version: "v1", }) namespace, err := k8sutil.GetOperatorNamespace() if err != nil { return nil, err } err = rcb.client.Get(context.TODO(), types.NamespacedName{ Namespace: namespace, Name: "chaosblade-operator", }, u) if err != nil { logrus.WithError(err).Error("cannot get chaosblade-operator deployment from apps/v1") return nil, err } trueVar := true return []metav1.OwnerReference{ { APIVersion: u.GetAPIVersion(), Kind: u.GetKind(), Name: u.GetName(), UID: u.GetUID(), Controller: &trueVar, }, }, nil } // createDaemonsetSpec func createDaemonsetSpec() appsv1.DaemonSetSpec { return appsv1.DaemonSetSpec{ Selector: &metav1.LabelSelector{MatchLabels: chaosblade.DaemonsetPodLabels}, Template: createPodTemplateSpec(), MinReadySeconds: 5, UpdateStrategy: appsv1.DaemonSetUpdateStrategy{Type: appsv1.RollingUpdateDaemonSetStrategyType}, } } // createPodTemplateSpec func createPodTemplateSpec() corev1.PodTemplateSpec { return corev1.PodTemplateSpec{ ObjectMeta: metav1.ObjectMeta{ Name: chaosblade.DaemonsetPodName, Labels: chaosblade.DaemonsetPodLabels, }, Spec: createPodSpec(), } } func createPodSpec() corev1.PodSpec { pathType := corev1.HostPathFileOrCreate periodSeconds := int64(30) return corev1.PodSpec{ Containers: []corev1.Container{createContainer()}, Affinity: createAffinity(), DNSPolicy: corev1.DNSClusterFirstWithHostNet, HostNetwork: true, HostPID: true, Tolerations: []corev1.Toleration{{Effect: corev1.TaintEffectNoSchedule, Operator: corev1.TolerationOpExists}}, TerminationGracePeriodSeconds: &periodSeconds, SchedulerName: corev1.DefaultSchedulerName, RestartPolicy: corev1.RestartPolicyAlways, Volumes: []corev1.Volume{ { Name: "docker-socket", VolumeSource: corev1.VolumeSource{HostPath: &corev1.HostPathVolumeSource{Path: "/var/run/docker.sock"}}, }, { Name: "chaosblade-db-volume", VolumeSource: corev1.VolumeSource{HostPath: &corev1.HostPathVolumeSource{ 
Path: "/var/run/chaosblade.dat", Type: &pathType, }}, }, { Name: "hosts", VolumeSource: corev1.VolumeSource{HostPath: &corev1.HostPathVolumeSource{Path: "/etc/hosts"}}, }, }, } } func createAffinity() *corev1.Affinity { return &corev1.Affinity{ NodeAffinity: &corev1.NodeAffinity{ RequiredDuringSchedulingIgnoredDuringExecution: &corev1.NodeSelector{ NodeSelectorTerms: []corev1.NodeSelectorTerm{ { MatchExpressions: []corev1.NodeSelectorRequirement{{ Key: "type", Operator: corev1.NodeSelectorOpNotIn, Values: []string{"virtual-kubelet"}, }}, }, }, }, }, } } func createContainer() corev1.Container { trueVar := true return corev1.Container{ Name: chaosblade.DaemonsetPodName, Image: fmt.Sprintf("%s:%s", chaosblade.Constant.ImageRepoFunc(), chaosblade.Version), ImagePullPolicy: corev1.PullPolicy(chaosblade.PullPolicy), VolumeMounts: []corev1.VolumeMount{ {Name: "docker-socket", MountPath: "/var/run/docker.sock"}, {Name: "chaosblade-db-volume", MountPath: "/opt/chaosblade/chaosblade.dat"}, {Name: "hosts", MountPath: "/etc/hosts"}, }, SecurityContext: &corev1.SecurityContext{Privileged: &trueVar}, } } ================================================ FILE: pkg/controller/chaosblade/predicate.go ================================================ /* * Copyright 2025 The ChaosBlade Authors * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. 
*/ package chaosblade import ( "encoding/json" "reflect" "github.com/sirupsen/logrus" "sigs.k8s.io/controller-runtime/pkg/event" "github.com/chaosblade-io/chaosblade-operator/pkg/apis/chaosblade/v1alpha1" ) type SpecUpdatedPredicateForRunningPhase struct{} func (sup *SpecUpdatedPredicateForRunningPhase) Create(e event.TypedCreateEvent[*v1alpha1.ChaosBlade]) bool { if e.Object == nil { return false } obj := e.Object logrus.Infof("trigger create event, name: %s", obj.Name) logrus.Debugf("creating obj: %+v", obj) if obj.GetDeletionTimestamp() != nil { logrus.Infof("unexpected phase for cb creating, name: %s, phase: %s", obj.Name, obj.Status.Phase) return false } if obj.Status.Phase == v1alpha1.ClusterPhaseInitial { return true } logrus.Infof("unexpected phase for cb creating, name: %s, phase: %s", obj.Name, obj.Status.Phase) return false } func (*SpecUpdatedPredicateForRunningPhase) Delete(e event.TypedDeleteEvent[*v1alpha1.ChaosBlade]) bool { if e.Object == nil { return false } obj := e.Object logrus.Infof("trigger delete event, name: %s", obj.Name) logrus.Debugf("deleting obj: %+v", obj) return contains(obj.GetFinalizers(), chaosbladeFinalizer) } func (*SpecUpdatedPredicateForRunningPhase) Update(e event.TypedUpdateEvent[*v1alpha1.ChaosBlade]) bool { if e.ObjectOld == nil { return false } oldObj := e.ObjectOld logrus.Infof("trigger update event, name: %s", oldObj.Name) newObj := e.ObjectNew if newObj == nil { return false } logrus.Debugf("updating oldObj: %+v", oldObj) logrus.Debugf("updating newObj: %+v", newObj) if !reflect.DeepEqual(newObj.Spec, oldObj.Spec) { bytes, err := json.Marshal(oldObj.Spec.DeepCopy()) if err != nil { logrus.Warningf("marshal old spec failed, %+v", err) return false } newObj.SetAnnotations(map[string]string{"preSpec": string(bytes)}) return true } if newObj.Status.Phase == v1alpha1.ClusterPhaseInitial { return true } // delete Error chaosblade if oldObj.GetDeletionTimestamp() == nil && newObj.GetDeletionTimestamp() != nil { return true } 
if newObj.Status.Phase == v1alpha1.ClusterPhaseRunning || newObj.Status.Phase == v1alpha1.ClusterPhaseError || newObj.Status.Phase == v1alpha1.ClusterPhaseDestroying { return false } if newObj.Status.Phase != oldObj.Status.Phase { return true } if !reflect.DeepEqual(newObj.Status, oldObj.Status) { return true } if newObj.GetDeletionTimestamp() != nil { if contains(newObj.GetFinalizers(), chaosbladeFinalizer) { return true } logrus.Infof("cannot find the %s finalizer, so skip the update event", chaosbladeFinalizer) return false } logrus.Infof("spec not changed under %s phase, so skip the update event", newObj.Status.Phase) return false } func (*SpecUpdatedPredicateForRunningPhase) Generic(e event.TypedGenericEvent[*v1alpha1.ChaosBlade]) bool { return false } ================================================ FILE: pkg/controller/controller.go ================================================ /* * Copyright 2025 The ChaosBlade Authors * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. 
*/

// Package controller wires every registered controller into a single
// controller-runtime manager.
package controller

import (
	"sigs.k8s.io/controller-runtime/pkg/manager"
)

// AddToManagerFuncs is a list of functions to add all Controllers to the Manager
var AddToManagerFuncs []func(manager.Manager) error

// AddToManager adds all Controllers to the Manager.
// It stops at — and returns — the first registration error; controllers added
// before the failure remain registered on the manager.
func AddToManager(m manager.Manager) error {
	for _, f := range AddToManagerFuncs {
		if err := f(m); err != nil {
			return err
		}
	}
	return nil
}

================================================
FILE: pkg/hookfs/client.go
================================================
/*
 * Copyright 2025 The ChaosBlade Authors
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
*/

package hookfs

import (
	"context"
	"encoding/json"
	"errors"
	"io/ioutil"
	"net"
	"net/http"
	"time"

	"github.com/chaosblade-io/chaosblade-spec-go/util"
	"github.com/sirupsen/logrus"
)

// ChaosBladeHookClient is a small HTTP client for the fuse hook server
// (ChaosbladeHookServer in server.go) running in the target sidecar.
type ChaosBladeHookClient struct {
	client *http.Client // used by Revoke; 30s overall timeout, no keep-alives
	addr   string       // "host:port" of the hook server
}

// NewChabladeHookClient builds a client for the hook server listening on addr.
// NOTE(review): the name drops the "os" in "Chaosblade"; renaming would break
// existing callers, so the typo is kept.
func NewChabladeHookClient(addr string) *ChaosBladeHookClient {
	return &ChaosBladeHookClient{
		addr: addr,
		client: &http.Client{
			Timeout: 30 * time.Second,
			Transport: &http.Transport{
				DialContext: (&net.Dialer{
					Timeout: 5 * time.Second,
				}).DialContext,
				DisableKeepAlives: true,
			},
		},
	}
}

// InjectFault POSTs injectMsg to the server's inject endpoint; a non-200
// response is turned into an error built from the response body.
// NOTE(review): ctx is currently unused — util.PostCurl takes no context, so
// cancellation/deadlines are not propagated to this request.
func (c *ChaosBladeHookClient) InjectFault(ctx context.Context, injectMsg *InjectMessage) error {
	url := "http://" + c.addr + InjectPath
	body, err := json.Marshal(injectMsg)
	if err != nil {
		return err
	}
	logrus.WithField("injectMsg", injectMsg).Infoln("Inject fault")
	// note the unconventional return order: (result, err, statusCode)
	result, err, code := util.PostCurl(url, body, "application/json")
	if err != nil {
		return err
	}
	logrus.WithField("injectMsg", injectMsg).Infof("Response is %s", result)
	if code != http.StatusOK {
		return errors.New(result)
	}
	return nil
}

// Revoke asks the hook server to clear every injected fault; a non-200
// response is surfaced as an error built from the response body.
// NOTE(review): ctx is not attached to the request here either.
func (c *ChaosBladeHookClient) Revoke(ctx context.Context) error {
	url := "http://" + c.addr + RecoverPath
	req, err := http.NewRequest("GET", url, nil)
	if err != nil {
		return err
	}
	req.Header.Set("Content-Type", "application/json")
	resp, err := c.client.Do(req)
	if err != nil {
		return err
	}
	defer resp.Body.Close()
	bytes, err := ioutil.ReadAll(resp.Body)
	if err != nil {
		return err
	}
	result := string(bytes)
	logrus.Infof("Revoke fault, response is %s", result)
	if resp.StatusCode != http.StatusOK {
		return errors.New(result)
	}
	return nil
}

================================================
FILE: pkg/hookfs/hook.go
================================================
/*
 * Copyright 2025 The ChaosBlade Authors
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package hookfs

import (
	"math/rand"
	"path"
	"strings"
	"syscall"
	"time"

	"github.com/ethercflow/hookfs/hookfs"
	"github.com/hanwen/go-fuse/fuse"
	"github.com/sirupsen/logrus"
)

// ChaosbladeHookContext is the per-call context handed from each Pre hook to
// its matching Post hook; it carries no state today.
type ChaosbladeHookContext struct{}

// ChaosbladeHook implements the hookfs Pre/Post hook points. Every Pre hook
// consults the fault table (via doInjectFault, keyed by the lowercase method
// name) and, when a fault fires, aborts the real filesystem operation by
// returning hooked=true with the injected error. Post hooks are all no-ops.
type ChaosbladeHook struct {
	MountPoint string // absolute mount point; prepended to hook paths for rule matching
}

func (h *ChaosbladeHook) PreOpen(path string, flags uint32) (bool, hookfs.HookContext, error) {
	ctx := &ChaosbladeHookContext{}
	err := h.doInjectFault(path, "open")
	if err != nil {
		return true, ctx, err
	}
	return false, ctx, nil
}

func (h *ChaosbladeHook) PostOpen(int32, hookfs.HookContext) (bool, error) {
	return false, nil
}

// PreRead returns a nil buffer in every case: on fault the injected error is
// surfaced, otherwise the real read proceeds.
func (h *ChaosbladeHook) PreRead(path string, length int64, offset int64) ([]byte, bool, hookfs.HookContext, error) {
	ctx := &ChaosbladeHookContext{}
	err := h.doInjectFault(path, "read")
	if err != nil {
		return nil, true, ctx, err
	}
	return nil, false, ctx, nil
}

func (h *ChaosbladeHook) PostRead(realRetCode int32, realBuf []byte, prehookCtx hookfs.HookContext) ([]byte, bool, error) {
	return nil, false, nil
}

func (h *ChaosbladeHook) PreWrite(path string, buf []byte, offset int64) (bool, hookfs.HookContext, error) {
	ctx := &ChaosbladeHookContext{}
	err := h.doInjectFault(path, "write")
	if err != nil {
		return true, ctx, err
	}
	return false, ctx, nil
}

func (h *ChaosbladeHook) PostWrite(realRetCode int32, prehookCtx hookfs.HookContext) (bool, error) {
	return false, nil
}

func (h *ChaosbladeHook) PreMkdir(path string, mode uint32) (bool, hookfs.HookContext, error) {
	ctx := &ChaosbladeHookContext{}
	err := h.doInjectFault(path, "mkdir")
	if err != nil {
		return true, ctx, err
	}
	return false, ctx, nil
}

func (h *ChaosbladeHook) PostMkdir(realRetCode int32, prehookCtx hookfs.HookContext) (bool, error) {
	return false, nil
}

func (h *ChaosbladeHook) PreRmdir(path string) (bool, hookfs.HookContext, error) {
	ctx := &ChaosbladeHookContext{}
	err := h.doInjectFault(path, "rmdir")
	if err != nil {
		return true, ctx, err
	}
	return false, ctx, nil
}

func (h *ChaosbladeHook) PostRmdir(realRetCode int32, prehookCtx hookfs.HookContext) (bool, error) {
	return false, nil
}

func (h *ChaosbladeHook) PreOpenDir(path string) (bool, hookfs.HookContext, error) {
	ctx := &ChaosbladeHookContext{}
	err := h.doInjectFault(path, "opendir")
	if err != nil {
		return true, ctx, err
	}
	return false, ctx, nil
}

func (h *ChaosbladeHook) PostOpenDir(realRetCode int32, prehookCtx hookfs.HookContext) (bool, error) {
	return false, nil
}

func (h *ChaosbladeHook) PreFsync(path string, flags uint32) (bool, hookfs.HookContext, error) {
	ctx := &ChaosbladeHookContext{}
	err := h.doInjectFault(path, "fsync")
	if err != nil {
		return true, ctx, err
	}
	return false, ctx, nil
}

func (h *ChaosbladeHook) PostFsync(realRetCode int32, prehookCtx hookfs.HookContext) (bool, error) {
	return false, nil
}

func (h *ChaosbladeHook) PreFlush(path string) (bool, hookfs.HookContext, error) {
	ctx := &ChaosbladeHookContext{}
	err := h.doInjectFault(path, "flush")
	if err != nil {
		return true, ctx, err
	}
	return false, ctx, nil
}

func (h *ChaosbladeHook) PostFlush(realRetCode int32, prehookCtx hookfs.HookContext) (bool, error) {
	return false, nil
}

// PreRelease has no error return in the hookfs interface, so an injected
// fault (e.g. a delay) runs but its error is deliberately discarded.
func (h *ChaosbladeHook) PreRelease(path string) (bool, hookfs.HookContext) {
	ctx := &ChaosbladeHookContext{}
	_ = h.doInjectFault(path, "release")
	return false, ctx
}

func (h *ChaosbladeHook) PostRelease(prehookCtx hookfs.HookContext) (hooked bool) {
	return false
}

func (h *ChaosbladeHook) PreTruncate(path string, size uint64) (bool, hookfs.HookContext, error) {
	ctx := &ChaosbladeHookContext{}
	err := h.doInjectFault(path, "truncate")
	if err != nil {
		return true, ctx, err
	}
	return false, ctx, nil
}

func (h *ChaosbladeHook) PostTruncate(realRetCode int32, prehookCtx hookfs.HookContext) (bool, error) {
	return false, nil
}

func (h *ChaosbladeHook) PreGetAttr(path string) (bool, hookfs.HookContext, error) {
	ctx := &ChaosbladeHookContext{}
	err := h.doInjectFault(path, "getattr")
	if err != nil {
		return true, ctx, err
	}
	return false, ctx, nil
}

func (h *ChaosbladeHook) PostGetAttr(realRetCode int32, prehookCtx hookfs.HookContext) (bool, error) {
	return false, nil
}

func (h *ChaosbladeHook) PreChown(path string, uid uint32, gid uint32) (bool, hookfs.HookContext, error) {
	ctx := &ChaosbladeHookContext{}
	err := h.doInjectFault(path, "chown")
	if err != nil {
		return true, ctx, err
	}
	return false, ctx, nil
}

func (h *ChaosbladeHook) PostChown(realRetCode int32, prehookCtx hookfs.HookContext) (hooked bool, err error) {
	return false, nil
}

func (h *ChaosbladeHook) PreChmod(path string, perms uint32) (bool, hookfs.HookContext, error) {
	ctx := &ChaosbladeHookContext{}
	err := h.doInjectFault(path, "chmod")
	if err != nil {
		return true, ctx, err
	}
	return false, ctx, nil
}

func (h *ChaosbladeHook) PostChmod(realRetCode int32, prehookCtx hookfs.HookContext) (bool, error) {
	return false, nil
}

func (h *ChaosbladeHook) PreUtimens(path string, atime *time.Time, mtime *time.Time) (bool, hookfs.HookContext, error) {
	ctx := &ChaosbladeHookContext{}
	err := h.doInjectFault(path, "utimens")
	if err != nil {
		return true, ctx, err
	}
	return false, ctx, nil
}

func (h *ChaosbladeHook) PostUtimens(realRetCode int32, prehookCtx hookfs.HookContext) (bool, error) {
	return false, nil
}

func (h *ChaosbladeHook) PreAllocate(path string, off uint64, size uint64, mode uint32) (bool, hookfs.HookContext, error) {
	ctx := &ChaosbladeHookContext{}
	err := h.doInjectFault(path, "allocate")
	if err != nil {
		return true, ctx, err
	}
	return false, ctx, nil
}

func (h *ChaosbladeHook) PostAllocate(realRetCode int32, prehookCtx hookfs.HookContext) (hooked bool, err error) {
	return false, nil
}

func (h *ChaosbladeHook) PreGetLk(path string, owner uint64, lk *fuse.FileLock, flags uint32, out *fuse.FileLock) (bool, hookfs.HookContext, error) {
	ctx := &ChaosbladeHookContext{}
	err := h.doInjectFault(path, "getlk")
	if err != nil {
		return true, ctx, err
	}
	return false, ctx, nil
}

func (h *ChaosbladeHook) PostGetLk(realRetCode int32, prehookCtx hookfs.HookContext) (hooked bool, err error) {
	return false, nil
}

func (h *ChaosbladeHook) PreSetLk(path string, owner uint64, lk *fuse.FileLock, flags uint32) (bool, hookfs.HookContext, error) {
	ctx := &ChaosbladeHookContext{}
	err := h.doInjectFault(path, "setlk")
	if err != nil {
		return true, ctx, err
	}
	return false, ctx, nil
}

func (h *ChaosbladeHook) PostSetLk(realRetCode int32, prehookCtx hookfs.HookContext) (hooked bool, err error) {
	return false, nil
}

func (h *ChaosbladeHook) PreSetLkw(path string, owner uint64, lk *fuse.FileLock, flags uint32) (bool, hookfs.HookContext, error) {
	ctx := &ChaosbladeHookContext{}
	err := h.doInjectFault(path, "setlkw")
	if err != nil {
		return true, ctx, err
	}
	return false, ctx, nil
}

func (h *ChaosbladeHook) PostSetLkw(realRetCode int32, prehookCtx hookfs.HookContext) (bool, error) {
	return false, nil
}

func (h *ChaosbladeHook) PreStatFs(path string) (bool, hookfs.HookContext, error) {
	ctx := &ChaosbladeHookContext{}
	err := h.doInjectFault(path, "statfs")
	if err != nil {
		return true, ctx, err
	}
	return false, ctx, nil
}

func (h *ChaosbladeHook) PostStatFs(prehookCtx hookfs.HookContext) (bool, error) {
	return false, nil
}

func (h *ChaosbladeHook) PreReadlink(name string) (bool, hookfs.HookContext, error) {
	ctx := &ChaosbladeHookContext{}
	err := h.doInjectFault(name, "readlink")
	if err != nil {
		return true, ctx, err
	}
	return false, ctx, nil
}

func (h *ChaosbladeHook) PostReadlink(realRetCode int32, prehookCtx hookfs.HookContext) (bool, error) {
	return false, nil
}

// PreSymlink checks the fault rule against both the link target and the link
// name, since a rule's path filter may match either side.
func (h *ChaosbladeHook) PreSymlink(value string, linkName string) (bool, hookfs.HookContext, error) {
	ctx := &ChaosbladeHookContext{}
	err := h.doInjectFault(value, "symlink")
	if err != nil {
		return true, ctx, err
	}
	err = h.doInjectFault(linkName, "symlink")
	if err != nil {
		return true, ctx, err
	}
	return false, ctx, nil
}

func (h *ChaosbladeHook) PostSymlink(realRetCode int32, prehookCtx hookfs.HookContext) (bool, error) {
	return false, nil
}

func (h *ChaosbladeHook) PreCreate(name string, flags uint32, mode uint32) (bool, hookfs.HookContext, error) {
	ctx := &ChaosbladeHookContext{}
	err := h.doInjectFault(name, "create")
	if err != nil {
		return true, ctx, err
	}
	return false, ctx, nil
}

func (h *ChaosbladeHook) PostCreate(realRetCode int32, prehookCtx hookfs.HookContext) (bool, error) {
	return false, nil
}

func (h *ChaosbladeHook) PreAccess(name string, mode uint32) (bool, hookfs.HookContext, error) {
	ctx := &ChaosbladeHookContext{}
	err := h.doInjectFault(name, "access")
	if err != nil {
		return true, ctx, err
	}
	return false, ctx, nil
}

func (h *ChaosbladeHook) PostAccess(realRetCode int32, prehookCtx hookfs.HookContext) (bool, error) {
	return false, nil
}

// PreLink, like PreSymlink, checks both the old and the new name.
func (h *ChaosbladeHook) PreLink(oldName string, newName string) (bool, hookfs.HookContext, error) {
	ctx := &ChaosbladeHookContext{}
	err := h.doInjectFault(oldName, "link")
	if err != nil {
		return true, ctx, err
	}
	err = h.doInjectFault(newName, "link")
	if err != nil {
		return true, ctx, err
	}
	return false, ctx, nil
}

func (h *ChaosbladeHook) PostLink(realRetCode int32, prehookCtx hookfs.HookContext) (bool, error) {
	return false, nil
}

func (h *ChaosbladeHook) PreMknod(name string, mode uint32, dev uint32) (bool, hookfs.HookContext, error) {
	ctx := &ChaosbladeHookContext{}
	err := h.doInjectFault(name, "mknod")
	if err != nil {
		return true, ctx, err
	}
	return false, ctx, nil
}

func (h *ChaosbladeHook) PostMknod(realRetCode int32, prehookCtx hookfs.HookContext) (bool, error) {
	return false, nil
}

// PreRename checks both the source and the destination path.
func (h *ChaosbladeHook) PreRename(oldName string, newName string) (bool, hookfs.HookContext, error) {
	ctx := &ChaosbladeHookContext{}
	err := h.doInjectFault(oldName, "rename")
	if err != nil {
		return true, ctx, err
	}
	err = h.doInjectFault(newName, "rename")
	if err != nil {
		return true, ctx, err
	}
	return false, ctx, nil
}

func (h *ChaosbladeHook) PostRename(realRetCode int32, prehookCtx hookfs.HookContext) (bool, error) {
	return false, nil
}

func (h *ChaosbladeHook) PreUnlink(name string) (bool, hookfs.HookContext, error) {
	ctx := &ChaosbladeHookContext{}
	err := h.doInjectFault(name, "unlink")
	if err != nil {
		return true, ctx, err
	}
	return false, ctx, nil
}

func (h *ChaosbladeHook) PostUnlink(realRetCode int32, prehookCtx hookfs.HookContext) (bool, error) {
	return false, nil
}

func (h *ChaosbladeHook) PreGetXAttr(name string, attribute string) (bool, hookfs.HookContext, error) {
	ctx := &ChaosbladeHookContext{}
	err := h.doInjectFault(name, "getxattr")
	if err != nil {
		return true, ctx, err
	}
	return false, ctx, nil
}

func (h *ChaosbladeHook) PostGetXAttr(realRetCode int32, prehookCtx hookfs.HookContext) (bool, error) {
	return false, nil
}

func (h *ChaosbladeHook) PreListXAttr(name string) (bool, hookfs.HookContext, error) {
	ctx := &ChaosbladeHookContext{}
	err := h.doInjectFault(name, "listxattr")
	if err != nil {
		return true, ctx, err
	}
	return false, ctx, nil
}

func (h *ChaosbladeHook) PostListXAttr(realRetCode int32, prehookCtx hookfs.HookContext) (bool, error) {
	return false, nil
}

func (h *ChaosbladeHook) PreRemoveXAttr(name string, attr string) (bool, hookfs.HookContext, error) {
	ctx := &ChaosbladeHookContext{}
	err := h.doInjectFault(name, "removexattr")
	if err != nil {
		return true, ctx, err
	}
	return false, ctx, nil
}

func (h *ChaosbladeHook) PostRemoveXAttr(realRetCode int32, prehookCtx hookfs.HookContext) (bool, error) {
	return false, nil
}

func (h *ChaosbladeHook) PreSetXAttr(name string, attr string, data []byte, flags int) (bool, hookfs.HookContext, error) {
	ctx := &ChaosbladeHookContext{}
	err := h.doInjectFault(name, "setxattr")
	if err != nil {
		return true, ctx, err
	}
	return false, ctx, nil
}
func (h *ChaosbladeHook) PostSetXAttr(realRetCode int32, prehookCtx hookfs.HookContext) (bool, error) { return false, nil } func (h *ChaosbladeHook) doInjectFault(relativePath, method string) error { logrus.WithFields(logrus.Fields{ "method": method, "relativePath": relativePath, }).Infoln("do Inject fault") val, ok := injectFaultCache.Load(method) if !ok || val == nil { return nil } faultMsg, ok := val.(*InjectMessage) if !ok { logrus.Errorf("convert to InjectMessage failed, %+v", val) return nil } logrus.WithField("faultMessage", faultMsg).Infoln("do Inject fault with inject message") if faultMsg.Path != "" { actualPath := path.Join(h.MountPoint, relativePath) if !strings.HasPrefix(actualPath, faultMsg.Path) { logrus.WithFields(logrus.Fields{ "rulePath": faultMsg.Path, "actualPath": actualPath, }).Infoln("the rule path does not contain the actual path") return nil } } if faultMsg.Percent > 0 && !probab(faultMsg.Percent) { return nil } var err error = nil if faultMsg.Errno != 0 { err = syscall.Errno(faultMsg.Errno) } else if faultMsg.Random { err = randomErrno() } if faultMsg.Delay > 0 { time.Sleep(time.Duration(faultMsg.Delay) * time.Millisecond) } return err } func randomErrno() error { // from E2BIG to EXFULL, notice linux only return syscall.Errno(rand.Intn(0x36-0x7) + 0x7) } func probab(percentage uint32) bool { return rand.Intn(99) < int(percentage) } ================================================ FILE: pkg/hookfs/server.go ================================================ /* * Copyright 2025 The ChaosBlade Authors * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
* See the License for the specific language governing permissions and * limitations under the License. */ package hookfs import ( "context" "encoding/json" "fmt" "net/http" "sync" "github.com/sirupsen/logrus" ) var injectFaultCache sync.Map func init() { injectFaultCache = sync.Map{} } type InjectMessage struct { Methods []string `json:"methods"` Path string `json:"path"` Delay uint32 `json:"delay"` Percent uint32 `json:"percent"` Random bool `json:"random"` Errno uint32 `json:"errno"` } type ChaosbladeHookServer struct { addr string } func NewChaosbladeHookServer(addr string) *ChaosbladeHookServer { return &ChaosbladeHookServer{ addr: addr, } } func (s *ChaosbladeHookServer) Start(stop context.Context) error { mux := http.NewServeMux() mux.HandleFunc(InjectPath, s.InjectHandler) mux.HandleFunc(RecoverPath, s.RecoverHandler) errCh := make(chan error) server := &http.Server{ Addr: s.addr, Handler: mux, } go func() { errCh <- server.ListenAndServe() }() for { select { case <-stop.Done(): return server.Shutdown(context.Background()) case err := <-errCh: if err != nil { return err } } } } func (s *ChaosbladeHookServer) InjectHandler(w http.ResponseWriter, r *http.Request) { var injectMsg InjectMessage if err := json.NewDecoder(r.Body).Decode(&injectMsg); err != nil { logrus.WithError(err).Errorf("Cannot Decode Request Message, %+v", r) http.Error(w, "Cannot Decode Request Message", http.StatusBadRequest) return } logrus.WithField("injectMsg", injectMsg).Infoln("Inject Fault") for _, method := range injectMsg.Methods { injectFaultCache.Store(method, &injectMsg) } fmt.Fprintf(w, "success") } func (s *ChaosbladeHookServer) RecoverHandler(w http.ResponseWriter, r *http.Request) { logrus.Infoln("recover all fault") for _, method := range defaultHookPoints { injectFaultCache.Delete(method) } fmt.Fprintf(w, "success") } ================================================ FILE: pkg/hookfs/types.go ================================================ /* * Copyright 2025 The ChaosBlade 
Authors * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package hookfs var defaultHookPoints = []string{ "read", "write", "mkdir", "rmdir", "opendir", "fsync", "flush", "release", "truncate", "getattr", "chown", "utimens", "allocate", "getlk", "setlk", "setlkw", "statfs", "readlink", "symlink", "create", "access", "link", "mknod", "rename", "unlink", "getxattr", "listxattr", "removexattr", "setxattr", } var ( InjectPath = "/inject" RecoverPath = "/recover" ) ================================================ FILE: pkg/runtime/chaosblade/chaosblade.go ================================================ /* * Copyright 2025 The ChaosBlade Authors * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. 
*/ package chaosblade import ( "github.com/spf13/pflag" "github.com/chaosblade-io/chaosblade-operator/version" ) var ( ImageRepository string Version string PullPolicy string DaemonsetEnable bool RemoveBladeInterval string DownloadUrl string ) const ( OperatorChaosBladePath = "/opt/chaosblade" OperatorChaosBladeBin = "/opt/chaosblade/bin" OperatorChaosBladeLib = "/opt/chaosblade/lib" OperatorChaosBladeYaml = "/opt/chaosblade/yaml" OperatorChaosBladeBlade = "/opt/chaosblade/blade" ) const ( DaemonsetPodName = "chaosblade-tool" DefaultRemoveBladeInterval = "72h" ) var DaemonsetPodLabels = map[string]string{ "app": "chaosblade-tool", } // set in runtime var ( DaemonsetPodNamespace string DaemonsetPodNames = map[string]string{} ) var Products = map[string]*ProductConstant{} var Constant *ProductConstant type ProductConstant struct { ImageRepoFunc func() string } var f *pflag.FlagSet func init() { f = pflag.NewFlagSet("chaosblade", pflag.ExitOnError) // chaosblade config f.StringVar(&Version, "chaosblade-version", version.Version, "Chaosblade tool version") f.StringVar(&ImageRepository, "chaosblade-image-repository", "chaosbladeio/chaosblade-tool", "Image repository of chaosblade tool") f.StringVar(&PullPolicy, "chaosblade-image-pull-policy", "IfNotPresent", "Pulling policy of chaosblade image, default value is IfNotPresent.") f.BoolVar(&DaemonsetEnable, "daemonset-enable", false, "Deploy chaosblade daemonset to resolve chaos experiment environment of network, default value is false.") f.StringVar(&RemoveBladeInterval, "remove-blade-interval", DefaultRemoveBladeInterval, "Periodically clean up blade state is destroying, default value is 24h.") f.StringVar(&DownloadUrl, "chaosblade-download-url", "", "The chaosblade downloaded address which works when the chaosblade is deployed in download mode.") f.StringVar(&DaemonsetPodNamespace, "chaosblade-namespace", "chaosblade", "The chaosblade deployment namespace") } func FlagSet() *pflag.FlagSet { return f } 
================================================ FILE: pkg/runtime/product/aliyun/aliyun.go ================================================ /* * Copyright 2025 The ChaosBlade Authors * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package aliyun import ( "fmt" "github.com/spf13/pflag" "github.com/chaosblade-io/chaosblade-operator/pkg/runtime/chaosblade" ) var ( // flag RegionId string Environment string ) const ( AHAS = "ahas" prodEnv = "prod" publicRegion = "cn-public" ) var f *pflag.FlagSet func init() { f = pflag.NewFlagSet("aliyun", pflag.ExitOnError) f.StringVar(&RegionId, "aliyun-region-id", "", "Region id for cloud provider") f.StringVar(&Environment, "aliyun-environment", "", "Environment for cloud provider") chaosblade.Products[AHAS] = &chaosblade.ProductConstant{ ImageRepoFunc: ImageRepoForAliyun, } } var ImageRepoForAliyun = func() string { if RegionId == publicRegion { if Environment == prodEnv { return fmt.Sprintf("registry.cn-hangzhou.aliyuncs.com/ahascr-public/chaosblade-tool") } return fmt.Sprintf("registry.cn-hangzhou.aliyuncs.com/ahas-public/chaosblade-tool") } if Environment == prodEnv { return fmt.Sprintf("registry-vpc.%s.aliyuncs.com/ahascr/chaosblade-tool", RegionId) } return fmt.Sprintf("registry-vpc.%s.aliyuncs.com/ahas/chaosblade-tool", RegionId) } func FlagSet() *pflag.FlagSet { return f } ================================================ FILE: pkg/runtime/product/community/community.go ================================================ /* * Copyright 
2025 The ChaosBlade Authors
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

// Package community registers the open-source product variant, whose image
// repository comes straight from the --chaosblade-image-repository flag.
package community

import (
	"github.com/chaosblade-io/chaosblade-operator/pkg/runtime/chaosblade"
)

// Community is the product key for the open-source build.
const Community = "community"

func init() {
	chaosblade.Products[Community] = &chaosblade.ProductConstant{
		ImageRepoFunc: ImageRepoForCommunity,
	}
}

// ImageRepoForCommunity returns the flag-configured image repository.
var ImageRepoForCommunity = func() string {
	return chaosblade.ImageRepository
}

================================================
FILE: pkg/runtime/runtime.go
================================================
/*
 * Copyright 2025 The ChaosBlade Authors
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

// Package runtime assembles the operator-wide flag set and selects the active
// product constants based on the compiled-in version.Product.
package runtime

import (
	"github.com/spf13/pflag"

	"github.com/chaosblade-io/chaosblade-operator/pkg/runtime/chaosblade"
	"github.com/chaosblade-io/chaosblade-operator/pkg/runtime/product/aliyun"
	_ "github.com/chaosblade-io/chaosblade-operator/pkg/runtime/product/community"
	"github.com/chaosblade-io/chaosblade-operator/version"
)

var (
	flagSet *pflag.FlagSet
	// LogLevel is the logrus level name used by the operator.
	LogLevel string
	// MaxConcurrentReconciles bounds concurrent reconcile workers.
	MaxConcurrentReconciles int
	// QPS throttles the kubernetes client.
	QPS float32
)

func init() {
	flagSet = pflag.NewFlagSet("operator", pflag.ExitOnError)
	flagSet.StringVar(&LogLevel, "log-level", "info", "Log level, such as panic|fatal|error|warn|info|debug|trace")
	flagSet.IntVar(&MaxConcurrentReconciles, "reconcile-count", 20, "Max concurrent reconciles count, default value is 20")
	flagSet.Float32Var(&QPS, "qps", 20, "qps of kubernetes client")
	flagSet.AddFlagSet(aliyun.FlagSet())
	flagSet.AddFlagSet(chaosblade.FlagSet())
	initRuntimeData()
}

// initRuntimeData picks the constants of the product this binary was built
// for. NOTE(review): an unknown version.Product leaves chaosblade.Constant
// nil; consumers such as GetSidecarImage guard against that.
func initRuntimeData() {
	chaosblade.Constant = chaosblade.Products[version.Product]
}

// FlagSet exposes the combined operator flags.
func FlagSet() *pflag.FlagSet {
	return flagSet
}

================================================
FILE: pkg/webhook/pod/mutator.go
================================================
/*
 * Copyright 2025 The ChaosBlade Authors
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
*/ package pod import ( "context" "encoding/json" "fmt" "net/http" "path" "github.com/sirupsen/logrus" corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/resource" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/webhook/admission" "github.com/chaosblade-io/chaosblade-operator/pkg/runtime/chaosblade" "github.com/chaosblade-io/chaosblade-operator/version" ) var ( FuseServerPort int32 SidecarImage string ) const ( SidecarName = "chaosblade-fuse" FuseServerPortName = "fuse-port" ) // PodMutator set default values for pod type Mutator struct { client client.Client decoder admission.Decoder } func (v *Mutator) Handle(ctx context.Context, req admission.Request) admission.Response { pod := &corev1.Pod{} err := v.decoder.Decode(req, pod) if err != nil { return admission.Errored(http.StatusBadRequest, err) } patchPod := pod.DeepCopy() err = v.mutatePodsFn(patchPod) if err != nil { logrus.WithError(err).Errorln("mutate pod failed") return admission.Errored(http.StatusInternalServerError, err) } originalBytes, err := json.Marshal(pod) if err != nil { logrus.WithError(err).Errorln("Marshal original pod err") return admission.Allowed("") } expectedBytes, err := json.Marshal(patchPod) if err != nil { logrus.WithError(err).Errorln("Marshal patched pod err") } return admission.PatchResponseFromRaw(originalBytes, expectedBytes) } // PodMutator set default values for pod func (v *Mutator) mutatePodsFn(pod *corev1.Pod) error { if pod.Annotations == nil { return nil } injectVolumeName, ok := pod.Annotations["chaosblade/inject-volume"] if !ok { logrus.WithField("name", pod.Name).Infoln("pod has no chaosblade/inject-volume annotation") return nil } injectSubPath, ok := pod.Annotations["chaosblade/inject-volume-subpath"] if !ok { logrus.WithField("name", pod.Name).Infoln("pod has no chaosblade/inject-volume annotation") return nil } for _, container := range pod.Spec.Containers { if container.Name == SidecarName { logrus.WithField("name", 
pod.Name).Infoln("sidecar has been injected") return nil } } var targetVolumeMount corev1.VolumeMount // inject sidecar for the first container for _, volumeMount := range pod.Spec.Containers[0].VolumeMounts { if volumeMount.Name == injectVolumeName { if volumeMount.MountPropagation == nil { return fmt.Errorf("target volume mount propagation must be HostToContainer or Bidirectional") } if *volumeMount.MountPropagation != corev1.MountPropagationHostToContainer && *volumeMount.MountPropagation != corev1.MountPropagationBidirectional { return fmt.Errorf("target volume mount propagation is not support") } targetVolumeMount = volumeMount mountPropagation := corev1.MountPropagationBidirectional targetVolumeMount.MountPropagation = &mountPropagation } } if targetVolumeMount.Name == "" { return fmt.Errorf("pod has no volume mount %s", injectVolumeName) } privileged := true runAsUser := int64(0) // root mountPoint := path.Join(targetVolumeMount.MountPath, injectSubPath) original := path.Join(targetVolumeMount.MountPath, fmt.Sprintf("fuse-%s", injectSubPath)) logrus.WithFields(logrus.Fields{ "mountPoint": mountPoint, "mountPath": targetVolumeMount.MountPath, "podName": pod.Name, }).Infof("Get matched pod") if mountPoint == targetVolumeMount.MountPath { original = path.Join(path.Dir(targetVolumeMount.MountPath), fmt.Sprintf("fuse-%s", path.Base(targetVolumeMount.MountPath))) } sidecar := corev1.Container{ Name: SidecarName, Image: GetSidecarImage(), ImagePullPolicy: corev1.PullAlways, Command: []string{ "/opt/chaosblade/bin/chaos_fuse", }, Args: []string{ fmt.Sprintf("--address=:%d", FuseServerPort), fmt.Sprintf("--mountpoint=%s", mountPoint), fmt.Sprintf("--original=%s", original), }, Resources: corev1.ResourceRequirements{ Requests: corev1.ResourceList{ corev1.ResourceCPU: resource.MustParse("100m"), corev1.ResourceMemory: resource.MustParse("50Mi"), }, Limits: corev1.ResourceList{ corev1.ResourceCPU: resource.MustParse("100m"), corev1.ResourceMemory: 
resource.MustParse("50Mi"), }, }, Ports: []corev1.ContainerPort{ { Name: FuseServerPortName, ContainerPort: FuseServerPort, }, }, SecurityContext: &corev1.SecurityContext{ Privileged: &privileged, RunAsUser: &runAsUser, }, VolumeMounts: []corev1.VolumeMount{ targetVolumeMount, }, } containers := []corev1.Container{} containers = append(containers, sidecar, pod.Spec.Containers[0]) pod.Spec.Containers = containers return nil } // InjectClient injects the client. func (v *Mutator) InjectClient(c client.Client) error { v.client = c return nil } // InjectDecoder injects the decoder. func (v *Mutator) InjectDecoder(d admission.Decoder) error { v.decoder = d return nil } func GetSidecarImage() string { if SidecarImage != "" { return SidecarImage } if chaosblade.Constant != nil { return fmt.Sprintf("%s:%s", chaosblade.Constant.ImageRepoFunc(), version.Version) } // Fallback for testing when chaosblade.Constant is not initialized return fmt.Sprintf("%s:%s", chaosblade.ImageRepository, version.Version) } ================================================ FILE: pkg/webhook/pod/mutator_test.go ================================================ /* * Copyright 2025 The ChaosBlade Authors * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. 
*/
package pod

import (
	"fmt"
	"testing"

	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// Test_mutatePodsFn drives mutatePodsFn through table cases covering the
// happy path and each validation error: missing target volume mount, nil
// mount propagation, and an unsupported propagation mode.
func Test_mutatePodsFn(t *testing.T) {
	bidirectional := v1.MountPropagationBidirectional
	// hostToContainer := v1.MountPropagationHostToContainer
	None := v1.MountPropagationNone
	tests := []struct {
		pod *v1.Pod
		err error // expected error; nil means injection should succeed
	}{
		{
			// pod-0: annotated volume exists with Bidirectional propagation —
			// sidecar injection succeeds.
			pod: &v1.Pod{
				ObjectMeta: metav1.ObjectMeta{
					Name: "pod-0",
					Annotations: map[string]string{
						"chaosblade/inject-volume":         "fuse-test",
						"chaosblade/inject-volume-subpath": "data",
					},
				},
				Spec: v1.PodSpec{
					Containers: []v1.Container{
						{
							Name:  "test-0",
							Image: "test-0",
							VolumeMounts: []v1.VolumeMount{
								{
									Name:             "fuse-test",
									MountPath:        "/data",
									MountPropagation: &bidirectional,
								},
							},
						},
					},
				},
			},
			err: nil,
		},
		{
			// pod-1: annotation names a volume ("fuse-test") the first
			// container does not mount.
			pod: &v1.Pod{
				ObjectMeta: metav1.ObjectMeta{
					Name: "pod-1",
					Annotations: map[string]string{
						"chaosblade/inject-volume":         "fuse-test",
						"chaosblade/inject-volume-subpath": "/data",
					},
				},
				Spec: v1.PodSpec{
					Containers: []v1.Container{
						{
							Name:  "test-1",
							Image: "test-1",
							VolumeMounts: []v1.VolumeMount{
								{
									Name:             "data",
									MountPath:        "/data",
									MountPropagation: &bidirectional,
								},
							},
						},
					},
				},
			},
			err: fmt.Errorf("pod has no volume mount fuse-test"),
		},
		{
			// pod-2: target mount has no MountPropagation set at all.
			pod: &v1.Pod{
				ObjectMeta: metav1.ObjectMeta{
					Name: "pod-2",
					Annotations: map[string]string{
						"chaosblade/inject-volume":         "data",
						"chaosblade/inject-volume-subpath": "/data",
					},
				},
				Spec: v1.PodSpec{
					Containers: []v1.Container{
						{
							Name:  "test-2",
							Image: "test-2",
							VolumeMounts: []v1.VolumeMount{
								{
									Name:      "data",
									MountPath: "/data",
								},
							},
						},
					},
				},
			},
			err: fmt.Errorf("target volume mount propagation must be HostToContainer or Bidirectional"),
		},
		{
			// pod-3: target mount uses MountPropagationNone, which is not
			// accepted for fuse injection.
			pod: &v1.Pod{
				ObjectMeta: metav1.ObjectMeta{
					Name: "pod-3",
					Annotations: map[string]string{
						"chaosblade/inject-volume":         "data",
						"chaosblade/inject-volume-subpath": "/data",
					},
				},
				Spec: v1.PodSpec{
					Containers: []v1.Container{
						{
							Name:  "test-3",
							Image: "test-3",
							VolumeMounts: []v1.VolumeMount{
								{
									Name:             "data",
									MountPath:        "/data",
									MountPropagation: &None,
								},
							},
						},
					},
				},
			},
			err: fmt.Errorf("target volume mount propagation is not support"),
		},
	}
	mutator := &Mutator{}
	for _, test := range tests {
		err := mutator.mutatePodsFn(test.pod)
		// Only compares error text when an error occurred; a nil error with a
		// non-nil expectation is not flagged by this check.
		if err != nil && err.Error() != test.err.Error() {
			t.Errorf("unexpected result %v, expected result: %v", err, test.err)
		}
	}
}

================================================
FILE: pkg/webhook/webhook.go
================================================
/*
 * Copyright 2025 The ChaosBlade Authors
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package webhook

import (
	"github.com/spf13/pflag"

	mutator "github.com/chaosblade-io/chaosblade-operator/pkg/webhook/pod"
)

var (
	// Port is the HTTPS port the webhook server listens on.
	Port int
	// Enable toggles the webhook server on or off.
	Enable bool
)

// f holds the "webhook" flag set returned by FlagSet.
var f *pflag.FlagSet

// init registers the webhook flags, including the fuse sidecar settings
// that live in the pod mutator package.
func init() {
	f = pflag.NewFlagSet("webhook", pflag.ExitOnError)
	f.StringVar(&mutator.SidecarImage, "fuse-sidecar-image", "", "Fuse sidecar image")
	f.Int32Var(&mutator.FuseServerPort, "fuse-server-port", 65534, "Fuse server port")
	f.IntVar(&Port, "webhook-port", 9443, "The port on which to serve HTTPS.")
	f.BoolVar(&Enable, "webhook-enable", false, "Whether to enable webhook")
}

// FlagSet returns the webhook flag set.
func FlagSet() *pflag.FlagSet {
	return f
}

================================================
FILE: scripts/show-version.sh
================================================
#!/bin/bash

# Copyright 2025 The ChaosBlade Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# Version information display script.
# Quickly shows version information for the current project.

set -e

echo "=== ChaosBlade Operator 版本信息 ==="

# Collect Git/tooling metadata; each falls back to a placeholder when
# unavailable (e.g. outside a git checkout).
GIT_TAG=$(git describe --tags --abbrev=0 2>/dev/null || echo "无标签")
GIT_COMMIT=$(git rev-parse --short HEAD 2>/dev/null || echo "unknown")
GIT_BRANCH=$(git rev-parse --abbrev-ref HEAD 2>/dev/null || echo "unknown")
BUILD_TIME=$(date -u '+%Y-%m-%dT%H:%M:%SZ')
GO_VERSION=$(go version | awk '{print $3}')

echo "Git标签: $GIT_TAG"
echo "Git提交: $GIT_COMMIT"
echo "Git分支: $GIT_BRANCH"
echo "构建时间: $BUILD_TIME"
echo "Go版本: $GO_VERSION"
echo "平台: $(go env GOOS)/$(go env GOARCH)"

# If a build artifact exists, show its version information.
# Fix: the original used [ -f "target/chaosblade-*/..." ] — a glob inside
# double quotes is taken literally, so the test could never match. Expand
# the glob first and use the first match.
OPERATOR_BIN=$(ls target/chaosblade-*/bin/chaosblade-operator 2>/dev/null | head -n 1)
if [ -n "$OPERATOR_BIN" ]; then
    echo ""
    echo "=== 构建产物版本信息 ==="
    "$OPERATOR_BIN" --version 2>/dev/null || echo "无法获取构建产物版本信息"
fi

echo ""
echo "=== 构建命令示例 ==="
echo "显示版本信息: make show-version"
echo "构建二进制: make build_binary"
echo "构建Docker镜像: make docker-build"
echo "完整构建: make build_all"
echo "======================"

================================================
FILE: scripts/version.sh
================================================
#!/bin/bash

# Copyright 2025 The ChaosBlade Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and # limitations under the License. # 获取版本信息的脚本 # 用于在构建时注入版本信息到二进制文件中 set -e # 获取Git标签版本 get_git_version() { # 优先使用最新的tag local git_tag=$(git describe --tags --abbrev=0 2>/dev/null || echo "v0.0.0") # 移除v前缀 echo "${git_tag#v}" } # 获取Git提交哈希 get_git_commit() { git rev-parse --short HEAD 2>/dev/null || echo "unknown" } # 获取Git分支 get_git_branch() { git rev-parse --abbrev-ref HEAD 2>/dev/null || echo "unknown" } # 获取构建时间 get_build_time() { date -u '+%Y-%m-%dT%H:%M:%SZ' } # 获取Go版本 get_go_version() { go version | awk '{print $3}' } # 获取平台信息 get_platform() { echo "$(go env GOOS)/$(go env GOARCH)" } # 主函数 main() { local version=$(get_git_version) local commit=$(get_git_commit) local branch=$(get_git_branch) local build_time=$(get_build_time) local go_version=$(get_go_version) local platform=$(get_platform) # 输出版本信息,用于Makefile中的ldflags echo "VERSION=$version" echo "GIT_COMMIT=$commit" echo "GIT_BRANCH=$branch" echo "BUILD_TIME=$build_time" echo "GO_VERSION=$go_version" echo "PLATFORM=$platform" } # 如果直接运行此脚本,则输出所有版本信息 if [[ "${BASH_SOURCE[0]}" == "${0}" ]]; then main fi ================================================ FILE: version/version.go ================================================ /* * Copyright 2025 The ChaosBlade Authors * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. 
*/ package version import ( "fmt" "strconv" "strings" ) const criVersion = "1.5.0" var ( // 这些变量将在编译时通过 ldflags 注入 Version = "unknown" Product = "community" BuildTime = "unknown" GitCommit = "unknown" GitBranch = "unknown" GoVersion = "unknown" Platform = "unknown" // Version#Product CombinedVersion = "" Delimiter = "," ) func init() { if CombinedVersion != "" { fields := strings.Split(CombinedVersion, Delimiter) if len(fields) > 0 { Version = fields[0] } if len(fields) > 1 { Product = fields[1] } } } // GetVersionInfo 返回完整的版本信息 func GetVersionInfo() map[string]string { return map[string]string{ "version": Version, "product": Product, "buildTime": BuildTime, "gitCommit": GitCommit, "gitBranch": GitBranch, "goVersion": GoVersion, "platform": Platform, } } // GetVersionString 返回格式化的版本字符串 func GetVersionString() string { return fmt.Sprintf("Version: %s, Product: %s, BuildTime: %s, GitCommit: %s, GitBranch: %s, GoVersion: %s, Platform: %s", Version, Product, BuildTime, GitCommit, GitBranch, GoVersion, Platform) } // GetShortVersion 返回简短版本信息 func GetShortVersion() string { return fmt.Sprintf("%s-%s", Version, Product) } func CheckVerisonHaveCriCommand() bool { verisonA := strings.Split(Version, ".") criA := strings.Split(criVersion, ".") if len(verisonA) != 3 { return false } for k, v := range verisonA { vi, err := strconv.Atoi(v) if err != nil { return false } ci, _ := strconv.Atoi(criA[k]) if ci == vi { continue } if vi < ci { return false } return true } return true }