Repository: kastenhq/kubestr Branch: master Commit: e70331629cb5 Files: 67 Total size: 553.9 KB Directory structure: gitextract_7zvi4ff5/ ├── .github/ │ ├── dependabot.yaml │ └── workflows/ │ ├── ci.yml │ ├── dependency-review.yaml │ ├── docker-publish.yml │ ├── ossf-scorecard.yml │ └── release.yaml ├── .goreleaser.yml ├── Dockerfile ├── FIO.md ├── LICENSE ├── README.md ├── _config.yml ├── _posts/ │ └── 2021-02-07-FasterStorage.md ├── cmd/ │ └── rootCmd.go ├── docs/ │ ├── README.md │ └── _config.yml ├── extra/ │ └── csi-drivers ├── go.mod ├── go.sum ├── index.md ├── main.go ├── pkg/ │ ├── block/ │ │ ├── block_mount.go │ │ └── block_mount_test.go │ ├── common/ │ │ └── common.go │ ├── csi/ │ │ ├── csi.go │ │ ├── csi_ops.go │ │ ├── csi_ops_test.go │ │ ├── file_restore_inspector.go │ │ ├── file_restore_inspector_steps_test.go │ │ ├── file_restore_inspector_test.go │ │ ├── mocks/ │ │ │ ├── mock_api_version_fetcher.go │ │ │ ├── mock_application_creator.go │ │ │ ├── mock_argument_validator.go │ │ │ ├── mock_cleaner.go │ │ │ ├── mock_data_validator.go │ │ │ ├── mock_file_restore_stepper.go │ │ │ ├── mock_kube_executor.go │ │ │ ├── mock_port_forwarder.go │ │ │ ├── mock_pvc_browser_stepper.go │ │ │ ├── mock_snapshot_browser_stepper.go │ │ │ ├── mock_snapshot_creator.go │ │ │ └── mock_snapshot_restore_stepper.go │ │ ├── pvc_inspector.go │ │ ├── pvc_inspector_steps_test.go │ │ ├── pvc_inspector_test.go │ │ ├── snapshot_inspector.go │ │ ├── snapshot_inspector_steps_test.go │ │ ├── snapshot_inspector_test.go │ │ ├── snapshot_restore.go │ │ ├── snapshot_restore_steps_test.go │ │ ├── snapshot_restore_test.go │ │ └── types/ │ │ └── csi_types.go │ ├── fio/ │ │ ├── _config.yml │ │ ├── dbench_license │ │ ├── fio.go │ │ ├── fio_jobs.go │ │ ├── fio_test.go │ │ ├── fio_types.go │ │ └── parsable_fio_output.go │ └── kubestr/ │ ├── csi-drivers.go │ ├── kubernetes_checks.go │ ├── kubernetes_checks_test.go │ ├── kubestr.go │ ├── storage_provisioners.go │ ├── storage_provisioners_test.go │ └── utils.go └── scripts/ └── load_csi_provisioners.sh ================================================ FILE CONTENTS ================================================ ================================================ FILE: .github/dependabot.yaml ================================================ version: 2 updates: - package-ecosystem: gomod commit-message: prefix: "deps(go):" directory: "/" ignore: # Avoids unnecessarily auto-creating PRs for k8s dependencies, as these # will be closed since k8s dependencies need to be updated all at once # starting with kanister and go through additional validation. - dependency-name: "k8s.io/*" - dependency-name: "sigs.k8s.io/*" open-pull-requests-limit: 5 schedule: interval: daily - package-ecosystem: github-actions commit-message: prefix: "deps(actions):" directory: "/" open-pull-requests-limit: 3 schedule: interval: monthly groups: github-actions: patterns: - "actions/*" - "github/codeql-action" docker: patterns: - "docker/*" - package-ecosystem: docker commit-message: prefix: "deps(docker):" directory: "/" open-pull-requests-limit: 4 schedule: interval: monthly groups: all: patterns: - "*" ================================================ FILE: .github/workflows/ci.yml ================================================ name: CI on: push: branches: - main - master tags: - v* pull_request: permissions: contents: read jobs: build: name: Build runs-on: ubuntu-latest steps: - name: Check out code uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2 - name: Set up Go uses: actions/setup-go@4a3601121dd01d1626a1e23e37211e3254c1c06c # v6.4.0 with: go-version-file: 'go.mod' id: go - name: Build run: go build -v . - name: Test run: go test -v ./... lint: name: Lint runs-on: ubuntu-latest steps: - name: Checkout code uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2 - name: Set up Go uses: actions/setup-go@4a3601121dd01d1626a1e23e37211e3254c1c06c # v6.4.0 with: go-version-file: 'go.mod' - name: golangci-lint uses: golangci/golangci-lint-action@1e7e51e771db61008b38414a730f564565cf7c20 # v9.2.0 with: # Required: the version of golangci-lint is required and must be specified without patch version: we always use the latest patch version. version: v2.2.1 args: --timeout=5m --modules-download-mode=mod skip-cache: true ================================================ FILE: .github/workflows/dependency-review.yaml ================================================ # Dependency Review Action # # This workflow scans dependency manifest files that change as part of a pull # reqest, surfacing known-vulnerable versions of the packages declared or # updated in the PR. # If the workflow run is marked as required, PRs introducing known-vulnerable # packages will be blocked from merging. # # Source repository: https://github.com/actions/dependency-review-action # Public documentation: https://docs.github.com/en/code-security/supply-chain-security/understanding-your-software-supply-chain/about-dependency-review#dependency-review-enforcement # name: 'Dependency Review' on: [pull_request] permissions: contents: read jobs: dependency-review: runs-on: ubuntu-latest steps: - name: 'Checkout Repository' uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2 - name: 'Dependency Review' uses: actions/dependency-review-action@2031cfc080254a8a887f58cffee85186f0e49e48 # v4.9.0 ================================================ FILE: .github/workflows/docker-publish.yml ================================================ name: Docker permissions: contents: read on: push: branches: - main - master # Publish `v1.2.3` tags as releases. tags: - v* pull_request: env: REGISTRY: ghcr.io IMAGE_NAME: ${{ github.repository }} jobs: push: permissions: packages: write contents: read runs-on: ubuntu-latest steps: - name: Check out code into the Go module directory uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2 # Extract metadata (tags, labels) for Docker # https://github.com/docker/metadata-action - name: Extract Docker metadata id: meta uses: docker/metadata-action@030e881283bb7a6894de51c315a6bfe6a94e05cf # v6.0.0 with: images: ${{ env.REGISTRY }}/${{ env.IMAGE_NAME }} # This action can be useful if you want to add emulation # support with QEMU to be able to build against more platforms. - name: Set up QEMU uses: docker/setup-qemu-action@ce360397dd3f832beb865e1373c09c0e9f86d70a # v4.0.0 # This action will create and boot a builder using # by default the docker-container builder driver. # Recommended for build multi-platform images, export cache, etc. - name: Set up Docker Buildx uses: docker/setup-buildx-action@4d04d5d9486b7bd6fa91e7baf45bbb4f8b9deedd # v4.0.0 - name: Log into ${{ env.REGISTRY }} if: github.event_name != 'pull_request' uses: docker/login-action@b45d80f862d83dbcd57f89517bcf500b2ab88fb2 # v4.0.0 with: registry: ${{ env.REGISTRY }} username: ${{ github.actor }} password: ${{ secrets.GITHUB_TOKEN }} # Build and push Docker image with Buildx (don't push on PR) # https://github.com/docker/build-push-action - name: Build and push Docker image uses: docker/build-push-action@d08e5c354a6adb9ed34480a06d141179aa583294 # v7.0.0 with: platforms: linux/amd64,linux/arm64,linux/ppc64le context: . push: ${{ github.event_name != 'pull_request' }} tags: ${{ steps.meta.outputs.tags }} labels: ${{ steps.meta.outputs.labels }} ================================================ FILE: .github/workflows/ossf-scorecard.yml ================================================ name: OSSF Scorecard on: # For Branch-Protection check. Only the default branch is supported. See # https://github.com/ossf/scorecard/blob/main/docs/checks.md#branch-protection branch_protection_rule: push: branches: [ "master" ] # To guarantee Maintained check is occasionally updated. See # https://github.com/ossf/scorecard/blob/main/docs/checks.md#maintained schedule: - cron: '25 6 * * 5' workflow_dispatch: inputs: ref: description: 'branch or git ref to use for the build' required: true default: 'master' # Declare default permissions as read only. permissions: read-all jobs: analysis: name: Scorecard analysis runs-on: ubuntu-latest permissions: # Needed to upload the results to code-scanning dashboard. security-events: write # Needed to publish results and get a badge id-token: write steps: - name: "Checkout repo" uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2 with: persist-credentials: false - name: "Run analysis" uses: ossf/scorecard-action@4eaacf0543bb3f2c246792bd56e8cdeffafb205a # v2.4.3 with: results_file: results.sarif results_format: sarif publish_results: true - # Upload the results to GitHub's code scanning dashboard. name: "Upload to results to dashboard" uses: github/codeql-action/upload-sarif@c10b8064de6f491fea524254123dbe5e09572f13 # v4.35.1 with: sarif_file: results.sarif - name: "Upload analysis results as 'Job Artifact'" uses: actions/upload-artifact@bbbca2ddaa5d8feaa63e36b76fdaad77386f024f # v7.0.0 with: name: SARIF file path: results.sarif retention-days: 5 ================================================ FILE: .github/workflows/release.yaml ================================================ name: Release permissions: contents: read on: release: types: - created - published jobs: goreleaser: name: Release Go Binary runs-on: ubuntu-latest permissions: contents: write steps: - name: Checkout uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2 with: fetch-depth: 0 - name: Set up Go uses: actions/setup-go@4a3601121dd01d1626a1e23e37211e3254c1c06c # v6.4.0 with: go-version-file: 'go.mod' - name: Run GoReleaser uses: goreleaser/goreleaser-action@ec59f474b9834571250b370d4735c50f8e2d1e29 # v7.0.0 with: distribution: goreleaser version: latest args: release --clean env: GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} ================================================ FILE: .goreleaser.yml ================================================ # This is an example goreleaser.yaml file with some sane defaults. # Make sure to check the documentation at http://goreleaser.com before: hooks: # You may remove this if you don't use go modules. - go mod download builds: - env: - CGO_ENABLED=0 - GO_EXTLINK_ENABLED=0 goos: - linux - windows - darwin goarch: - amd64 - arm64 archives: - name_template: >- {{ .ProjectName }}_ {{- .Version }}_ {{- if eq .Os "darwin" }}MacOS {{- else if eq .Os "linux" }}Linux {{- else if eq .Os "windows" }}Windows {{- else }}{{ .Os }}{{ end }}_ {{- .Arch }} checksum: name_template: 'checksums.txt' snapshot: name_template: "{{ .Tag }}-next" changelog: sort: asc filters: exclude: - '^docs:' - '^test:' ================================================ FILE: Dockerfile ================================================ ARG BUILDPLATFROM FROM --platform=$BUILDPLATFORM golang:1.26.1-bookworm@sha256:8e8aa801e8417ef0b5c42b504dd34db3db911bb73dba933bd8bde75ed815fdbb AS builder ARG TARGETOS ARG TARGETARCH ARG TARGETPLATFROM ENV GO111MODULE=on \ CGO_ENABLED=0 \ GOOS=${TARGETOS} \ GOARCH=${TARGETARCH} WORKDIR /app COPY go.mod . COPY go.sum . RUN go mod download COPY . . RUN go build -o /dist/kubestr -ldflags="-w -s" . FROM alpine:3.23@sha256:25109184c71bdad752c8312a8623239686a9a2071e8825f20acb8f2198c3f659 RUN apk --no-cache add fio COPY --from=builder /dist/kubestr / ENTRYPOINT ["/kubestr"] ================================================ FILE: FIO.md ================================================ # FIO [![asciicast](https://asciinema.org/a/D9EFwlEUVx787hayFapdHljBW.svg)](https://asciinema.org/a/D9EFwlEUVx787hayFapdHljBW) ## More info coming soon ## Examples of FIO files- Here are some [examples](https://github.com/axboe/fio/tree/master/examples) ================================================ FILE: LICENSE ================================================ Apache License Version 2.0, January 2004 http://www.apache.org/licenses/ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 1. Definitions. "License" shall mean the terms and conditions for use, reproduction, and distribution as defined by Sections 1 through 9 of this document. "Licensor" shall mean the copyright owner or entity authorized by the copyright owner that is granting the License. "Legal Entity" shall mean the union of the acting entity and all other entities that control, are controlled by, or are under common control with that entity. For the purposes of this definition, "control" means (i) the power, direct or indirect, to cause the direction or management of such entity, whether by contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the outstanding shares, or (iii) beneficial ownership of such entity. "You" (or "Your") shall mean an individual or Legal Entity exercising permissions granted by this License. "Source" form shall mean the preferred form for making modifications, including but not limited to software source code, documentation source, and configuration files. "Object" form shall mean any form resulting from mechanical transformation or translation of a Source form, including but not limited to compiled object code, generated documentation, and conversions to other media types. "Work" shall mean the work of authorship, whether in Source or Object form, made available under the License, as indicated by a copyright notice that is included in or attached to the work (an example is provided in the Appendix below). "Derivative Works" shall mean any work, whether in Source or Object form, that is based on (or derived from) the Work and for which the editorial revisions, annotations, elaborations, or other modifications represent, as a whole, an original work of authorship. For the purposes of this License, Derivative Works shall not include works that remain separable from, or merely link (or bind by name) to the interfaces of, the Work and Derivative Works thereof. "Contribution" shall mean any work of authorship, including the original version of the Work and any modifications or additions to that Work or Derivative Works thereof, that is intentionally submitted to Licensor for inclusion in the Work by the copyright owner or by an individual or Legal Entity authorized to submit on behalf of the copyright owner. For the purposes of this definition, "submitted" means any form of electronic, verbal, or written communication sent to the Licensor or its representatives, including but not limited to communication on electronic mailing lists, source code control systems, and issue tracking systems that are managed by, or on behalf of, the Licensor for the purpose of discussing and improving the Work, but excluding communication that is conspicuously marked or otherwise designated in writing by the copyright owner as "Not a Contribution." "Contributor" shall mean Licensor and any individual or Legal Entity on behalf of whom a Contribution has been received by Licensor and subsequently incorporated within the Work. 2. Grant of Copyright License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable copyright license to reproduce, prepare Derivative Works of, publicly display, publicly perform, sublicense, and distribute the Work and such Derivative Works in Source or Object form. 3. Grant of Patent License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable (except as stated in this section) patent license to make, have made, use, offer to sell, sell, import, and otherwise transfer the Work, where such license applies only to those patent claims licensable by such Contributor that are necessarily infringed by their Contribution(s) alone or by combination of their Contribution(s) with the Work to which such Contribution(s) was submitted. If You institute patent litigation against any entity (including a cross-claim or counterclaim in a lawsuit) alleging that the Work or a Contribution incorporated within the Work constitutes direct or contributory patent infringement, then any patent licenses granted to You under this License for that Work shall terminate as of the date such litigation is filed. 4. Redistribution. You may reproduce and distribute copies of the Work or Derivative Works thereof in any medium, with or without modifications, and in Source or Object form, provided that You meet the following conditions: (a) You must give any other recipients of the Work or Derivative Works a copy of this License; and (b) You must cause any modified files to carry prominent notices stating that You changed the files; and (c) You must retain, in the Source form of any Derivative Works that You distribute, all copyright, patent, trademark, and attribution notices from the Source form of the Work, excluding those notices that do not pertain to any part of the Derivative Works; and (d) If the Work includes a "NOTICE" text file as part of its distribution, then any Derivative Works that You distribute must include a readable copy of the attribution notices contained within such NOTICE file, excluding those notices that do not pertain to any part of the Derivative Works, in at least one of the following places: within a NOTICE text file distributed as part of the Derivative Works; within the Source form or documentation, if provided along with the Derivative Works; or, within a display generated by the Derivative Works, if and wherever such third-party notices normally appear. The contents of the NOTICE file are for informational purposes only and do not modify the License. You may add Your own attribution notices within Derivative Works that You distribute, alongside or as an addendum to the NOTICE text from the Work, provided that such additional attribution notices cannot be construed as modifying the License. You may add Your own copyright statement to Your modifications and may provide additional or different license terms and conditions for use, reproduction, or distribution of Your modifications, or for any such Derivative Works as a whole, provided Your use, reproduction, and distribution of the Work otherwise complies with the conditions stated in this License. 5. Submission of Contributions. Unless You explicitly state otherwise, any Contribution intentionally submitted for inclusion in the Work by You to the Licensor shall be under the terms and conditions of this License, without any additional terms or conditions. Notwithstanding the above, nothing herein shall supersede or modify the terms of any separate license agreement you may have executed with Licensor regarding such Contributions. 6. Trademarks. This License does not grant permission to use the trade names, trademarks, service marks, or product names of the Licensor, except as required for reasonable and customary use in describing the origin of the Work and reproducing the content of the NOTICE file. 7. Disclaimer of Warranty. Unless required by applicable law or agreed to in writing, Licensor provides the Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, including, without limitation, any warranties or conditions of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are solely responsible for determining the appropriateness of using or redistributing the Work and assume any risks associated with Your exercise of permissions under this License. 8. Limitation of Liability. In no event and under no legal theory, whether in tort (including negligence), contract, or otherwise, unless required by applicable law (such as deliberate and grossly negligent acts) or agreed to in writing, shall any Contributor be liable to You for damages, including any direct, indirect, special, incidental, or consequential damages of any character arising as a result of this License or out of the use or inability to use the Work (including but not limited to damages for loss of goodwill, work stoppage, computer failure or malfunction, or any and all other commercial damages or losses), even if such Contributor has been advised of the possibility of such damages. 9. Accepting Warranty or Additional Liability. While redistributing the Work or Derivative Works thereof, You may choose to offer, and charge a fee for, acceptance of support, warranty, indemnity, or other liability obligations and/or rights consistent with this License. However, in accepting such obligations, You may act only on Your own behalf and on Your sole responsibility, not on behalf of any other Contributor, and only if You agree to indemnify, defend, and hold each Contributor harmless for any liability incurred by, or claims asserted against, such Contributor by reason of your accepting any such warranty or additional liability. END OF TERMS AND CONDITIONS APPENDIX: How to apply the Apache License to your work. To apply the Apache License to your work, attach the following boilerplate notice, with the fields enclosed by brackets "[]" replaced with your own identifying information. (Don't include the brackets!) The text should be enclosed in the appropriate comment syntax for the file format. We also recommend that a file or class name and description of purpose be included on the same "printed page" as the copyright notice for easier identification within third-party archives. Copyright [yyyy] [name of copyright owner] Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ================================================ FILE: README.md ================================================ # Kubestr ## What is it? Kubestr is a collection of tools to discover, validate and evaluate your kubernetes storage options. As adoption of kubernetes grows so have the persistent storage offerings that are available to users. The introduction of [CSI](https://kubernetes.io/blog/2019/01/15/container-storage-interface-ga/) (Container Storage Interface) has enabled storage providers to develop drivers with ease. In fact there are around a 100 different CSI drivers available today. Along with the existing in-tree providers, these options can make choosing the right storage difficult. Kubestr can assist in the following ways- - Identify the various storage options present in a cluster. - Validate if the storage options are configured correctly. - Evaluate the storage using common benchmarking tools like FIO. [![asciicast](https://asciinema.org/a/7iJTbWKwdhPHNWYV00LIgx7gn.svg)](https://asciinema.org/a/7iJTbWKwdhPHNWYV00LIgx7gn) ## Resources Video * [Cloud Native Live: Introducing Kubestr – A New Way to Explore your Kubernetes Storage Options](https://youtu.be/N79NY_0aO0w) * [Introducing Kubestr - A handy tool for Kubernetes Storage](https://youtu.be/U3Rt9vcuQdc) * [A new way to benchmark your kubernetes storage DoK Talks #71](https://www.youtube.com/watch?v=g64eIOk_Ob4) Blogs * [Benchmarking and Evaluating Your Kubernetes Storage with Kubestr](https://blog.kasten.io/benchmarking-kubernetes-storage-with-kubestr) * [Kubestr: The Easy Button for Validating and Debugging Your Storage in Kubernetes](https://thenewstack.io/kubestr-the-easy-button-for-validating-and-debugging-your-storage-in-kubernetes/) * [Introducing Kubestr - A handy tool for Kubernetes Storage](https://vzilla.co.uk/vzilla-blog/introducing-kubestr-a-handy-tool-for-kubernetes-storage) ## Using Kubestr ### To install the tool - - Ensure that the kubernetes context is set and the cluster is accessible through your terminal. (Does [kubectl](https://kubernetes.io/docs/tasks/tools/install-kubectl/) work?) - Download the latest release [here](https://github.com/kastenhq/kubestr/releases/latest). - Unpack the tool and make it an executable `chmod +x kubestr`. ### To discover available storage options - - Run `./kubestr` ### To run an FIO test - - Run `./kubestr fio -s ` - Additional options like `--size` and `--fiofile` can be specified. - For more information visit our [fio](https://github.com/kastenhq/kubestr/blob/master/FIO.md) page. ### To check a CSI drivers snapshot and restore capabilities - - Run `./kubestr csicheck -s -v ` ### To check if a StorageClass supports a block mount - - Run `./kubestr blockmount -s StorageClass` ## Roadmap - In the future we plan to allow users to post their FIO results and compare to others. ================================================ FILE: _config.yml ================================================ theme: jekyll-theme-cayman title: Kubestr description: Explore your kubernetes storage options ================================================ FILE: _posts/2021-02-07-FasterStorage.md ================================================ --- layout: post title: "Faster Storage" date: 2021-02-07 published: true categories: fio storage --- Some content other ================================================ FILE: cmd/rootCmd.go ================================================ // Copyright 2020 Kubestr Developers // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // http://www.apache.org/licenses/LICENSE-2.0 // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package cmd import ( "context" "encoding/json" "fmt" "os" "time" "github.com/kastenhq/kubestr/pkg/block" "github.com/kastenhq/kubestr/pkg/csi" csitypes "github.com/kastenhq/kubestr/pkg/csi/types" "github.com/kastenhq/kubestr/pkg/fio" "github.com/kastenhq/kubestr/pkg/kubestr" "github.com/spf13/cobra" ) var ( output string outfile string rootCmd = &cobra.Command{ Use: "kubestr", Short: "A tool to validate kubernetes storage", Long: `kubestr is a tool that will scan your k8s cluster and validate that the storage systems in place as well as run performance tests.`, SilenceUsage: true, Args: cobra.ExactArgs(0), RunE: func(cmd *cobra.Command, args []string) error { ctx, cancel := context.WithTimeout(context.Background(), 5*time.Minute) defer cancel() return Baseline(ctx, output) }, } storageClass string namespace string containerImage string fioCheckerSize string fioNodeSelector map[string]string fioCheckerFilePath string fioCheckerTestName string fioCmd = &cobra.Command{ Use: "fio", Short: "Runs an fio test", Long: `Run an fio test`, Args: cobra.ExactArgs(0), RunE: func(cmd *cobra.Command, args []string) error { ctx, cancel := context.WithTimeout(context.Background(), 5*time.Minute) defer cancel() return Fio(ctx, output, outfile, storageClass, fioCheckerSize, namespace, fioNodeSelector, fioCheckerTestName, fioCheckerFilePath, containerImage) }, } csiCheckVolumeSnapshotClass string csiCheckRunAsUser int64 csiCheckCleanup bool csiCheckSkipCFSCheck bool csiCheckCmd = &cobra.Command{ Use: "csicheck", Short: "Runs the CSI snapshot restore check", Long: "Validates a CSI provisioners ability to take a snapshot of an application and restore it", Args: cobra.ExactArgs(0), RunE: func(cmd *cobra.Command, args []string) error { ctx, cancel := context.WithTimeout(context.Background(), 5*time.Minute) defer cancel() return CSICheck(ctx, output, outfile, namespace, storageClass, csiCheckVolumeSnapshotClass, csiCheckRunAsUser, containerImage, csiCheckCleanup, csiCheckSkipCFSCheck) }, } browseLocalPort int browseCmd = &cobra.Command{ Use: "browse", Short: "Browse the contents of PVC or VolumeSnapshot", Long: "Browse the contents of a CSI provisioned PVC or a CSI provisioned VolumeSnapshot.", Deprecated: "use 'browse pvc' instead", Args: cobra.ExactArgs(1), RunE: func(cmd *cobra.Command, args []string) error { return browsePvcCmd.RunE(cmd, args) }, } showTree bool browsePvcCmd = &cobra.Command{ Use: "pvc [PVC name]", Short: "Browse the contents of a CSI PVC via file browser", Long: "Browse the contents of a CSI provisioned PVC by cloning the volume and mounting it with a file browser.", Args: cobra.ExactArgs(1), RunE: func(cmd *cobra.Command, args []string) error { return CsiPvcBrowse(context.Background(), args[0], namespace, csiCheckVolumeSnapshotClass, csiCheckRunAsUser, browseLocalPort, showTree, ) }, } browseSnapshotCmd = &cobra.Command{ Use: "snapshot [Snapshot name]", Short: "Browse the contents of a CSI VolumeSnapshot via file browser", Long: "Browse the contents of a CSI provisioned VolumeSnapshot by cloning the volume and mounting it with a file browser.", Args: cobra.ExactArgs(1), RunE: func(cmd *cobra.Command, args []string) error { return CsiSnapshotBrowse(context.Background(), args[0], namespace, csiCheckRunAsUser, browseLocalPort, showTree, ) }, } fromSnapshot string fromPVC string toPVC string path string restoreFileCmd = &cobra.Command{ Use: "file-restore", Short: "Restore file(s) from a Snapshot or PVC to it's source PVC", Long: "Restore file(s) from a given CSI provisioned VolumeSnapshot or PersistentVolumeClaim to another PersistentVolumeClaim.", Args: cobra.ExactArgs(0), RunE: func(cmd *cobra.Command, args []string) error { return FileRestore(context.Background(), fromSnapshot, fromPVC, toPVC, namespace, csiCheckRunAsUser, browseLocalPort, path) }, } blockMountRunAsUser int64 blockMountCleanup bool blockMountCleanupOnly bool blockMountWaitTimeoutSeconds uint32 blockMountPVCSize string blockMountCmd = &cobra.Command{ Use: "blockmount", Short: "Checks if a storage class supports block volumes", Long: `Checks if volumes provisioned by a storage class can be mounted in block mode. The checker works as follows: - It dynamically provisions a volume of the given storage class. - It then launches a pod with the volume mounted as a block device. - If the pod is successfully created then the test passes. - If the pod fails or times out then the test fails. In case of failure, re-run the checker with the "-c=false" flag and examine the failed PVC and Pod: it may be necessary to adjust the default values used for the PVC size, the pod wait timeout, etc. Clean up the failed resources by running the checker with the "--cleanup-only" flag. `, Args: cobra.ExactArgs(0), RunE: func(cmd *cobra.Command, args []string) error { ctx, cancel := context.WithTimeout(context.Background(), 5*time.Minute) defer cancel() checkerArgs := block.BlockMountCheckerArgs{ StorageClass: storageClass, Namespace: namespace, Cleanup: blockMountCleanup, RunAsUser: blockMountRunAsUser, ContainerImage: containerImage, K8sObjectReadyTimeout: (time.Second * time.Duration(blockMountWaitTimeoutSeconds)), PVCSize: blockMountPVCSize, } return BlockMountCheck(ctx, output, outfile, blockMountCleanupOnly, checkerArgs) }, } ) func init() { rootCmd.PersistentFlags().StringVarP(&output, "output", "o", "", "Options(json)") rootCmd.PersistentFlags().StringVarP(&outfile, "outfile", "e", "", "The file where test results will be written") rootCmd.AddCommand(fioCmd) fioCmd.Flags().StringVarP(&storageClass, "storageclass", "s", "", "The name of a Storageclass. (Required)") _ = fioCmd.MarkFlagRequired("storageclass") fioCmd.Flags().StringVarP(&fioCheckerSize, "size", "z", fio.DefaultPVCSize, "The size of the volume used to run FIO. Note that the FIO job definition is not scaled accordingly.") fioCmd.Flags().StringVarP(&namespace, "namespace", "n", fio.DefaultNS, "The namespace used to run FIO.") fioCmd.Flags().StringToStringVarP(&fioNodeSelector, "nodeselector", "N", map[string]string{}, "Node selector applied to pod.") fioCmd.Flags().StringVarP(&fioCheckerFilePath, "fiofile", "f", "", "The path to a an fio config file.") fioCmd.Flags().StringVarP(&fioCheckerTestName, "testname", "t", "", "The Name of a predefined kubestr fio test. Options(default-fio)") fioCmd.Flags().StringVarP(&containerImage, "image", "i", "", "The container image used to create a pod.") rootCmd.AddCommand(csiCheckCmd) csiCheckCmd.Flags().StringVarP(&storageClass, "storageclass", "s", "", "The name of a Storageclass. (Required)") _ = csiCheckCmd.MarkFlagRequired("storageclass") csiCheckCmd.Flags().StringVarP(&csiCheckVolumeSnapshotClass, "volumesnapshotclass", "v", "", "The name of a VolumeSnapshotClass. (Required)") _ = csiCheckCmd.MarkFlagRequired("volumesnapshotclass") csiCheckCmd.Flags().StringVarP(&namespace, "namespace", "n", fio.DefaultNS, "The namespace used to run the check.") csiCheckCmd.Flags().StringVarP(&containerImage, "image", "i", "", "The container image used to create a pod.") csiCheckCmd.Flags().BoolVarP(&csiCheckCleanup, "cleanup", "c", true, "Clean up the objects created by tool") csiCheckCmd.Flags().Int64VarP(&csiCheckRunAsUser, "runAsUser", "u", 0, "Runs the CSI check pod with the specified user ID (int)") csiCheckCmd.Flags().BoolVarP(&csiCheckSkipCFSCheck, "skipCFScheck", "k", false, "Use this flag to skip validating the ability to clone a snapshot.") rootCmd.AddCommand(browseCmd) browseCmd.Flags().StringVarP(&csiCheckVolumeSnapshotClass, "volumesnapshotclass", "v", "", "The name of a VolumeSnapshotClass. (Required)") _ = browseCmd.MarkFlagRequired("volumesnapshotclass") browseCmd.PersistentFlags().StringVarP(&namespace, "namespace", "n", fio.DefaultNS, "The namespace of the resource to browse.") browseCmd.PersistentFlags().Int64VarP(&csiCheckRunAsUser, "runAsUser", "u", 0, "Runs the inspector pod as a user (int)") browseCmd.PersistentFlags().IntVarP(&browseLocalPort, "localport", "l", 8080, "The local port to expose the inspector") browseCmd.PersistentFlags().BoolVarP(&showTree, "show-tree", "t", false, "Prints the contents of given PVC or VolumeSnapshot") browseCmd.AddCommand(browsePvcCmd) browsePvcCmd.Flags().StringVarP(&csiCheckVolumeSnapshotClass, "volumesnapshotclass", "v", "", "The name of a VolumeSnapshotClass. (Required)") _ = browsePvcCmd.MarkFlagRequired("volumesnapshotclass") browseCmd.AddCommand(browseSnapshotCmd) rootCmd.AddCommand(restoreFileCmd) restoreFileCmd.Flags().StringVarP(&fromSnapshot, "fromSnapshot", "f", "", "The name of a VolumeSnapshot.") restoreFileCmd.Flags().StringVarP(&fromPVC, "fromPVC", "v", "", "The name of a PersistentVolumeClaim.") restoreFileCmd.MarkFlagsMutuallyExclusive("fromSnapshot", "fromPVC") restoreFileCmd.MarkFlagsOneRequired("fromSnapshot", "fromPVC") restoreFileCmd.Flags().StringVarP(&toPVC, "toPVC", "t", "", "The name of a PersistentVolumeClaim.") restoreFileCmd.Flags().StringVarP(&namespace, "namespace", "n", fio.DefaultNS, "The namespace of both the given PVC & VS.") restoreFileCmd.Flags().Int64VarP(&csiCheckRunAsUser, "runAsUser", "u", 0, "Runs the inspector pod as a user (int)") restoreFileCmd.Flags().IntVarP(&browseLocalPort, "localport", "l", 8080, "The local port to expose the inspector") restoreFileCmd.Flags().StringVarP(&path, "path", "p", "", "Path of a file or directory that needs to be restored") rootCmd.AddCommand(blockMountCmd) blockMountCmd.Flags().StringVarP(&storageClass, "storageclass", "s", "", "The name of a StorageClass. (Required)") _ = blockMountCmd.MarkFlagRequired("storageclass") blockMountCmd.Flags().StringVarP(&namespace, "namespace", "n", fio.DefaultNS, "The namespace used to run the check.") blockMountCmd.Flags().StringVarP(&containerImage, "image", "i", "", "The container image used to create a pod.") blockMountCmd.Flags().BoolVarP(&blockMountCleanup, "cleanup", "c", true, "Clean up the objects created by the check.") blockMountCmd.Flags().BoolVarP(&blockMountCleanupOnly, "cleanup-only", "", false, "Do not run the checker, but just clean up resources left from a previous invocation.") blockMountCmd.Flags().Int64VarP(&blockMountRunAsUser, "runAsUser", "u", 0, "Runs the block mount check pod with the specified user ID (int)") blockMountCmd.Flags().Uint32VarP(&blockMountWaitTimeoutSeconds, "wait-timeout", "w", 60, "Max time in seconds to wait for the check pod to become ready") blockMountCmd.Flags().StringVarP(&blockMountPVCSize, "pvc-size", "", "1Gi", "The size of the provisioned PVC.") } // Execute executes the main command func Execute() error { return rootCmd.Execute() } // Baseline executes the baseline check func Baseline(ctx context.Context, output string) error { p, err := kubestr.NewKubestr() if err != nil { fmt.Println(err.Error()) return err } fmt.Print(kubestr.Logo) result := p.KubernetesChecks() if PrintAndJsonOutput(result, output, outfile) { return err } for _, retval := range result { retval.Print() fmt.Println() time.Sleep(500 * time.Millisecond) } provisionerList, err := p.ValidateProvisioners(ctx) if err != nil { fmt.Println(err.Error()) return err } fmt.Println("Available Storage Provisioners:") fmt.Println() time.Sleep(500 * time.Millisecond) // Added to introduce lag. for _, provisioner := range provisionerList { provisioner.Print() fmt.Println() time.Sleep(500 * time.Millisecond) } return err } // PrintAndJsonOutput Print JSON output to stdout and to file if arguments say so // Returns whether we have generated output or JSON func PrintAndJsonOutput(result []*kubestr.TestOutput, output string, outfile string) bool { if output == "json" { jsonRes, _ := json.MarshalIndent(result, "", " ") if len(outfile) > 0 { err := os.WriteFile(outfile, jsonRes, 0666) if err != nil { fmt.Println("Error writing output:", err.Error()) os.Exit(2) } } else { fmt.Println(string(jsonRes)) } return true } return false } // Fio executes the FIO test. func Fio(ctx context.Context, output, outfile, storageclass, size, namespace string, nodeSelector map[string]string, jobName, fioFilePath string, containerImage string) error { cli, err := kubestr.LoadKubeCli() if err != nil { fmt.Println(err.Error()) return err } fioRunner := &fio.FIOrunner{ Cli: cli, } testName := "FIO test results" var result *kubestr.TestOutput fioResult, err := fioRunner.RunFio(ctx, &fio.RunFIOArgs{ StorageClass: storageclass, Size: size, Namespace: namespace, NodeSelector: nodeSelector, FIOJobName: jobName, FIOJobFilepath: fioFilePath, Image: containerImage, }) if err != nil { result = kubestr.MakeTestOutput(testName, kubestr.StatusError, err.Error(), fioResult) } else { result = kubestr.MakeTestOutput(testName, kubestr.StatusOK, fmt.Sprintf("\n%s", fioResult.Result.Print()), fioResult) } var wrappedResult = []*kubestr.TestOutput{result} if !PrintAndJsonOutput(wrappedResult, output, outfile) { result.Print() } return err } func CSICheck(ctx context.Context, output, outfile, namespace string, storageclass string, volumesnapshotclass string, runAsUser int64, containerImage string, cleanup bool, skipCFScheck bool, ) error { testName := "CSI checker test" kubecli, err := kubestr.LoadKubeCli() if err != nil { fmt.Printf("Failed to load kubeCli (%s)", err.Error()) return err } dyncli, err := kubestr.LoadDynCli() if err != nil { fmt.Printf("Failed to load dynCli (%s)", err.Error()) return err } csiCheckRunner := &csi.SnapshotRestoreRunner{ KubeCli: kubecli, DynCli: dyncli, } var result *kubestr.TestOutput csiCheckResult, err := csiCheckRunner.RunSnapshotRestore(ctx, &csitypes.CSISnapshotRestoreArgs{ StorageClass: storageclass, VolumeSnapshotClass: volumesnapshotclass, Namespace: namespace, RunAsUser: runAsUser, ContainerImage: containerImage, Cleanup: cleanup, SkipCFSCheck: skipCFScheck, }) if err != nil { result = kubestr.MakeTestOutput(testName, kubestr.StatusError, err.Error(), csiCheckResult) } else { result = kubestr.MakeTestOutput(testName, kubestr.StatusOK, "CSI application successfully snapshotted and restored.", csiCheckResult) } var wrappedResult = []*kubestr.TestOutput{result} if !PrintAndJsonOutput(wrappedResult, output, outfile) { result.Print() } return err } func CsiPvcBrowse(ctx context.Context, pvcName string, namespace string, volumeSnapshotClass string, runAsUser int64, localPort int, showTree bool, ) error { kubecli, err := kubestr.LoadKubeCli() if err != nil { fmt.Printf("Failed to load kubeCli (%s)", err.Error()) return err } dyncli, err := kubestr.LoadDynCli() if err != nil { fmt.Printf("Failed to load dynCli (%s)", err.Error()) return err } browseRunner := &csi.PVCBrowseRunner{ KubeCli: kubecli, DynCli: dyncli, } err = browseRunner.RunPVCBrowse(ctx, &csitypes.PVCBrowseArgs{ PVCName: pvcName, Namespace: namespace, VolumeSnapshotClass: volumeSnapshotClass, RunAsUser: runAsUser, LocalPort: localPort, ShowTree: showTree, }) if err != nil { fmt.Printf("Failed to run PVC browser (%s)\n", err.Error()) } return err } func CsiSnapshotBrowse(ctx context.Context, snapshotName string, namespace string, runAsUser int64, localPort int, showTree bool, ) error { kubecli, err := kubestr.LoadKubeCli() if err != nil { fmt.Printf("Failed to load kubeCli (%s)", err.Error()) return err } dyncli, err := kubestr.LoadDynCli() if err != nil { fmt.Printf("Failed to load dynCli (%s)", err.Error()) return err } browseRunner := &csi.SnapshotBrowseRunner{ KubeCli: kubecli, DynCli: dyncli, } err = browseRunner.RunSnapshotBrowse(ctx, &csitypes.SnapshotBrowseArgs{ SnapshotName: snapshotName, Namespace: namespace, RunAsUser: runAsUser, LocalPort: localPort, ShowTree: showTree, }) if err != nil { fmt.Printf("Failed to run Snapshot browser (%s)\n", err.Error()) } return err } func FileRestore(ctx context.Context, fromSnapshotName string, fromPVCName string, toPVCName string, namespace string, runAsUser int64, localPort int, path string, ) error { kubecli, err := kubestr.LoadKubeCli() if err != nil { fmt.Printf("Failed to load kubeCli (%s)", err.Error()) return err } dyncli, err := kubestr.LoadDynCli() if err != nil { fmt.Printf("Failed to load dynCli (%s)", err.Error()) return err } fileRestoreRunner := &csi.FileRestoreRunner{ KubeCli: kubecli, DynCli: dyncli, } err = fileRestoreRunner.RunFileRestore(ctx, &csitypes.FileRestoreArgs{ FromSnapshotName: fromSnapshotName, FromPVCName: fromPVCName, ToPVCName: toPVCName, Namespace: namespace, RunAsUser: runAsUser, LocalPort: localPort, Path: path, }) if err != nil { fmt.Printf("Failed to run file-restore (%s)\n", err.Error()) } return err } func BlockMountCheck(ctx context.Context, output, outfile string, cleanupOnly bool, checkerArgs block.BlockMountCheckerArgs) error { kubecli, err := kubestr.LoadKubeCli() if err != nil { fmt.Printf("Failed to load kubeCli (%s)", err.Error()) return err } checkerArgs.KubeCli = kubecli dyncli, err := kubestr.LoadDynCli() if err != nil { fmt.Printf("Failed to load dynCli (%s)", err.Error()) return err } checkerArgs.DynCli = dyncli blockMountTester, err := block.NewBlockMountChecker(checkerArgs) if err != nil { fmt.Printf("Failed to initialize BlockMounter (%s)", err.Error()) return err } if cleanupOnly { blockMountTester.Cleanup() return nil } var ( testName = "Block VolumeMode test" result *kubestr.TestOutput ) mountResult, err := blockMountTester.Mount(ctx) if err != nil { if !checkerArgs.Cleanup { fmt.Printf("Warning: Resources may not have been released. Rerun with the additional --cleanup-only flag.\n") } result = kubestr.MakeTestOutput(testName, kubestr.StatusError, fmt.Sprintf("StorageClass (%s) does not appear to support Block VolumeMode", checkerArgs.StorageClass), mountResult) } else { result = kubestr.MakeTestOutput(testName, kubestr.StatusOK, fmt.Sprintf("StorageClass (%s) supports Block VolumeMode", checkerArgs.StorageClass), mountResult) } var wrappedResult = []*kubestr.TestOutput{result} if !PrintAndJsonOutput(wrappedResult, output, outfile) { result.Print() } return err } ================================================ FILE: docs/README.md ================================================ # Kubestr Kubestr is a tool that qualifies the storage options present in a cluster. For more options visit kubestr.io ================================================ FILE: docs/_config.yml ================================================ theme: jekyll-theme-hacker ================================================ FILE: extra/csi-drivers ================================================ # Drivers The following are a set of CSI driver which can be used with Kubernetes: > NOTE: If you would like your driver to be added to this table, please open a pull request in [this repo](https://github.com/kubernetes-csi/docs/pulls) updating this file. Other Features is allowed to be filled in Raw Block, Snapshot, Expansion and Cloning. If driver did not implement any Other Features, please leave it blank. > DISCLAIMER: Information in this table has not been validated by Kubernetes SIG-Storage. Users who want to use these CSI drivers need to contact driver maintainers for driver capabilities. ## Production Drivers Name | CSI Driver Name | Compatible with CSI Version(s) | Description | Persistence (Beyond Pod Lifetime) | Supported Access Modes | Dynamic Provisioning | Other Features -----|-----------------|--------------------------------|-------------|-----------------------------------|------------------------|----------------------|-------- [Alicloud Disk](https://github.com/AliyunContainerService/csi-plugin) | `diskplugin.csi.alibabacloud.com` | v1.0 | A Container Storage Interface (CSI) Driver for Alicloud Disk | Persistent | Read/Write Single Pod | Yes | Raw Block, Snapshot [Alicloud NAS](https://github.com/AliyunContainerService/csi-plugin) | `nasplugin.csi.alibabacloud.com` | v1.0 | A Container Storage Interface (CSI) Driver for Alicloud Network Attached Storage (NAS) | Persistent | Read/Write Multiple Pods | No | [Alicloud OSS](https://github.com/AliyunContainerService/csi-plugin)| `ossplugin.csi.alibabacloud.com` | v1.0 | A Container Storage Interface (CSI) Driver for Alicloud Object Storage Service (OSS) | Persistent | Read/Write Multiple Pods | No | [ArStor CSI](https://github.com/huayun-docs/csi-driver-arstor) | `arstor.csi.huayun.io` | v1.0 | A Container Storage Interface (CSI) Driver for Huayun Storage Service (ArStor) | Persistent and Ephemeral | Read/Write Single Pod | Yes | Raw Block, Snapshot, Expansion, Cloning [AWS Elastic Block Storage](https://github.com/kubernetes-sigs/aws-ebs-csi-driver) | `ebs.csi.aws.com` | v0.3, v1.0 | A Container Storage Interface (CSI) Driver for AWS Elastic Block Storage (EBS) | Persistent | Read/Write Single Pod | Yes | Raw Block, Snapshot, Expansion [AWS Elastic File System](https://github.com/aws/aws-efs-csi-driver) | `efs.csi.aws.com` | v0.3, v1.0 | A Container Storage Interface (CSI) Driver for AWS Elastic File System (EFS) | Persistent | Read/Write Multiple Pods | No | [AWS FSx for Lustre](https://github.com/aws/aws-fsx-csi-driver) | `fsx.csi.aws.com` | v0.3, v1.0 | A Container Storage Interface (CSI) Driver for AWS FSx for Lustre (EBS) | Persistent | Read/Write Multiple Pods | Yes | [Azure disk](https://github.com/kubernetes-sigs/azuredisk-csi-driver) | `disk.csi.azure.com` | v0.3, v1.0 | A Container Storage Interface (CSI) Driver for Azure disk | Persistent | Read/Write Single Pod | Yes | [Azure file](https://github.com/kubernetes-sigs/azurefile-csi-driver) | `file.csi.azure.com` | v0.3, v1.0 | A Container Storage Interface (CSI) Driver for Azure file | Persistent | Read/Write Multiple Pods | Yes | [BeeGFS](https://github.com/NetApp/beegfs-csi-driver) | `beegfs.csi.netapp.com` | v1.3 | A Container Storage Interface (CSI) Driver for the [BeeGFS](https://www.beegfs.io/) Parallel File System | Persistent | Read/Write Multiple Pods | Yes | [Bigtera VirtualStor (block)](https://github.com/bigtera-ce/ceph-csi) | `csi.block.bigtera.com` | v0.3, v1.0.0, v1.1.0 | A Container Storage Interface (CSI) Driver for Bigtera VirtualStor block storage | Persistent | Read/Write Single Pod | Yes | Raw Block, Snapshot, Expansion [Bigtera VirtualStor (filesystem)](https://github.com/bigtera-ce/ceph-csi) | `csi.fs.bigtera.com` | v0.3, v1.0.0, v1.1.0 | A Container Storage Interface (CSI) Driver for Bigtera VirtualStor filesystem | Persistent | Read/Write Multiple Pods | Yes | Expansion [BizFlyCloud Block Storage](https://github.com/bizflycloud/csi-bizflycloud) | `volume.csi.bizflycloud.vn` | v1.2 | A Container Storage Interface (CSI) Driver for BizFly Cloud block storage | Persistent | Read/Write Single Pod | Yes | Raw Block, Snapshot, Expansion [CephFS](https://github.com/ceph/ceph-csi) | `cephfs.csi.ceph.com` | v0.3, >=v1.0.0 | A Container Storage Interface (CSI) Driver for CephFS | Persistent | Read/Write Multiple Pods | Yes | Expansion, Snapshot, Cloning [Ceph RBD](https://github.com/ceph/ceph-csi) | `rbd.csi.ceph.com` | v0.3, >=v1.0.0 | A Container Storage Interface (CSI) Driver for Ceph RBD | Persistent | Read/Write Single Pod | Yes | Raw Block, Snapshot, Expansion, Topology, Cloning [ChubaoFS](https://github.com/chubaofs/chubaofs-csi) | `csi.chubaofs.com` | v1.0.0 | A Container Storage Interface (CSI) Driver for ChubaoFS Storage | Persistent | Read/Write Multiple Pods | Yes | [Cinder](https://github.com/kubernetes/cloud-provider-openstack/tree/master/pkg/csi/cinder) | `cinder.csi.openstack.org` | v0.3, v1.0, v1.1.0, v1.2.0, v1.3.0 | A Container Storage Interface (CSI) Driver for OpenStack Cinder | Persistent and Ephemeral | Depends on the storage backend used | Yes, if storage backend supports it | Raw Block, Snapshot, Expansion, Cloning, Topology [cloudscale.ch](https://github.com/cloudscale-ch/csi-cloudscale) | `csi.cloudscale.ch` | v1.0 | A Container Storage Interface (CSI) Driver for the [cloudscale.ch](https://www.cloudscale.ch/) IaaS platform | Persistent | Read/Write Single Pod | Yes |Snapshot [Datatom-InfinityCSI](https://github.com/datatom-infinity/infinity-csi) | `csi-infiblock-plugin` | v0.3, v1.0.0, v1.1.0 | A Container Storage Interface (CSI) Driver for DATATOM Infinity storage | Persistent | Read/Write Single Pod | Yes | Raw Block, Snapshot, Expansion, Topology [Datatom-InfinityCSI (filesystem)](https://github.com/datatom-infinity/infinity-csi) | `csi-infifs-plugin` | v0.3, v1.0.0, v1.1.0 | A Container Storage Interface (CSI) Driver for DATATOM Infinity filesystem storage | Persistent | Read/Write Multiple Pods | Yes | Expansion [Datera](https://github.com/Datera/datera-csi) | `dsp.csi.daterainc.io` | v1.0 | A Container Storage Interface (CSI) Driver for Datera Data Services Platform (DSP) | Persistent | Read/Write Single Pod | Yes |Snapshot [DDN EXAScaler](https://github.com/DDNStorage/exa-csi-driver) | `exa.csi.ddn.com` | v1.0, v1.1 | A Container Storage Interface (CSI) Driver for DDN EXAScaler filesystems | Persistent | Read/Write Multiple Pods | Yes | Expansion [Dell EMC PowerMax](https://github.com/dell/csi-powermax) | `csi-powermax.dellemc.com` | v1.1 | A Container Storage Interface (CSI) Driver for [Dell EMC PowerMax](https://www.delltechnologies.com/en-us/storage/powermax.htm) | Persistent | Read/Write Single Pod | Yes | Raw Block, Snapshot, Expansion, Cloning, Topology [Dell EMC PowerScale](https://github.com/dell/csi-powerscale) | `csi-isilon.dellemc.com` | v1.1 | A Container Storage Interface (CSI) Driver for [Dell EMC PowerScale](https://www.delltechnologies.com/en-us/storage/powerscale.htm) | Persistent and Ephemeral | Read/Write Multiple Pods | Yes | Snapshot, Expansion, Cloning, Topology [Dell EMC PowerStore](https://github.com/dell/csi-powerstore) | `csi-powerstore.dellemc.com` | v1.1 | A Container Storage Interface (CSI) Driver for [Dell EMC PowerStore](https://www.delltechnologies.com/en-us/storage/powerstore-storage-appliance.htm) | Persistent and Ephemeral | Read/Write Single Pod | Yes | Raw Block, Snapshot, Expansion, Cloning, Topology [Dell EMC Unity](https://github.com/dell/csi-unity) | `csi-unity.dellemc.com` | v1.1 | A Container Storage Interface (CSI) Driver for [Dell EMC Unity](https://www.delltechnologies.com/en-us/storage/unity.htm) | Persistent and Ephemeral | Read/Write Single Pod | Yes | Raw Block, Snapshot, Expansion, Cloning, Topology [Dell EMC VxFlexOS](https://github.com/dell/csi-vxflexos) | `csi-vxflexos.dellemc.com` | v1.1 | A Container Storage Interface (CSI) Driver for [Dell EMC VxFlexOS](https://www.delltechnologies.com/en-us/hyperconverged-infrastructure/vxflex.htm) | Persistent | Read/Write Single Pod | Yes | Raw Block, Snapshot, Expansion, Cloning, Topology [democratic-csi](https://github.com/democratic-csi/democratic-csi) | `org.democratic-csi.[X]` | v1.0,v1.1,v1.2,v1.3,v1.4,v1.5 | Generic CSI plugin supporting zfs based solutions ([FreeNAS](https://www.freenas.org/) / [TrueNAS](https://www.truenas.com/) and [ZoL](https://zfsonlinux.org/) solutions such as [Ubuntu](https://ubuntu.com/)), [Synology](https://www.synology.com/), and more | Persistent and Ephemeral | Read/Write Single Pod (Block Volume)

Read/Write Multiple Pods (File Volume) | Yes | Raw Block, Snapshot, Expansion, Cloning [Diamanti-CSI](https://diamanti.com/use-cases/io-acceleration/#csi) | `dcx.csi.diamanti.com` | v1.0 | A Container Storage Interface (CSI) Driver for Diamanti DCX Platform | Persistent | Read/Write Single Pod | Yes | Raw Block, Snapshot, Expansion [DigitalOcean Block Storage](https://github.com/digitalocean/csi-digitalocean) | `dobs.csi.digitalocean.com` | v0.3, v1.0 | A Container Storage Interface (CSI) Driver for DigitalOcean Block Storage | Persistent | Read/Write Single Pod | Yes | Raw Block, Snapshot, Expansion [Dothill-CSI](https://github.com/enix/dothill-csi) | `dothill.csi.enix.io` | v1.3 | Generic CSI plugin supporting [Seagate AssuredSan](https://www.seagate.com/fr/fr/support/dothill-san/assuredsan-pro-5000-series/) appliances such as [HPE MSA](https://www.hpe.com/us/en/storage/flash-hybrid.html), [Dell EMC PowerVault ME4](https://www.dell.com/fr-fr/work/shop/productdetailstxn/powervault-me4-series) and others ... | Persistent | Read/Write Single Node | Yes | Snapshot, Expansion [Ember CSI](https://ember-csi.io) | `[x].ember-csi.io` | v0.2, v0.3, v1.0 | Multi-vendor CSI plugin supporting over 80 Drivers to provide block and mount storage to Container Orchestration systems. | Persistent | Read/Write Single Pod | Yes | Raw Block, Snapshot [Excelero NVMesh](https://github.com/Excelero/nvmesh-csi-driver) | `nvmesh-csi.excelero.com` | v1.0, v1.1 | A Container Storage Interface (CSI) Driver for Excelero NVMesh | Persistent | Read/Write Multiple Pods | Yes | Raw Block, Expansion [GCE Persistent Disk](https://github.com/kubernetes-sigs/gcp-compute-persistent-disk-csi-driver) | `pd.csi.storage.gke.io` | v0.3, v1.0 | A Container Storage Interface (CSI) Driver for Google Compute Engine Persistent Disk (GCE PD) | Persistent | Read/Write Single Pod | Yes | Raw Block, Snapshot, Expansion, Topology [Google Cloud Filestore](https://github.com/kubernetes-sigs/gcp-filestore-csi-driver) | `com.google.csi.filestore` | v0.3 | A Container Storage Interface (CSI) Driver for Google Cloud Filestore | Persistent | Read/Write Multiple Pods | Yes | [Google Cloud Storage](https://github.com/ofek/csi-gcs) | `gcs.csi.ofek.dev` | v1.0 | A Container Storage Interface (CSI) Driver for Google Cloud Storage | Persistent and Ephemeral | Read/Write Multiple Pods | Yes | Expansion [GlusterFS](https://github.com/gluster/gluster-csi-driver) | `org.gluster.glusterfs` | v0.3, v1.0 | A Container Storage Interface (CSI) Driver for GlusterFS | Persistent | Read/Write Multiple Pods | Yes | Snapshot [Gluster VirtBlock](https://github.com/gluster/gluster-csi-driver) | `org.gluster.glustervirtblock` | v0.3, v1.0 | A Container Storage Interface (CSI) Driver for Gluster Virtual Block volumes | Persistent | Read/Write Single Pod | Yes | [Hammerspace CSI](https://github.com/hammer-space/csi-plugin) | `com.hammerspace.csi` | v0.3, v1.0 | A Container Storage Interface (CSI) Driver for Hammerspace Storage | Persistent | Read/Write Multiple Pods | Yes | Raw Block, Snapshot [Hedvig](https://documentation.commvault.com/commvault/hedvig/others/pdf/Hedvig_CSI_User_Guide.pdf) | `io.hedvig.csi` | v1.0 | A Container Storage Interface (CSI) Driver for Hedvig | Persistent | Read/Write Multiple Pods | Yes | Raw Block, Snapshot, Expansion [Hetzner Cloud Volumes CSI](https://github.com/hetznercloud/csi-driver) | `csi.hetzner.cloud` | v0.3, v1.0 | A Container Storage Interface (CSI) Driver for Hetzner Cloud Volumes | Persistent | Read/Write Single Pod | Yes | Raw Block, Expansion [Hitachi Vantara](https://knowledge.hitachivantara.com/Documents/Adapters_and_Drivers/Storage_Adapters_and_Drivers/Containers) | `hspc.csi.hitachi.com` | v1.2 | A Container Storage Interface (CSI) Driver for VSP series Storage | Persistent | Read/Write Single Pod | Yes | Raw Block, Snapshot, Expansion, Cloning [HPE](https://github.com/hpe-storage/csi-driver) | `csi.hpe.com` | v1.3 | A [multi-platform](https://scod.hpedev.io/csi_driver) Container Storage Interface (CSI) driver. Supports [HPE Alletra](https://hpe.com/storage/alletra), [Nimble Storage](https://hpe.com/storage/nimble), [Primera](https://hpe.com/storage/primera) and [3PAR](https://hpe.com/storage/3par) | Persistent and Ephemeral | Read/Write Multiple Pods | Yes | Raw Block, Snapshot, Expansion, Cloning [HPE Ezmeral (MapR)](https://github.com/mapr/mapr-csi) | `com.mapr.csi-kdf` | v1.3 | A Container Storage Interface (CSI) Driver for HPE Ezmeral Data Fabric | Persistent | Read/Write Multiple Pods | Yes | Raw Block, Snapshot, Expansion, Cloning [Huawei Storage CSI](https://github.com/Huawei/eSDK_K8S_Plugin) | `csi.huawei.com` | v1.0, v1.1, v1.2 | A Container Storage Interface (CSI) Driver for FusionStorage, OceanStor 100D, OceanStor Pacific, OceanStor Dorado V3, OceanStor Dorado V6, OceanStor V3, OceanStor V5 | Persistent | Read/Write Multiple Pod | Yes | Snapshot, Expansion, Cloning [HyperV CSI](https://github.com/Zetanova/hyperv-csi-driver) | `eu.zetanova.csi.hyperv` | v1.0, v1.1 | A Container Storage Interface (CSI) driver to manage hyperv hosts | Persistent | Read/Write Multiple Pods | Yes | [IBM Block Storage](https://github.com/ibm/ibm-block-csi-driver) | `block.csi.ibm.com` | v1.0, v1.1, v1.2 | A Container Storage Interface (CSI) [Driver](https://www.ibm.com/docs/en/stg-block-csi-driver) for IBM Spectrum Virtualize Family, IBM FlashSystem A9000 and A9000R, IBM DS8000 Family 8.x and higher. | Persistent | Read/Write Single Pod | Yes | Raw Block, Snapshot, Expansion, Cloning, Topology [IBM Spectrum Scale](https://github.com/IBM/ibm-spectrum-scale-csi) | `spectrumscale.csi.ibm.com` | v1.0, v1.1 | A Container Storage Interface (CSI) [Driver](https://www.ibm.com/docs/en/spectrum-scale-csi) for the IBM Spectrum Scale File System | Persistent | Read/Write Multiple Pod | Yes | Snapshot [IBM Cloud Block Storage VPC CSI Driver](https://cloud.ibm.com/docs/containers?topic=containers-vpc-block) | `vpc.block.csi.ibm.io` | v1.0 | A Container Storage Interface (CSI) [Driver](https://cloud.ibm.com/docs/containers?topic=containers-vpc-block) for IBM Cloud Kubernetes Service and Red Hat OpenShift on IBM Cloud | Persistent | Read/Write Single Pod | Yes | Raw Block | [Infinidat](https://github.com/Infinidat/infinibox-csi-driver) | `infinibox-csi-driver` | v1.0, v1.1 | A Container Storage Interface (CSI) Driver for Infinidat [InfiniBox](https://infinidat.com/en/products-technology/infinibox) | Persistent | Read/Write Multiple Pods | Yes | Raw Block, Snapshot, Expansion, Cloning [Inspur InStorage CSI](https://github.com/OpenInspur/instorage-k8s) | `csi-instorage` | v1.0 | A Container Storage Interface (CSI) Driver for inspur AS/HF/CS/CF Series Primary Storage, inspur AS13000 Series SDS Storage | Persistent | Read/Write Single Pod | Yes | Raw Block, Snapshot, Expansion, Cloning [Intel PMEM-CSI](https://github.com/intel/pmem-csi) | `pmem-csi.intel.com` | v1.0 | A Container Storage Interface (CSI) driver for [PMEM](https://pmem.io/) from Intel | Persistent and Ephemeral | Read/Write Single Pod | Yes | Raw Block [Intelliflash Block Storage](https://github.com/DDNStorage/intelliflash-csi-block-driver) | `intelliflash-csi-block-driver.intelliflash.com` | v1.0, v1.1, v1.2 | A Container Storage Interface (CSI) Driver for Intelliflash Block Storage | Persistent | Read/Write Multiple Pods | Yes | Snapshot, Expansion, Cloning, Topology [Intelliflash File Storage](https://github.com/DDNStorage/intelliflash-csi-file-driver) | `intelliflash-csi-file-driver.intelliflash.com` | v1.0, v1.1, v1.2 | A Container Storage Interface (CSI) Driver for Intelliflash File Storage | Persistent | Read/Write Multiple Pods | Yes | Snapshot, Expansion, Cloning, Topology [ionir ](https://github.com/ionir-cloud) | `ionir` | v1.2 | A Container Storage Interface (CSI) Driver for [ionir](https://www.ionir.com/) Kubernetes-Native Storage | Persistent | Read/Write Single Pod | Yes | Raw Block, Cloning [JuiceFS](https://github.com/juicedata/juicefs-csi-driver) | `csi.juicefs.com` | v0.3, v1.0 | A Container Storage Interface (CSI) Driver for JuiceFS File System | Persistent | Read/Write Multiple Pod | Yes | [kaDalu](https://github.com/kadalu/kadalu) | `org.kadalu.gluster` | v0.3 | A CSI Driver (and operator) for GlusterFS | Persistent | Read/Write Multiple Pods | Yes | [KumoScale Block Storage](https://github.com/KioxiaAmerica/kumoscale-csi) | `kumoscale.kioxia.com` | v1.0 | A Container Storage Interface (CSI) Driver for KumoScale Block Storage | Persistent | Read/Write Single Pod | Yes | Raw Block, Snapshot, Expansion, Topology [Linode Block Storage](https://github.com/linode/linode-blockstorage-csi-driver) | `linodebs.csi.linode.com` | v1.0 | A Container Storage Interface (CSI) Driver for Linode Block Storage | Persistent | Read/Write Single Pod | Yes | [LINSTOR](https://github.com/piraeusdatastore/linstor-csi) | `linstor.csi.linbit.com` | v1.2 | A Container Storage Interface (CSI) Driver for [LINSTOR](https://www.linbit.com/en/linstor/) volumes | Persistent | Read/Write Single Pod | Yes | Raw Block, Snapshot, Expansion, Cloning, Topology [Longhorn](https://github.com/longhorn/longhorn) | `driver.longhorn.io` | v1.2 | A Container Storage Interface (CSI) Driver for [Longhorn](https://longhorn.io/) volumes | Persistent | Read/Write Single Node | Yes | Raw Block [MacroSAN](https://github.com/macrosan-csi/macrosan-csi-driver) | `csi-macrosan` | v1.0 | A Container Storage Interface (CSI) Driver for MacroSAN Block Storage | Persistent | Read/Write Single Pod | Yes | [Manila](https://github.com/kubernetes/cloud-provider-openstack/tree/master/pkg/csi/manila) | `manila.csi.openstack.org` | v1.1, v1.2 | A Container Storage Interface (CSI) Driver for OpenStack Shared File System Service (Manila) | Persistent | Read/Write Multiple Pods | Yes | Snapshot, Topology [MooseFS](https://github.com/moosefs/moosefs-csi) | `com.tuxera.csi.moosefs` | v1.0 | A Container Storage Interface (CSI) Driver for [MooseFS](https://moosefs.com/) clusters. | Persistent | Read/Write Multiple Pods | Yes | [NetApp](https://github.com/NetApp/trident) | `csi.trident.netapp.io` | v1.0, v1.1, v1.2, v1.3 | A Container Storage Interface (CSI) Driver for NetApp's [Trident](https://netapp-trident.readthedocs.io/) container storage orchestrator | Persistent | Read/Write Multiple Pods | Yes | Raw Block, Snapshot, Expansion, Cloning, Topology [NexentaStor File Storage](https://github.com/Nexenta/nexentastor-csi-driver) | `nexentastor-csi-driver.nexenta.com` | v1.0, v1.1, v1.2 | A Container Storage Interface (CSI) Driver for NexentaStor File Storage | Persistent | Read/Write Multiple Pods | Yes | Snapshot, Expansion, Cloning, Topology [NexentaStor Block Storage](https://github.com/Nexenta/nexentastor-csi-driver-block) | `nexentastor-block-csi-driver.nexenta.com` | v1.0, v1.1, v1.2 | A Container Storage Interface (CSI) Driver for NexentaStor over iSCSI protocol | Persistent | Read/Write Multiple Pods | Yes | Snapshot, Expansion, Cloning, Topology, Raw block [Nutanix](https://github.com/nutanix/csi-plugin) | `csi.nutanix.com` | v0.3, v1.0, v1.2 | A Container Storage Interface (CSI) Driver for Nutanix | Persistent | "Read/Write Single Pod" with Nutanix Volumes and "Read/Write Multiple Pods" with Nutanix Files | Yes | Raw Block, Snapshot, Expansion, Cloning [OpenEBS](https://github.com/openebs/csi)| `cstor.csi.openebs.io` | v1.0 | A Container Storage Interface (CSI) Driver for [OpenEBS](https://www.openebs.io/)| Persistent | Read/Write Single Pod | Yes | Expansion, Snapshot, Cloning [Open-E](https://github.com/open-e/JovianDSS-KubernetesCSI) | `com.open-e.joviandss.csi` | v1.0 | A Container Storage Interface (CSI) Driver for Open-E JovianDSS Storage | Persistent | Read/Write Single Pod | Yes | Snapshot, Cloning [Open-Local](https://github.com/alibaba/open-local) | `local.csi.alibaba.com` | v1.0 | A Container Storage Interface (CSI) Driver for Local Storage | Persistent | Read/Write Single Pod | Yes | Raw Block, Expansion, Snapshot [Oracle Cloud Infrastructure(OCI) Block Storage](https://github.com/oracle/oci-cloud-controller-manager/blob/master/container-storage-interface.md) | `blockvolume.csi.oraclecloud.com` | v1.1 | A Container Storage Interface (CSI) Driver for Oracle Cloud Infrastructure (OCI) Block Storage | Persistent | Read/Write Single Pod | Yes | Topology [oVirt](https://github.com/openshift/ovirt-csi-driver) | `csi.ovirt.org` | v1.0 | A Container Storage Interface (CSI) Driver for [oVirt](https://ovirt.org) | Persistent | Read/Write Single Pod | Yes | Block, File Storage [Portworx](https://github.com/libopenstorage/openstorage/tree/master/csi) | `pxd.portworx.com` | v1.4 | A Container Storage Interface (CSI) Driver for [Portworx](https://docs.portworx.com/portworx-install-with-kubernetes/storage-operations/csi/) | Persistent and Ephemeral | Read/Write Multiple Pods | Yes | Snapshot, Expansion, Raw Block, Cloning [Pure Storage CSI](https://github.com/purestorage/pso-csi)| `pure-csi` | v1.0, v1.1, v1.2, v1.3 | A Container Storage Interface (CSI) Driver for Pure Storage's [Pure Service Orchestrator](https://purestorage.com/containers) | Persistent and Ephemeral | Read/Write Multiple Pods | Yes | Snapshot, Cloning, Raw Block, Topology, Expansion [QingCloud CSI](https://github.com/yunify/qingcloud-csi)| `disk.csi.qingcloud.com` | v1.1 | A Container Storage Interface (CSI) Driver for QingCloud Block Storage | Persistent | Read/Write Single Pod | Yes | Raw Block, Snapshot, Expansion, Cloning [QingStor CSI](https://github.com/yunify/qingstor-csi) | `neonsan.csi.qingstor.com` | v0.3, v1.1 | A Container Storage Interface (CSI) Driver for NeonSAN storage system | Persistent | Read/Write Multiple Pods | Yes | Raw Block, Snapshot, Expansion, Cloning [Quobyte](https://github.com/quobyte/quobyte-csi) | `quobyte-csi` | v0.2 | A Container Storage Interface (CSI) Driver for Quobyte | Persistent | Read/Write Multiple Pods | Yes | [ROBIN](https://get.robin.io/) | `robin` | v0.3, v1.0 | A Container Storage Interface (CSI) Driver for [ROBIN](https://docs.robin.io) | Persistent | Read/Write Multiple Pods | Yes | Raw Block, Snapshot, Expansion, Cloning [SandStone](https://github.com/sandstone-storage/sandstone-csi-driver) | `csi-sandstone-plugin` | v1.0 | A Container Storage Interface (CSI) Driver for SandStone USP | Persistent | Read/Write Multiple Pods | Yes | Raw Block, Snapshot, Expansion, Cloning [Sangfor-EDS-File-Storage](https://github.com/evan37717/sangfor-eds-csi) | `eds.csi.file.sangfor.com` | v1.0 | A Container Storage Interface (CSI) Driver for Sangfor Distributed File Storage(EDS) | Persistent | Read/Write Multiple Pods | Yes | [Sangfor-EDS-Block-Storage](https://github.com/eds-wzc/sangfor-eds-csi) | `eds.csi.block.sangfor.com` | v1.0 | A Container Storage Interface (CSI) Driver for Sangfor Block Storage(EDS) | Persistent | Read/Write Single Pod | Yes | [Scaleway CSI](https://github.com/scaleway/scaleway-csi) | `csi.scaleway.com` | v1.2.0 | Container Storage Interface (CSI) Driver for [Scaleway Block Storage](https://www.scaleway.com/block-storage/) | Persistent | Read/Write Single Pod | Yes | Raw Block, Snapshot, Expansion, Topology [Seagate Exos X](https://github.com/Seagate/seagate-exos-x-csi) | `csi-exos-x.seagate.com` | v1.3 | CSI driver for [Seagate Exos X](https://www.seagate.com/products/storage/data-storage-systems/raid/) and OEM systems | Persistent | Read/Write Single Pod | Yes | Snapshot, Expansion, Cloning [SeaweedFS](https://github.com/seaweedfs/seaweedfs-csi-driver) | `seaweedfs-csi-driver` | v1.0 | A Container Storage Interface (CSI Driver for [SeaweedFS](https://github.com/chrislusf/seaweedfs)) | Persistent | Read/Write Multiple Pods | Yes | [Secrets Store CSI Driver](https://github.com/kubernetes-sigs/secrets-store-csi-driver) | `secrets-store.csi.k8s.io` | v0.0.10 | A Container Storage Interface (CSI) Driver for mounting secrets, keys, and certs stored in enterprise-grade external secrets stores as volumes. | Ephemeral | N/A | N/A | [SmartX](http://www.smartx.com/?locale=en) | `csi-smtx-plugin` | v1.0 | A Container Storage Interface (CSI) Driver for SmartX ZBS Storage | Persistent | Read/Write Multiple Pods | Yes | Snapshot, Expansion [SODA](https://github.com/sodafoundation/nbp/tree/master/csi) | `csi-soda-plugin` | v1.0 | A Container Storage Interface (CSI) Driver for [SODA](https://sodafoundation.io/) | Persistent | Read/Write Single Pod | Yes | Raw Block, Snapshot [SPDK-CSI](https://github.com/spdk/spdk-csi) | `csi.spdk.io` | v1.1 | A Container Storage Interface (CSI) Driver for [SPDK](https://spdk.io/) | Persistent and Ephemeral | Read/Write Single Pod | Yes | [StorageOS](https://docs.storageos.com/docs/platforms/kubernetes/install/) | `storageos` | v0.3, v1.0 | A Container Storage Interface (CSI) Driver for [StorageOS](https://storageos.com/) | Persistent | Read/Write Multiple Pods | Yes | [Storidge](https://docs.storidge.com/kubernetes_storage/overview.html) | `csi.cio.storidge.com` | v0.3, v1.0 | A Container Storage Interface (CSI) Driver for [Storidge CIO](https://storidge.com/) | Persistent | Read/Write Multiple Pods | Yes | Snapshot, Expansion [StorPool](https://kb.storpool.com/storpool_integrations/github/kubernetes.html) | `csi-driver.storpool.com` | v1.0 | A Container Storage Interface (CSI) Driver for [StorPool](https://storpool.com/) | Persistent and Ephemeral | Read/Write Multiple Pods | Yes | Expansion [Synology](https://github.com/SynologyOpenSource/synology-csi) | `csi.san.synology.com` | v1.0 | A Container Storage Interface (CSI) Driver for Synology NAS | Persistent | Read/Write Multiple Pods | Yes | Snapshot, Expansion, Cloning [Tencent Cloud Block Storage](https://github.com/TencentCloud/kubernetes-csi-tencentcloud)| `com.tencent.cloud.csi.cbs` | v1.0 | A Container Storage Interface (CSI) Driver for Tencent Cloud Block Storage | Persistent | Read/Write Single Pod | Yes | Snapshot [Tencent Cloud File Storage](https://github.com/TencentCloud/kubernetes-csi-tencentcloud)| `com.tencent.cloud.csi.cfs` | v1.0 | A Container Storage Interface (CSI) Driver for Tencent Cloud File Storage | Persistent | Read/Write Multiple Pods | Yes | [Tencent Cloud Object Storage](https://github.com/TencentCloud/kubernetes-csi-tencentcloud)| `com.tencent.cloud.csi.cosfs` | v1.0 | A Container Storage Interface (CSI) Driver for Tencent Cloud Object Storage | Persistent | Read/Write Multiple Pods | No | [TopoLVM](https://github.com/cybozu-go/topolvm)| `topolvm.cybozu.com` | v1.1 | A Container Storage Interface (CSI) Driver for LVM | Persistent and Ephemeral | Read/Write Single Pod | Yes | Raw Block, Expansion, Topology Aware [VAST Data](https://github.com/vast-data/vast-csi) | `csi.vastdata.com` | v1.0 | A Container Storage Interface (CSI) Driver for VAST Data | Persistent | Read/Write Multiple Pods | Yes | [XSKY-EBS](https://xsky-storage.github.io/xsky-csi-driver/csi-block.html) | `csi.block.xsky.com` | v1.0 | A Container Storage Interface (CSI) Driver for XSKY Distributed Block Storage (X-EBS) | Persistent | Read/Write Single Pod | Yes | Raw Block, Snapshot, Expansion, Cloning [XSKY-EUS](https://xsky-storage.github.io/xsky-csi-driver/csi-fs.html) | `csi.fs.xsky.com` | v1.0 | A Container Storage Interface (CSI) Driver for XSKY Distributed File Storage (X-EUS) | Persistent | Read/Write Multiple Pods | Yes | [Vault](https://github.com/kubevault/csi-driver) | `secrets.csi.kubevault.com` | v1.0 | A Container Storage Interface (CSI) Driver for mounting HashiCorp Vault secrets as volumes. | Ephemeral | N/A | N/A | [VDA](https://virtual-disk-array.readthedocs.io/en/latest/Introduction.html) | `csi.vda.io` | v1.0 | An open source block storage system base on SPDK | Persistent | Read/Write Single Pod | N/A | [Veritas InfoScale Volumes](https://www.veritas.com/solution/virtualization/containers.html) | `org.veritas.infoscale` | v1.2 | A Container Storage Interface (CSI) Driver for Veritas InfoScale volumes | Persistent | Read/Write Multiple Pods | Yes | Snapshot, Expansion, Cloning [vSphere](https://github.com/kubernetes-sigs/vsphere-csi-driver) | `csi.vsphere.vmware.com` | v1.0 | A Container Storage Interface (CSI) Driver for VMware vSphere | Persistent | Read/Write Single Pod (Block Volume)

Read/Write Multiple Pods (File Volume) | Yes | Raw Block,

Expansion (Block Volume),

Topology Aware (Block Volume) [Vultr Block Storage](https://github.com/vultr/vultr-csi) | `block.csi.vultr.com` | v1.2 | A Container Storage Interface (CSI) Driver for Vultr Block Storage | Persistent | Read/Write Single Pod | Yes | [WekaIO](https://github.com/weka/csi-wekafs) | `csi.weka.io` | v1.0 | A Container Storage Interface (CSI) Driver for mounting WekaIO WekaFS filesystem as volumes | Persistent | Read/Write Multiple Pods | Yes | [Yandex.Cloud](https://github.com/flant/yandex-csi-driver) | `yandex.csi.flant.com` | v1.2 | A Container Storage Interface (CSI) plugin for Yandex.Cloud Compute Disks | Persistent | Read/Write Single Pod | Yes | [YanRongYun](http://www.yanrongyun.com/) | ? | v1.0 | A Container Storage Interface (CSI) Driver for YanRong YRCloudFile Storage | Persistent | Read/Write Multiple Pods | Yes | [Zadara-CSI](https://github.com/zadarastorage/zadara-csi) | `csi.zadara.com` | v1.0, v1.1 | A Container Storage Interface (CSI) plugin for Zadara VPSA Storage Array & VPSA All-Flash | Persistent | Read/Write Multiple Pods | Yes | Raw Block, Snapshot, Expansion, Cloning ## Sample Drivers Name | Status | More Information -----|--------|------- [Flexvolume](https://github.com/kubernetes-csi/csi-driver-flex) | Sample | [HostPath](https://github.com/kubernetes-csi/csi-driver-host-path) | v1.2.0 | Only use for a single node tests. See the [Example](example.html) page for Kubernetes-specific instructions. [ImagePopulator](https://github.com/kubernetes-csi/csi-driver-image-populator) | Prototype | Driver that lets you use a container image as an ephemeral volume. [In-memory Sample Mock Driver](https://github.com/kubernetes-csi/csi-test/tree/master/mock/service) | v0.3.0 | The sample mock driver used for [csi-sanity](https://github.com/kubernetes-csi/csi-test/tree/master/cmd/csi-sanity) [NFS](https://github.com/kubernetes-csi/csi-driver-nfs) | Sample | [Synology NAS](https://github.com/jparklab/synology-csi) | v1.0.0 | An unofficial (and unsupported) Container Storage Interface Driver for Synology NAS. [VFS Driver](https://github.com/thecodeteam/csi-vfs) | Released | A CSI plugin that provides a virtual file system. ================================================ FILE: go.mod ================================================ module github.com/kastenhq/kubestr go 1.24 replace github.com/graymeta/stow => github.com/kastenhq/stow v0.1.2-kasten require ( github.com/briandowns/spinner v1.23.2 github.com/frankban/quicktest v1.14.6 github.com/golang/mock v1.6.0 github.com/kanisterio/kanister v0.0.0-20250106180853-0abc731c8242 github.com/kubernetes-csi/external-snapshotter/client/v4 v4.2.0 github.com/pkg/errors v0.9.1 github.com/spf13/cobra v1.10.2 gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c k8s.io/api v0.31.4 k8s.io/apimachinery v0.31.4 k8s.io/client-go v0.31.4 ) require ( github.com/Azure/azure-sdk-for-go/sdk/azcore v1.16.0 // indirect github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1 // indirect github.com/MakeNowJust/heredoc v1.0.0 // indirect github.com/Masterminds/semver v1.5.0 // indirect github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2 // indirect github.com/blang/semver/v4 v4.0.0 // indirect github.com/chai2010/gettext-go v1.0.2 // indirect github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect github.com/emicklei/go-restful/v3 v3.11.0 // indirect github.com/exponent-io/jsonpath v0.0.0-20151013193312-d6023ce2651d // indirect github.com/fatih/color v1.17.0 // indirect github.com/fxamacker/cbor/v2 v2.7.0 // indirect github.com/go-errors/errors v1.4.2 // indirect github.com/go-logr/logr v1.4.2 // indirect github.com/go-openapi/errors v0.22.0 // indirect github.com/go-openapi/jsonpointer v0.19.6 // indirect github.com/go-openapi/jsonreference v0.20.2 // indirect github.com/go-openapi/strfmt v0.23.0 // indirect github.com/go-openapi/swag v0.22.4 // indirect github.com/gofrs/uuid v4.4.0+incompatible // indirect github.com/gogo/protobuf v1.3.2 // indirect github.com/golang/protobuf v1.5.4 // indirect github.com/google/btree v1.0.1 // indirect github.com/google/gnostic-models v0.6.8 // indirect github.com/google/go-cmp v0.6.0 // indirect github.com/google/gofuzz v1.2.0 // indirect github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 // indirect github.com/google/uuid v1.6.0 // indirect github.com/gorilla/websocket v1.5.0 // indirect github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7 // indirect github.com/imdario/mergo v0.3.12 // indirect github.com/inconshreveable/mousetrap v1.1.0 // indirect github.com/josharian/intern v1.0.0 // indirect github.com/jpillora/backoff v1.0.0 // indirect github.com/json-iterator/go v1.1.12 // indirect github.com/kanisterio/errkit v0.0.3 // indirect github.com/kr/pretty v0.3.1 // indirect github.com/kr/text v0.2.0 // indirect github.com/liggitt/tabwriter v0.0.0-20181228230101-89fcab3d43de // indirect github.com/mailru/easyjson v0.7.7 // indirect github.com/mattn/go-colorable v0.1.13 // indirect github.com/mattn/go-isatty v0.0.20 // indirect github.com/mitchellh/go-wordwrap v1.0.1 // indirect github.com/mitchellh/mapstructure v1.5.0 // indirect github.com/moby/spdystream v0.5.1 // indirect github.com/moby/term v0.5.0 // indirect github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect github.com/modern-go/reflect2 v1.0.2 // indirect github.com/monochromegane/go-gitignore v0.0.0-20200626010858-205db1a8cc00 // indirect github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f // indirect github.com/oklog/ulid v1.3.1 // indirect github.com/openshift/api v0.0.0-20231222123017-053aee22b4b4 // indirect github.com/openshift/client-go v0.0.0-20231221125933-2aa81c72f992 // indirect github.com/peterbourgon/diskv v2.0.1+incompatible // indirect github.com/rogpeppe/go-internal v1.12.0 // indirect github.com/russross/blackfriday/v2 v2.1.0 // indirect github.com/sirupsen/logrus v1.9.3 // indirect github.com/spf13/pflag v1.0.9 // indirect github.com/x448/float16 v0.8.4 // indirect github.com/xlab/treeprint v1.2.0 // indirect go.mongodb.org/mongo-driver v1.14.0 // indirect go.starlark.net v0.0.0-20240314022150-ee8ed142361c // indirect golang.org/x/mod v0.21.0 // indirect golang.org/x/net v0.38.0 // indirect golang.org/x/oauth2 v0.27.0 // indirect golang.org/x/sync v0.12.0 // indirect golang.org/x/sys v0.31.0 // indirect golang.org/x/term v0.30.0 // indirect golang.org/x/text v0.23.0 // indirect golang.org/x/time v0.8.0 // indirect golang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d // indirect google.golang.org/protobuf v1.36.1 // indirect gopkg.in/evanphx/json-patch.v4 v4.12.0 // indirect gopkg.in/inf.v0 v0.9.1 // indirect gopkg.in/yaml.v2 v2.4.0 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect k8s.io/apiextensions-apiserver v0.31.4 // indirect k8s.io/cli-runtime v0.31.4 // indirect k8s.io/code-generator v0.31.4 // indirect k8s.io/component-base v0.31.4 // indirect k8s.io/gengo/v2 v2.0.0-20240228010128-51d4e06bde70 // indirect k8s.io/klog/v2 v2.130.1 // indirect k8s.io/kube-openapi v0.0.0-20240228011516-70dd3763d340 // indirect k8s.io/kubectl v0.31.4 // indirect k8s.io/utils v0.0.0-20240711033017-18e509b52bc8 // indirect sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd // indirect sigs.k8s.io/kustomize/api v0.17.2 // indirect sigs.k8s.io/kustomize/kyaml v0.17.1 // indirect sigs.k8s.io/structured-merge-diff/v4 v4.4.3 // indirect sigs.k8s.io/yaml v1.4.0 // indirect ) ================================================ FILE: go.sum ================================================ cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU= cloud.google.com/go v0.44.1/go.mod h1:iSa0KzasP4Uvy3f1mN/7PiObzGgflwredwwASm/v6AU= cloud.google.com/go v0.44.2/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY= cloud.google.com/go v0.45.1/go.mod h1:RpBamKRgapWJb87xiFSdk4g1CME7QZg3uwTez+TSTjc= cloud.google.com/go v0.46.3/go.mod h1:a6bKKbmY7er1mI7TEI4lsAkts/mkhTSZK8w33B4RAg0= cloud.google.com/go v0.51.0/go.mod h1:hWtGJ6gnXH+KgDv+V0zFGDvpi07n3z8ZNj3T1RW0Gcw= cloud.google.com/go v0.116.0 h1:B3fRrSDkLRt5qSHWe40ERJvhvnQwdZiHu0bJOpldweE= cloud.google.com/go/auth v0.13.0 h1:8Fu8TZy167JkW8Tj3q7dIkr2v4cndv41ouecJx0PAHs= cloud.google.com/go/auth v0.13.0/go.mod h1:COOjD9gwfKNKz+IIduatIhYJQIc0mG3H102r/EMxX6Q= cloud.google.com/go/auth/oauth2adapt v0.2.6 h1:V6a6XDu2lTwPZWOawrAa9HUK+DB2zfJyTuciBG5hFkU= cloud.google.com/go/auth/oauth2adapt v0.2.6/go.mod h1:AlmsELtlEBnaNTL7jCj8VQFLy6mbZv0s4Q7NGBeQ5E8= cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o= cloud.google.com/go/compute/metadata v0.6.0 h1:A6hENjEsCDtC1k8byVsgwvVcioamEHvZ4j01OwKxG9I= cloud.google.com/go/compute/metadata v0.6.0/go.mod h1:FjyFAW1MW0C203CEOMDTu3Dk1FlqW3Rga40jzHL4hfg= cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE= cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I= cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw= dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= github.com/Azure/azure-sdk-for-go v68.0.0+incompatible h1:fcYLmCpyNYRnvJbPerq7U0hS+6+I79yEDJBqVNcqUzU= github.com/Azure/azure-sdk-for-go v68.0.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc= github.com/Azure/azure-sdk-for-go/sdk/azcore v1.16.0 h1:JZg6HRh6W6U4OLl6lk7BZ7BLisIzM9dG1R50zUk9C/M= github.com/Azure/azure-sdk-for-go/sdk/azcore v1.16.0/go.mod h1:YL1xnZ6QejvQHWJrX/AvhFl4WW4rqHVoKspWNVwFk0M= github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.8.0 h1:B/dfvscEQtew9dVuoxqxrUKKv8Ih2f55PydknDamU+g= github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.8.0/go.mod h1:fiPSssYvltE08HJchL04dOy+RD4hgrjph0cwGGMntdI= github.com/Azure/azure-sdk-for-go/sdk/internal v1.10.0 h1:ywEEhmNahHBihViHepv3xPBn1663uRv2t2q/ESv9seY= github.com/Azure/azure-sdk-for-go/sdk/internal v1.10.0/go.mod h1:iZDifYGJTIgIIkYRNWPENUnqx6bJ2xnSDFI2tjwZNuY= github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v4 v4.2.1 h1:UPeCRD+XY7QlaGQte2EVI2iOcWvUYA2XY8w5T/8v0NQ= github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v4 v4.2.1/go.mod h1:oGV6NlB0cvi1ZbYRR2UN44QHxWFyGk+iylgD0qaMXjA= github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/resources/armsubscriptions v1.3.0 h1:wxQx2Bt4xzPIKvW59WQf1tJNx/ZZKPfN+EhPX3Z6CYY= github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/resources/armsubscriptions v1.3.0/go.mod h1:TpiwjwnW/khS0LKs4vW5UmmT9OWcxaveS8U7+tlknzo= github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1 h1:UQHMgLO+TxOElx5B5HZ4hJQsoJ/PvUvKRhJHDQXO8P8= github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1/go.mod h1:xomTg63KZ2rFqZQzSB4Vz2SUXa1BpHTVz9L5PTmPC4E= github.com/Azure/go-autorest v14.2.0+incompatible h1:V5VMDjClD3GiElqLWO7mz2MxNAK/vTfRHdAubSIPRgs= github.com/Azure/go-autorest v14.2.0+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24= github.com/Azure/go-autorest/autorest v0.9.0/go.mod h1:xyHB1BMZT0cuDHU7I0+g046+BFDTQ8rEZB0s4Yfa6bI= github.com/Azure/go-autorest/autorest v0.9.6/go.mod h1:/FALq9T/kS7b5J5qsQ+RSTUdAmGFqi0vUdVNNx8q630= github.com/Azure/go-autorest/autorest v0.11.27 h1:F3R3q42aWytozkV8ihzcgMO4OA4cuqr3bNlsEuF6//A= github.com/Azure/go-autorest/autorest v0.11.27/go.mod h1:7l8ybrIdUmGqZMTD0sRtAr8NvbHjfofbf8RSP2q7w7U= github.com/Azure/go-autorest/autorest/adal v0.5.0/go.mod h1:8Z9fGy2MpX0PvDjB1pEgQTmVqjGhiHBW7RJJEciWzS0= github.com/Azure/go-autorest/autorest/adal v0.8.2/go.mod h1:ZjhuQClTqx435SRJ2iMlOxPYt3d2C/T/7TiQCVZSn3Q= github.com/Azure/go-autorest/autorest/adal v0.9.18 h1:kLnPsRjzZZUF3K5REu/Kc+qMQrvuza2bwSnNdhmzLfQ= github.com/Azure/go-autorest/autorest/adal v0.9.18/go.mod h1:XVVeme+LZwABT8K5Lc3hA4nAe8LDBVle26gTrguhhPQ= github.com/Azure/go-autorest/autorest/date v0.1.0/go.mod h1:plvfp3oPSKwf2DNjlBjWF/7vwR+cUD/ELuzDCXwHUVA= github.com/Azure/go-autorest/autorest/date v0.2.0/go.mod h1:vcORJHLJEh643/Ioh9+vPmf1Ij9AEBM5FuBIXLmIy0g= github.com/Azure/go-autorest/autorest/date v0.3.0 h1:7gUk1U5M/CQbp9WoqinNzJar+8KY+LPI6wiWrP/myHw= github.com/Azure/go-autorest/autorest/date v0.3.0/go.mod h1:BI0uouVdmngYNUzGWeSYnokU+TrmwEsOqdt8Y6sso74= github.com/Azure/go-autorest/autorest/mocks v0.1.0/go.mod h1:OTyCOPRA2IgIlWxVYxBee2F5Gr4kF2zd2J5cFRaIDN0= github.com/Azure/go-autorest/autorest/mocks v0.2.0/go.mod h1:OTyCOPRA2IgIlWxVYxBee2F5Gr4kF2zd2J5cFRaIDN0= github.com/Azure/go-autorest/autorest/mocks v0.3.0/go.mod h1:a8FDP3DYzQ4RYfVAxAN3SVSiiO77gL2j2ronKKP0syM= github.com/Azure/go-autorest/logger v0.1.0/go.mod h1:oExouG+K6PryycPJfVSxi/koC6LSNgds39diKLz7Vrc= github.com/Azure/go-autorest/logger v0.2.1 h1:IG7i4p/mDa2Ce4TRyAO8IHnVhAVF3RFU+ZtXWSmf4Tg= github.com/Azure/go-autorest/logger v0.2.1/go.mod h1:T9E3cAhj2VqvPOtCYAvby9aBXkZmbF5NWuPV8+WeEW8= github.com/Azure/go-autorest/tracing v0.5.0/go.mod h1:r/s2XiOKccPW3HrqB+W0TQzfbtp2fGCgRFtBroKn4Dk= github.com/Azure/go-autorest/tracing v0.6.0 h1:TYi4+3m5t6K48TGI9AUdb+IzbnSxvnvUMfuitfgcfuo= github.com/Azure/go-autorest/tracing v0.6.0/go.mod h1:+vhtPC754Xsa23ID7GlGsrdKBpUA79WCAKPPZVC2DeU= github.com/AzureAD/microsoft-authentication-library-for-go v1.2.2 h1:XHOnouVk1mxXfQidrMEnLlPk9UMeRtyBTnEFtxkV0kU= github.com/AzureAD/microsoft-authentication-library-for-go v1.2.2/go.mod h1:wP83P5OoQ5p6ip3ScPr0BAq0BvuPAvacpEuSzyouqAI= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= github.com/MakeNowJust/heredoc v1.0.0 h1:cXCdzVdstXyiTqTvfqk9SDHpKNjxuom+DOlyEeQ4pzQ= github.com/MakeNowJust/heredoc v1.0.0/go.mod h1:mG5amYoWBHf8vpLOuehzbGGw0EHxpZZ6lCpQ4fNJ8LE= github.com/Masterminds/goutils v1.1.1 h1:5nUrii3FMTL5diU80unEVvNevw1nH4+ZV4DSLVJLSYI= github.com/Masterminds/goutils v1.1.1/go.mod h1:8cTjp+g8YejhMuvIA5y2vz3BpJxksy863GQaJW2MFNU= github.com/Masterminds/semver v1.5.0 h1:H65muMkzWKEuNDnfl9d70GUjFniHKHRbFPGBuZ3QEww= github.com/Masterminds/semver v1.5.0/go.mod h1:MB6lktGJrhw8PrUyiEoblNEGEQ+RzHPF078ddwwvV3Y= github.com/Masterminds/sprig v2.22.0+incompatible h1:z4yfnGrZ7netVz+0EDJ0Wi+5VZCSYp4Z0m2dk6cEM60= github.com/Masterminds/sprig v2.22.0+incompatible/go.mod h1:y6hNFY5UBTIWBxnzTeuNhlNS5hqE0NB0E6fgfo2Br3o= github.com/NYTimes/gziphandler v0.0.0-20170623195520-56545f4a5d46/go.mod h1:3wb06e3pkSAbeQ52E9H9iFoQsEEwGN64994WTCIhntQ= github.com/PuerkitoBio/purell v1.0.0/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0= github.com/PuerkitoBio/purell v1.1.1/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0= github.com/PuerkitoBio/urlesc v0.0.0-20160726150825-5bd2802263f2/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE= github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE= github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5 h1:0CwZNZbxp69SHPdPJAN/hZIm0C4OItdklCFmMRWYpio= github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5/go.mod h1:wHh0iHkYZB8zMSxRWpUBQtwG5a7fFgvEO+odwuTv2gs= github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2 h1:DklsrG3dyBCFEj5IhUbnKptjxatkF07cF2ak3yi77so= github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2/go.mod h1:WaHUgvxTVq04UNunO+XhnAqY/wQc+bxr74GqbsZ/Jqw= github.com/aws/aws-sdk-go v1.55.5 h1:KKUZBfBoyqy5d3swXyiC7Q76ic40rYcbqH7qjh59kzU= github.com/aws/aws-sdk-go v1.55.5/go.mod h1:eRwEWoyTWFMVYVQzKMNHWP5/RV4xIUGMQfXQHfHkpNU= github.com/blang/semver/v4 v4.0.0 h1:1PFHFE6yCCTv8C1TeyNNarDzntLi7wMI5i/pzqYIsAM= github.com/blang/semver/v4 v4.0.0/go.mod h1:IbckMUScFkM3pff0VJDNKRiT6TG/YpiHIM2yvyW5YoQ= github.com/briandowns/spinner v1.23.2 h1:Zc6ecUnI+YzLmJniCfDNaMbW0Wid1d5+qcTq4L2FW8w= github.com/briandowns/spinner v1.23.2/go.mod h1:LaZeM4wm2Ywy6vO571mvhQNRcWfRUnXOs0RcKV0wYKM= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= github.com/chai2010/gettext-go v1.0.2 h1:1Lwwip6Q2QGsAdl/ZKPCwTe9fe0CjlUbqj5bFNSjIRk= github.com/chai2010/gettext-go v1.0.2/go.mod h1:y+wnP2cHYaVj19NZhYKAwEMH2CI1gNHeQQ+5AjwawxA= github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI= github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI= github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= github.com/cpuguy83/go-md2man/v2 v2.0.6/go.mod h1:oOW0eioCTA6cOiMLiUPZOpcVxMig6NIQQ7OS05n1F4g= github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= github.com/creack/pty v1.1.18 h1:n56/Zwd5o6whRC5PMGretI4IdRLlmBXYNjScPaBgsbY= github.com/creack/pty v1.1.18/go.mod h1:MOBLtS5ELjhRRrroQr9kyvTxUAFNvYEK993ew/Vr4O4= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM= github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= github.com/docker/spdystream v0.0.0-20160310174837-449fdfce4d96/go.mod h1:Qh8CwZgvJUkLughtfhJv5dyTYa91l1fOUCrgjqmcifM= github.com/docopt/docopt-go v0.0.0-20180111231733-ee0de3bc6815/go.mod h1:WwZ+bS3ebgob9U8Nd0kOddGdZWjyMGR8Wziv+TBNwSE= github.com/elazarl/goproxy v0.0.0-20180725130230-947c36da3153/go.mod h1:/Zj4wYkgs4iZTTu3o/KG3Itv/qCCa8VVMlb3i9OVuzc= github.com/emicklei/go-restful v0.0.0-20170410110728-ff4f55a20633/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs= github.com/emicklei/go-restful v2.9.5+incompatible/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs= github.com/emicklei/go-restful/v3 v3.11.0 h1:rAQeMHw1c7zTmncogyy8VvRZwtkmkZ4FxERmMY4rD+g= github.com/emicklei/go-restful/v3 v3.11.0/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc= github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= github.com/evanphx/json-patch v4.9.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= github.com/exponent-io/jsonpath v0.0.0-20151013193312-d6023ce2651d h1:105gxyaGwCFad8crR9dcMQWvV9Hvulu6hwUh4tWPJnM= github.com/exponent-io/jsonpath v0.0.0-20151013193312-d6023ce2651d/go.mod h1:ZZMPRZwes7CROmyNKgQzC3XPs6L/G2EJLHddWejkmf4= github.com/fatih/color v1.17.0 h1:GlRw1BRJxkpqUCBKzKOw098ed57fEsKeNjpTe3cSjK4= github.com/fatih/color v1.17.0/go.mod h1:YZ7TlrGPkiz6ku9fK3TLD/pl3CpsiFyu8N92HLgmosI= github.com/felixge/httpsnoop v1.0.4 h1:NFTV2Zj1bL4mc9sqWACXbQFVBBg2W3GPvqp8/ESS2Wg= github.com/felixge/httpsnoop v1.0.4/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= github.com/frankban/quicktest v1.14.6 h1:7Xjx+VpznH+oBnejlPUj8oUpdxnVs4f8XU8WnHkI4W8= github.com/frankban/quicktest v1.14.6/go.mod h1:4ptaffx2x8+WTWXmUCuVU6aPUX1/Mz7zb5vbUoiM6w0= github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ= github.com/fxamacker/cbor/v2 v2.7.0 h1:iM5WgngdRBanHcxugY4JySA0nk1wZorNOpTgCMedv5E= github.com/fxamacker/cbor/v2 v2.7.0/go.mod h1:pxXPTn3joSm21Gbwsv0w9OSA2y1HFR9qXEeXQVeNoDQ= github.com/ghodss/yaml v0.0.0-20150909031657-73d445a93680/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= github.com/go-errors/errors v1.4.2 h1:J6MZopCL4uSllY1OfXM374weqZFFItUbrImctkmUxIA= github.com/go-errors/errors v1.4.2/go.mod h1:sIVyrIiJhuEF+Pj9Ebtd6P/rEYROXFi3BopGUQ5a5Og= github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= github.com/go-logr/logr v0.1.0/go.mod h1:ixOQHD9gLJUVQQ2ZOR7zLEifBX6tGkNJF4QyIY7sIas= github.com/go-logr/logr v0.2.0/go.mod h1:z6/tIYblkpsD+a4lm/fGIIU9mZ+XfAiaFtq7xTgseGU= github.com/go-logr/logr v1.4.2 h1:6pFjapn8bFcIbiKo3XT4j/BhANplGihG6tvd+8rYgrY= github.com/go-logr/logr v1.4.2/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= github.com/go-openapi/errors v0.22.0 h1:c4xY/OLxUBSTiepAg3j/MHuAv5mJhnf53LLMWFB+u/w= github.com/go-openapi/errors v0.22.0/go.mod h1:J3DmZScxCDufmIMsdOuDHxJbdOGC0xtUynjIx092vXE= github.com/go-openapi/jsonpointer v0.0.0-20160704185906-46af16f9f7b1/go.mod h1:+35s3my2LFTysnkMfxsJBAMHj/DoqoB9knIWoYG/Vk0= github.com/go-openapi/jsonpointer v0.19.2/go.mod h1:3akKfEdA7DF1sugOqz1dVQHBcuDBPKZGEoHC/NkiQRg= github.com/go-openapi/jsonpointer v0.19.3/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg= github.com/go-openapi/jsonpointer v0.19.6 h1:eCs3fxoIi3Wh6vtgmLTOjdhSpiqphQ+DaPn38N2ZdrE= github.com/go-openapi/jsonpointer v0.19.6/go.mod h1:osyAmYz/mB/C3I+WsTTSgw1ONzaLJoLCyoi6/zppojs= github.com/go-openapi/jsonreference v0.0.0-20160704190145-13c6e3589ad9/go.mod h1:W3Z9FmVs9qj+KR4zFKmDPGiLdk1D9Rlm7cyMvf57TTg= github.com/go-openapi/jsonreference v0.19.2/go.mod h1:jMjeRr2HHw6nAVajTXJ4eiUwohSTlpa0o73RUL1owJc= github.com/go-openapi/jsonreference v0.19.3/go.mod h1:rjx6GuL8TTa9VaixXglHmQmIL98+wF9xc8zWvFonSJ8= github.com/go-openapi/jsonreference v0.20.2 h1:3sVjiK66+uXK/6oQ8xgcRKcFgQ5KXa2KvnJRumpMGbE= github.com/go-openapi/jsonreference v0.20.2/go.mod h1:Bl1zwGIM8/wsvqjsOQLJ/SH+En5Ap4rVB5KVcIDZG2k= github.com/go-openapi/spec v0.0.0-20160808142527-6aced65f8501/go.mod h1:J8+jY1nAiCcj+friV/PDoE1/3eeccG9LYBs0tYvLOWc= github.com/go-openapi/spec v0.19.3/go.mod h1:FpwSN1ksY1eteniUU7X0N/BgJ7a4WvBFVA8Lj9mJglo= github.com/go-openapi/strfmt v0.23.0 h1:nlUS6BCqcnAk0pyhi9Y+kdDVZdZMHfEKQiS4HaMgO/c= github.com/go-openapi/strfmt v0.23.0/go.mod h1:NrtIpfKtWIygRkKVsxh7XQMDQW5HKQl6S5ik2elW+K4= github.com/go-openapi/swag v0.0.0-20160704191624-1d0bd113de87/go.mod h1:DXUve3Dpr1UfpPtxFw+EFuQ41HhCWZfha5jSVRG7C7I= github.com/go-openapi/swag v0.19.2/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk= github.com/go-openapi/swag v0.19.5/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk= github.com/go-openapi/swag v0.22.3/go.mod h1:UzaqsxGiab7freDnrUUra0MwWfN/q7tE4j+VcZ0yl14= github.com/go-openapi/swag v0.22.4 h1:QLMzNJnMGPRNDCbySlcj1x01tzU8/9LTTL9hZZZogBU= github.com/go-openapi/swag v0.22.4/go.mod h1:UzaqsxGiab7freDnrUUra0MwWfN/q7tE4j+VcZ0yl14= github.com/go-task/slim-sprig/v3 v3.0.0 h1:sUs3vkvUymDpBKi3qH1YSqBQk9+9D/8M2mN1vB6EwHI= github.com/go-task/slim-sprig/v3 v3.0.0/go.mod h1:W848ghGpv3Qj3dhTPRyJypKRiqCdHZiAzKg9hl15HA8= github.com/gofrs/uuid v4.4.0+incompatible h1:3qXRTX8/NbyulANqlc0lchS1gqAVxRgsuW1YrTJupqA= github.com/gofrs/uuid v4.4.0+incompatible/go.mod h1:b2aQJv3Z4Fp6yNu3cdSllBxTCLRxnplIgP/c0N/04lM= github.com/gogo/protobuf v1.3.1/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o= github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= github.com/golang-jwt/jwt/v4 v4.5.1 h1:JdqV9zKUdtaa9gdPlywC3aeoEsR681PlKC+4F5gQgeo= github.com/golang-jwt/jwt/v4 v4.5.1/go.mod h1:m21LjoU+eqJr34lmDMbreY2eSTRJ1cv77w39/MY0Ch0= github.com/golang-jwt/jwt/v5 v5.2.1 h1:OuVbFODueb089Lh128TAcimifWaLhJwVflnrgM17wHk= github.com/golang-jwt/jwt/v5 v5.2.1/go.mod h1:pqrtFR0X4osieyHYxtmOUWsAWrfe1Q5UVIyoH402zdk= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y= github.com/golang/mock v1.6.0 h1:ErTB+efbowRARo13NNdxyJji2egdxLGQhRaY+DUumQc= github.com/golang/mock v1.6.0/go.mod h1:p6yTPP+5HYm5mzsMV8JkE6ZKdX+/wYM6Hr+LicevLPs= github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8= github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA= github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs= github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w= github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0= github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8= github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek= github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps= github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= github.com/google/btree v1.0.1 h1:gK4Kx5IaGY9CD5sPJ36FHiBJ6ZXl0kilRiiCj+jdYp4= github.com/google/btree v1.0.1/go.mod h1:xXMiIv4Fb/0kKde4SpL7qlzvu5cMJDRkFDxJfI9uaxA= github.com/google/gnostic-models v0.6.8 h1:yo/ABAfM5IMRsS1VnXjTBvUb61tFIHozhlYvRgGre9I= github.com/google/gnostic-models v0.6.8/go.mod h1:5n7qKqH0f5wFt+aWF8CW6pZLLNOfYuF5OpfBSENuI8U= github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= github.com/google/gofuzz v1.1.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0= github.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs= github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= github.com/google/pprof v0.0.0-20191218002539-d4f498aebedc/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= github.com/google/pprof v0.0.0-20240525223248-4bfdf5a9a2af h1:kmjWCqn2qkEml422C2Rrd27c3VGxi6a/6HNq8QmHRKM= github.com/google/pprof v0.0.0-20240525223248-4bfdf5a9a2af/go.mod h1:K1liHPHnj73Fdn/EKuT8nrFqBihUSKXoLYU0BuatOYo= github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= github.com/google/s2a-go v0.1.8 h1:zZDs9gcbt9ZPLV0ndSyQk6Kacx2g/X+SKYovpnz3SMM= github.com/google/s2a-go v0.1.8/go.mod h1:6iNWHTpQ+nfNRN5E00MSdfDwVesa8hhS32PhPO8deJA= github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 h1:El6M4kTTCOh6aBiKaUGG7oYTSPP8MxqL4YI3kZKwcP4= github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510/go.mod h1:pupxD2MaaD3pAXIBCelhxNneeOaAeabZDe5s4K6zSpQ= github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/googleapis/enterprise-certificate-proxy v0.3.4 h1:XYIDZApgAnrN1c855gTgghdIA6Stxb52D5RnLI1SLyw= github.com/googleapis/enterprise-certificate-proxy v0.3.4/go.mod h1:YKe7cfqYXjKGpGvmSg28/fFvhNzinZQm8DGnaburhGA= github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= github.com/googleapis/gax-go/v2 v2.14.0 h1:f+jMrjBPl+DL9nI4IQzLUxMq7XrAqFYB7hBPqMNIe8o= github.com/googleapis/gax-go/v2 v2.14.0/go.mod h1:lhBCnjdLrWRaPvLWhmc8IS24m9mr07qSYnHncrgo+zk= github.com/googleapis/gnostic v0.4.1/go.mod h1:LRhVm6pbyptWbWbuZ38d1eyptfvIytN3ir6b65WBswg= github.com/gorilla/websocket v1.5.0 h1:PPwGk2jz7EePpoHN/+ClbZu8SPxiqlu12wZP/3sWmnc= github.com/gorilla/websocket v1.5.0/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7 h1:pdN6V1QBWetyv/0+wjACpqVH+eVULgEjkurDLq3goeM= github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA= github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= github.com/huandu/xstrings v1.2.0 h1:yPeWdRnmynF7p+lLYz0H2tthW9lqhMJrQV/U7yy4wX0= github.com/huandu/xstrings v1.2.0/go.mod h1:DvyZB1rfVYsBIigL8HwpZgxHwXozlTgGqn63UyNX5k4= github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= github.com/imdario/mergo v0.3.5/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= github.com/imdario/mergo v0.3.12 h1:b6R2BslTbIEToALKP7LxUvijTsNI9TAe80pLWN2g/HU= github.com/imdario/mergo v0.3.12/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA= github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8= github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw= github.com/jmespath/go-jmespath v0.4.0 h1:BEgLn5cpjn8UN1mAw4NjwDrS35OdebyEtFe+9YPoQUg= github.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo= github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY= github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y= github.com/jpillora/backoff v1.0.0 h1:uvFg412JmmHBHw7iwprIxkPMI+sGQ4kzOWsMeHnm2EA= github.com/jpillora/backoff v1.0.0/go.mod h1:J/6gKK9jxlEcS3zixgDgUAsiuZ7yrSoa/FX5e0EB2j4= github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= github.com/json-iterator/go v1.1.10/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM= github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo= github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU= github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk= github.com/kanisterio/errkit v0.0.3 h1:1wHaTqV4DZE0XrN+Nq7Q2M8kyKnV8NhhEF3OB7A/Pd8= github.com/kanisterio/errkit v0.0.3/go.mod h1:0xesKaif6++1IXFdhb6fywa40J07odjwWq3IKzxWC3A= github.com/kanisterio/kanister v0.0.0-20250106180853-0abc731c8242 h1:Ubk92hHanqt0lWkw+AJD0HD/kxyNX099XgkLAAfxKQo= github.com/kanisterio/kanister v0.0.0-20250106180853-0abc731c8242/go.mod h1:GKGelgFnCa/Vc4MDuGlc4DdKGWaN7yIt8oI/Ztsm8V0= github.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQLJ+jE2L00= github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= github.com/kr/pty v1.1.5/go.mod h1:9r2w37qlBe7rQ6e1fg1S/9xpWHSnaqNdHD3WcMdbPDA= github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= github.com/kubernetes-csi/external-snapshotter/client/v4 v4.2.0 h1:nHHjmvjitIiyPlUHk/ofpgvBcNcawJLtf4PYHORLjAA= github.com/kubernetes-csi/external-snapshotter/client/v4 v4.2.0/go.mod h1:YBCo4DoEeDndqvAn6eeu0vWM7QdXmHEeI9cFWplmBys= github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc= github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw= github.com/liggitt/tabwriter v0.0.0-20181228230101-89fcab3d43de h1:9TO3cAIGXtEhnIaL+V+BEER86oLrvS+kWobKpbJuye0= github.com/liggitt/tabwriter v0.0.0-20181228230101-89fcab3d43de/go.mod h1:zAbeS9B/r2mtpb6U+EI2rYA5OAXxsYw6wTamcNW+zcE= github.com/mailru/easyjson v0.0.0-20160728113105-d5b7844b561a/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= github.com/mailru/easyjson v0.0.0-20190626092158-b2ccc519800e/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= github.com/mailru/easyjson v0.7.0/go.mod h1:KAzv3t3aY1NaHWoQz1+4F1ccyAH66Jk7yos7ldAVICs= github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0= github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc= github.com/mattn/go-colorable v0.1.13 h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxecdEvA= github.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovkB8vQcUbaXHg= github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM= github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY= github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y= github.com/mitchellh/copystructure v1.0.0 h1:Laisrj+bAB6b/yJwB5Bt3ITZhGJdqmxquMKeZ+mmkFQ= github.com/mitchellh/copystructure v1.0.0/go.mod h1:SNtv71yrdKgLRyLFxmLdkAbkKEFWgYaq1OVrnRcwhnw= github.com/mitchellh/go-wordwrap v1.0.1 h1:TLuKupo69TCn6TQSyGxwI1EblZZEsQ0vMlAFQflz0v0= github.com/mitchellh/go-wordwrap v1.0.1/go.mod h1:R62XHJLzvMFRBbcrT7m7WgmE1eOyTSsCt+hzestvNj0= github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY= github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= github.com/mitchellh/reflectwalk v1.0.0 h1:9D+8oIskB4VJBN5SFlmc27fSlIBZaov1Wpk/IfikLNY= github.com/mitchellh/reflectwalk v1.0.0/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw= github.com/moby/spdystream v0.5.1 h1:9sNYeYZUcci9R6/w7KDaFWEWeV4LStVG78Mpyq/Zm/Y= github.com/moby/spdystream v0.5.1/go.mod h1:xBAYlnt/ay+11ShkdFKNAG7LsyK/tmNBVvVOwrfMgdI= github.com/moby/term v0.5.0 h1:xt8Q1nalod/v7BqbG21f8mQPqH+xAaC9C3N3wfWbVP0= github.com/moby/term v0.5.0/go.mod h1:8FzsFHVUBGZdbDsJw/ot+X+d5HLUbvklYLJ9uGfcI3Y= github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M= github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= github.com/monochromegane/go-gitignore v0.0.0-20200626010858-205db1a8cc00 h1:n6/2gBQ3RWajuToeY6ZtZTIKv2v7ThUy5KKusIT0yc0= github.com/monochromegane/go-gitignore v0.0.0-20200626010858-205db1a8cc00/go.mod h1:Pm3mSP3c5uWn86xMLZ5Sa7JB9GsEZySvHYXCTK4E9q4= github.com/munnerz/goautoneg v0.0.0-20120707110453-a547fc61f48d/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA= github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f h1:y5//uYreIhSUg3J1GEMiLbxo1LJaP8RfCpH6pymGZus= github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw= github.com/oklog/ulid v1.3.1 h1:EGfNDEx6MqHz8B3uNV6QAib1UR2Lm97sHi3ocA6ESJ4= github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U= github.com/onsi/ginkgo v0.0.0-20170829012221-11459a886d9c/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.11.0 h1:JAKSXpt1YjtLA7YpPiqO9ss6sNXEsPfSGdwN0UHqzrw= github.com/onsi/ginkgo v1.11.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo/v2 v2.19.0 h1:9Cnnf7UHo57Hy3k6/m5k3dRfGTMXGvxhHFvkDTCTpvA= github.com/onsi/ginkgo/v2 v2.19.0/go.mod h1:rlwLi9PilAFJ8jCg9UE1QP6VBpd6/xj3SRC0d6TU0To= github.com/onsi/gomega v0.0.0-20170829124025-dcabb60a477c/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA= github.com/onsi/gomega v1.7.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= github.com/onsi/gomega v1.33.1 h1:dsYjIxxSR755MDmKVsaFQTE22ChNBcuuTWgkUDSubOk= github.com/onsi/gomega v1.33.1/go.mod h1:U4R44UsT+9eLIaYRB2a5qajjtQYn0hauxvRm16AVYg0= github.com/openshift/api v0.0.0-20231222123017-053aee22b4b4 h1:XHl52N6/q+aE5qvmN3YyHyV2H0xepZTbr/r6Vs5pNjo= github.com/openshift/api v0.0.0-20231222123017-053aee22b4b4/go.mod h1:CxgbWAlvu2iQB0UmKTtRu1YfepRg1/vJ64n2DlIEVz4= github.com/openshift/client-go v0.0.0-20231221125933-2aa81c72f992 h1:JQ/w7ublPBrPRwknrde4apbTR23PDxKYUmkkfo1Nvws= github.com/openshift/client-go v0.0.0-20231221125933-2aa81c72f992/go.mod h1:5W+xoimHjRdZ0dI/yeQR0ANRNLK9mPmXMzUWPAIPADo= github.com/peterbourgon/diskv v2.0.1+incompatible h1:UBdAOUP5p4RWqPBg048CAvpKN+vxiaj6gdUUzhl4XmI= github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR/tNboyR3/BZd58JJSHlUSCU= github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c h1:+mdjkGKdHQG3305AYmdv1U2eRNDiU2ErMBj1gwrq8eQ= github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c/go.mod h1:7rwL4CYBLnjLxUqIJNnCWiEdr3bn6IUYi15bNlnbCCU= github.com/pkg/diff v0.0.0-20210226163009-20ebb0f2a09e/go.mod h1:pJLUxLENpZxwdsKMEsNbx1VGcRFpLqf3715MtcvvzbA= github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U= github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= github.com/rogpeppe/go-internal v1.9.0/go.mod h1:WtVeX8xhTBvf0smdhujwtBcq4Qrzq/fJaraNFVN+nFs= github.com/rogpeppe/go-internal v1.12.0 h1:exVL4IDcn6na9z1rAb56Vxr+CgyK3nn3O+epU5NdKM8= github.com/rogpeppe/go-internal v1.12.0/go.mod h1:E+RYuTGaKKdloAfM02xzb0FW3Paa99yedzYV+kq4uf4= github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk= github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= github.com/sergi/go-diff v1.2.0 h1:XU+rvMAioB0UC3q1MFrIQy4Vo5/4VsRDQQXHsEya6xQ= github.com/sergi/go-diff v1.2.0/go.mod h1:STckp+ISIX8hZLjrqAeVduY0gWCT9IjLuqbuNXdaHfM= github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ= github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ= github.com/spf13/afero v1.2.2/go.mod h1:9ZxEEn6pIJ8Rxe320qSDBk6AsU0r9pR7Q4OcevTdifk= github.com/spf13/cobra v1.10.2 h1:DMTTonx5m65Ic0GOoRY2c16WCbHxOOw6xxezuLaBpcU= github.com/spf13/cobra v1.10.2/go.mod h1:7C1pvHqHw5A4vrJfjNwvOdzYu0Gml16OCs2GRiTUUS4= github.com/spf13/pflag v0.0.0-20170130214245-9ff6c6923cff/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= github.com/spf13/pflag v1.0.9 h1:9exaQaMOCwffKiiiYk6/BndUBv+iRViNW+4lEMi0PvY= github.com/spf13/pflag v1.0.9/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.2.0/go.mod h1:qt09Ya8vawLte6SNmTgCsAVtYtaKzEcn8ATUoHMkEqE= github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= github.com/stretchr/objx v0.5.0 h1:1zr/of2m5FGMsad5YfcqgdqdWrIhu+EBEJRhR1U7z/c= github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg= github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= github.com/x448/float16 v0.8.4 h1:qLwI1I70+NjRFUR3zs1JPUCgaCXSh3SW62uAKT1mSBM= github.com/x448/float16 v0.8.4/go.mod h1:14CWIYCyZA/cWjXOioeEpHeN/83MdbZDRQHoFcYsOfg= github.com/xlab/treeprint v1.2.0 h1:HzHnuAF1plUN2zGlAFHbSQP2qJ0ZAD3XF5XD7OesXRQ= github.com/xlab/treeprint v1.2.0/go.mod h1:gj5Gd3gPdKtR1ikdDK6fnFLdmIS0X30kTTuNd/WEJu0= github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= go.mongodb.org/mongo-driver v1.14.0 h1:P98w8egYRjYe3XDjxhYJagTokP/H6HzlsnojRgZRd80= go.mongodb.org/mongo-driver v1.14.0/go.mod h1:Vzb0Mk/pa7e6cWw85R4F/endUC3u0U9jGcNU603k65c= go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8= go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.54.0 h1:TT4fX+nBOA/+LUkobKGW1ydGcn+G3vRw9+g5HwCphpk= go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.54.0/go.mod h1:L7UH0GbB0p47T4Rri3uHjbpCFYrVrwc1I25QhNPiGK8= go.opentelemetry.io/otel v1.31.0 h1:NsJcKPIW0D0H3NgzPDHmo0WW6SptzPdqg/L1zsIm2hY= go.opentelemetry.io/otel v1.31.0/go.mod h1:O0C14Yl9FgkjqcCZAsE053C13OaddMYr/hz6clDkEJE= go.opentelemetry.io/otel/metric v1.31.0 h1:FSErL0ATQAmYHUIzSezZibnyVlft1ybhy4ozRPcF2fE= go.opentelemetry.io/otel/metric v1.31.0/go.mod h1:C3dEloVbLuYoX41KpmAhOqNriGbA+qqH6PQ5E5mUfnY= go.opentelemetry.io/otel/trace v1.31.0 h1:ffjsj1aRouKewfr85U2aGagJ46+MvodynlQ1HYdmJys= go.opentelemetry.io/otel/trace v1.31.0/go.mod h1:TXZkRk7SM2ZQLtR6eoAWQFIHPvzQ06FJAsO1tJg480A= go.starlark.net v0.0.0-20240314022150-ee8ed142361c h1:roAjH18hZcwI4hHStHbkXjF5b7UUyZ/0SG3hXNN1SjA= go.starlark.net v0.0.0-20240314022150-ee8ed142361c/go.mod h1:YKMCv9b1WrfWmeqdV5MAuEHWsu5iC+fe6kYl2sQjdI8= go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto= go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE= go.yaml.in/yaml/v3 v3.0.4/go.mod h1:DhzuOOF2ATzADvBadXxruRBLzYTpT36CKvDb3+aBEFg= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190611184440-5c40567a22f8/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20191206172530-e9b2fee46413/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.36.0 h1:AnAEvhDddvBdpY+uR+MyHmuZzzNqXSe/GvuDeob5L34= golang.org/x/crypto v0.36.0/go.mod h1:Y4J0ReaxCR1IMaabaSMugxJES1EpwhBHhv2bDHklZvc= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= golang.org/x/exp v0.0.0-20190829153037-c13cbed26979/go.mod h1:86+5VVa7VpoJ4kLfm080zCjGlMRFzhUhsZKEZO7MGek= golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js= golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= golang.org/x/lint v0.0.0-20190409202823-959b441ac422/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= golang.org/x/lint v0.0.0-20190909230951-414d861bb4ac/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f/go.mod h1:5qLYkcX4OjUUV8bRuDixDT3tpyyb+LUpUlRWLxfhWrs= golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE= golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o= golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc= golang.org/x/mod v0.1.0/go.mod h1:0QHyrYULN0/3qlju5TqG8bIK38QM8yzMo5ekMj3DlcY= golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.21.0 h1:vvrHzRwRfVKSiLrG+d4FMl/Qi4ukBCE6kZlTUkDYRT0= golang.org/x/mod v0.21.0/go.mod h1:6SkKJ3Xj0I0BrPOZoBy3bdMptDDU9oJrpohJ3eWZ1fY= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190827160401-ba9fcec4b297/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= golang.org/x/net v0.38.0 h1:vRMAPTMaeGqVhG5QyLJHqNDwecKTomGeqbnfZyKlBI8= golang.org/x/net v0.38.0/go.mod h1:ivrbrMbzFq5J41QOQh0siUuly180yBYtLp+CKbEaFx8= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.27.0 h1:da9Vo7/tDv5RH/7nZDz1eMGS/q1Vv1N/7FCrBhI9I3M= golang.org/x/oauth2 v0.27.0/go.mod h1:onh5ek6nERTohokkhCD/y2cV4Do3fxFHFuAejCkRWT8= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.12.0 h1:MHc5BpPuC30uJk597Ri8TV3CNZcTLu6B6z4lJy+g6Jw= golang.org/x/sync v0.12.0/go.mod h1:1dzgHSNfp02xaA81J2MS99Qcpr2w7fw1gpm99rleRqA= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190616124812-15dcb6c0061f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200622214017-ed371f2e16b4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.31.0 h1:ioabZlmFYtWhL+TRYpcnNlLwhyxaM9kWTDEmfnprqik= golang.org/x/sys v0.31.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.30.0 h1:PQ39fJZ+mfadBm0y5WlL4vlM7Sx1Hgf13sMIY2+QS9Y= golang.org/x/term v0.30.0/go.mod h1:NYYFdzHoI5wRh/h5tDMdMqCqPJZEuNqVR5xJLd/n67g= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.23.0 h1:D71I7dUrlY+VX0gQShAThNGHFxZ13dGLBHQLVl1mJlY= golang.org/x/text v0.23.0/go.mod h1:/BLNzu4aZCJ1+kcD0DNRotWKage4q2rGVAg4o22unh4= golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20200416051211-89c76fbcd5d1/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.8.0 h1:9i3RxcPv3PZnitoVGMPDKZSq1xW1gK1Xy3ArNOGZfEg= golang.org/x/time v0.8.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20181011042414-1f849cf54d09/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20181030221726-6c7e314b6563/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= golang.org/x/tools v0.0.0-20190312151545-0bb0c0a6e846/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= golang.org/x/tools v0.0.0-20190614205625-5aca471b1d59/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191125144606-a911d9008d1f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191227053925-7b8e75db28f4/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= golang.org/x/tools v0.0.0-20200616133436-c1934b75d054/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.1.1/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d h1:vU5i/LfpvrRCpgM/VPfJLg5KjxD3E+hfT1SH+d9zLwg= golang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d/go.mod h1:aiJjzUbINMkxbQROHiO6hDPo2LHcIPhhQsa9DLh0yGk= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE= google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M= google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= google.golang.org/api v0.9.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= google.golang.org/api v0.15.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= google.golang.org/api v0.214.0 h1:h2Gkq07OYi6kusGOaT/9rnNljuXmqPnaig7WGPmKbwA= google.golang.org/api v0.214.0/go.mod h1:bYPpLG8AyeMWwDU6NXoB00xC0DFkikVvd5MfwoxjLqE= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0= google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= google.golang.org/genproto v0.0.0-20190801165951-fa694d86fc64/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= google.golang.org/genproto v0.0.0-20190911173649-1774047e7e51/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8= google.golang.org/genproto v0.0.0-20191230161307-f3c370f40bfb/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= google.golang.org/genproto v0.0.0-20241118233622-e639e219e697 h1:ToEetK57OidYuqD4Q5w+vfEnPvPpuTwedCNVohYJfNk= google.golang.org/genproto/googleapis/rpc v0.0.0-20241209162323-e6fa225c2576 h1:8ZmaLZE4XWrtU3MyClkYqqtl6Oegr3235h7jxsDyqCY= google.golang.org/genproto/googleapis/rpc v0.0.0-20241209162323-e6fa225c2576/go.mod h1:5uTbfoYQed2U9p3KIj2/Zzm02PYhndfdmML0qC3q3FU= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= google.golang.org/grpc v1.26.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= google.golang.org/grpc v1.69.2 h1:U3S9QEtbXC0bYNvRtcoklF3xGtLViumSYxWykJS+7AU= google.golang.org/grpc v1.69.2/go.mod h1:vyjdE6jLBI76dgpDojsFGNaHlxdjXN9ghpnd2o7JGZ4= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE= google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo= google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGjtUeSXeh4= google.golang.org/protobuf v1.36.1 h1:yBPeRvTftaleIgM3PZ/WBIZ7XM/eEYAaEyCwvyjq/gk= google.golang.org/protobuf v1.36.1/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= gopkg.in/evanphx/json-patch.v4 v4.12.0 h1:n6jtcsulIzXPJaxegRbvFNNrZDjbij7ny3gmSPG+6V4= gopkg.in/evanphx/json-patch.v4 v4.12.0/go.mod h1:p8EYWUEYMpynmqDbY58zCKCFZw8pRWMG4EsWvDvM72M= gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc= gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= k8s.io/api v0.19.0/go.mod h1:I1K45XlvTrDjmj5LoM5LuP/KYrhWbjUKT/SoPG0qTjw= k8s.io/api v0.31.4 h1:I2QNzitPVsPeLQvexMEsj945QumYraqv9m74isPDKhM= k8s.io/api v0.31.4/go.mod h1:d+7vgXLvmcdT1BCo79VEgJxHHryww3V5np2OYTr6jdw= k8s.io/apiextensions-apiserver v0.31.4 h1:FxbqzSvy92Ca9DIs5jqot883G0Ln/PGXfm/07t39LS0= k8s.io/apiextensions-apiserver v0.31.4/go.mod h1:hIW9YU8UsqZqIWGG99/gsdIU0Ar45Qd3A12QOe/rvpg= k8s.io/apimachinery v0.19.0/go.mod h1:DnPGDnARWFvYa3pMHgSxtbZb7gpzzAZ1pTfaUNDVlmA= k8s.io/apimachinery v0.31.4 h1:8xjE2C4CzhYVm9DGf60yohpNUh5AEBnPxCryPBECmlM= k8s.io/apimachinery v0.31.4/go.mod h1:rsPdaZJfTfLsNJSQzNHQvYoTmxhoOEofxtOsF3rtsMo= k8s.io/cli-runtime v0.31.4 h1:iczCWiyXaotW+hyF5cWP8RnEYBCzZfJUF6otJ2m9mw0= k8s.io/cli-runtime v0.31.4/go.mod h1:0/pRzAH7qc0hWx40ut1R4jLqiy2w/KnbqdaAI2eFG8U= k8s.io/client-go v0.19.0/go.mod h1:H9E/VT95blcFQnlyShFgnFT9ZnJOAceiUHM3MlRC+mU= k8s.io/client-go v0.31.4 h1:t4QEXt4jgHIkKKlx06+W3+1JOwAFU/2OPiOo7H92eRQ= k8s.io/client-go v0.31.4/go.mod h1:kvuMro4sFYIa8sulL5Gi5GFqUPvfH2O/dXuKstbaaeg= k8s.io/code-generator v0.19.0/go.mod h1:moqLn7w0t9cMs4+5CQyxnfA/HV8MF6aAVENF+WZZhgk= k8s.io/code-generator v0.31.4 h1:Vu+8fKz+239rKiVDHFVHgjQ162cg5iUQPtTyQbwXeQw= k8s.io/code-generator v0.31.4/go.mod h1:yMDt13Kn7m4MMZ4LxB1KBzdZjEyxzdT4b4qXq+lnI90= k8s.io/component-base v0.31.4 h1:wCquJh4ul9O8nNBSB8N/o8+gbfu3BVQkVw9jAUY/Qtw= k8s.io/component-base v0.31.4/go.mod h1:G4dgtf5BccwiDT9DdejK0qM6zTK0jwDGEKnCmb9+u/s= k8s.io/gengo v0.0.0-20200413195148-3a45101e95ac/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0= k8s.io/gengo v0.0.0-20200428234225-8167cfdcfc14/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0= k8s.io/gengo/v2 v2.0.0-20240228010128-51d4e06bde70 h1:NGrVE502P0s0/1hudf8zjgwki1X/TByhmAoILTarmzo= k8s.io/gengo/v2 v2.0.0-20240228010128-51d4e06bde70/go.mod h1:VH3AT8AaQOqiGjMF9p0/IM1Dj+82ZwjfxUP1IxaHE+8= k8s.io/klog/v2 v2.0.0/go.mod h1:PBfzABfn139FHAV07az/IF9Wp1bkk3vpT2XSJ76fSDE= k8s.io/klog/v2 v2.2.0/go.mod h1:Od+F08eJP+W3HUb4pSrPpgp9DGU4GzlpG/TmITuYh/Y= k8s.io/klog/v2 v2.130.1 h1:n9Xl7H1Xvksem4KFG4PYbdQCQxqc/tTUyrgXaOhHSzk= k8s.io/klog/v2 v2.130.1/go.mod h1:3Jpz1GvMt720eyJH1ckRHK1EDfpxISzJ7I9OYgaDtPE= k8s.io/kube-openapi v0.0.0-20200805222855-6aeccd4b50c6/go.mod h1:UuqjUnNftUyPE5H64/qeyjQoUZhGpeFDVdxjTeEVN2o= k8s.io/kube-openapi v0.0.0-20240228011516-70dd3763d340 h1:BZqlfIlq5YbRMFko6/PM7FjZpUb45WallggurYhKGag= k8s.io/kube-openapi v0.0.0-20240228011516-70dd3763d340/go.mod h1:yD4MZYeKMBwQKVht279WycxKyM84kkAx2DPrTXaeb98= k8s.io/kubectl v0.31.4 h1:c8Af8xd1VjyoKyWMW0xHv2+tYxEjne8s6OOziMmaD10= k8s.io/kubectl v0.31.4/go.mod h1:0E0rpXg40Q57wRE6LB9su+4tmwx1IzZrmIEvhQPk0i4= k8s.io/utils v0.0.0-20200729134348-d5654de09c73/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= k8s.io/utils v0.0.0-20240711033017-18e509b52bc8 h1:pUdcCO1Lk/tbT5ztQWOBi5HBgbBP1J8+AsQnQCKsi8A= k8s.io/utils v0.0.0-20240711033017-18e509b52bc8/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd h1:EDPBXCAspyGV4jQlpZSudPeMmr1bNJefnuqLsRAsHZo= sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd/go.mod h1:B8JuhiUyNFVKdsE8h686QcCxMaH6HrOAZj4vswFpcB0= sigs.k8s.io/kustomize/api v0.17.2 h1:E7/Fjk7V5fboiuijoZHgs4aHuexi5Y2loXlVOAVAG5g= sigs.k8s.io/kustomize/api v0.17.2/go.mod h1:UWTz9Ct+MvoeQsHcJ5e+vziRRkwimm3HytpZgIYqye0= sigs.k8s.io/kustomize/kyaml v0.17.1 h1:TnxYQxFXzbmNG6gOINgGWQt09GghzgTP6mIurOgrLCQ= sigs.k8s.io/kustomize/kyaml v0.17.1/go.mod h1:9V0mCjIEYjlXuCdYsSXvyoy2BTsLESH7TlGV81S282U= sigs.k8s.io/structured-merge-diff/v4 v4.0.1/go.mod h1:bJZC9H9iH24zzfZ/41RGcq60oK1F7G282QMXDPYydCw= sigs.k8s.io/structured-merge-diff/v4 v4.4.3 h1:sCP7Vv3xx/CWIuTPVN38lUPx0uw0lcLfzaiDa8Ja01A= sigs.k8s.io/structured-merge-diff/v4 v4.4.3/go.mod h1:N8f93tFZh9U6vpxwRArLiikrE5/2tiu1w1AGfACIGE4= sigs.k8s.io/yaml v1.1.0/go.mod h1:UJmg0vDUVViEyp3mgSv9WPwZCDxu4rQW1olrI1uml+o= sigs.k8s.io/yaml v1.2.0/go.mod h1:yfXDCHCao9+ENCvLSE62v9VSji2MKu5jeNfTrofGhJc= sigs.k8s.io/yaml v1.4.0 h1:Mk1wCc2gy/F0THH0TAp1QYyJNzRm2KCLy3o5ASXVI5E= sigs.k8s.io/yaml v1.4.0/go.mod h1:Ejl7/uTz7PSA4eKMyQCUTnhZYNmLIl+5c2lQPGR2BPY= ================================================ FILE: index.md ================================================ # Kubestr ## What is it? Kubestr is a collection of tools to discover, validate and evaluate your kubernetes storage options. As adoption of kubernetes grows so have the persistent storage offerings that are available to users. The introduction of [CSI](https://kubernetes.io/blog/2019/01/15/container-storage-interface-ga/)(Container Storage Interface) has enabled storage providers to develop drivers with ease. In fact there are around a 100 different CSI drivers available today. Along with the existing in-tree providers, these options can make choosing the right storage difficult. Kubestr can assist in the following ways- - Identify the various storage options present in a cluster. - Validate if the storage options are configured correctly. - Evaluate the storage using common benchmarking tools like FIO. ## Using Kubestr ### To install the tool - - Ensure that the kubernetes context is set and the cluster is accessible through your terminal. (Does [kubectl](https://kubernetes.io/docs/tasks/tools/install-kubectl/) work?) - Download the latest release [here](https://github.com/kastenhq/kubestr/releases/latest). - Unpack the tool and make it an executable `chmod +x kubestr`. ### To discover available storage options - - Run `./kubestr` ### To run an FIO test - - Run `./kubestr fio -s ` - Additional options like `--size` and `--fiofile` can be specified. - For more information visit our [fio](https://kastenhq.github.io/kubestr/fio) page. ### To check a CSI drivers snapshot and restore capabilities - - Run `./kubestr csicheck -s -v ` ## Roadmap - In the future we plan to allow users to post their FIO results and compare to others. ================================================ FILE: main.go ================================================ // Copyright 2020 Kubestr Developers // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // http://www.apache.org/licenses/LICENSE-2.0 // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package main //go:generate ./scripts/load_csi_provisioners.sh import ( "github.com/kastenhq/kubestr/cmd" "os" ) func main() { if err := Execute(); err != nil { os.Exit(1) } } // Execute executes the main command func Execute() error { return cmd.Execute() } ================================================ FILE: pkg/block/block_mount.go ================================================ package block import ( "context" "fmt" "time" kankube "github.com/kanisterio/kanister/pkg/kube" "github.com/kanisterio/kanister/pkg/poll" "github.com/kastenhq/kubestr/pkg/csi" "github.com/kastenhq/kubestr/pkg/csi/types" v1 "k8s.io/api/core/v1" sv1 "k8s.io/api/storage/v1" apierrors "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/api/resource" "k8s.io/client-go/dynamic" "k8s.io/client-go/kubernetes" ) type BlockMountCheckerArgs struct { KubeCli kubernetes.Interface DynCli dynamic.Interface StorageClass string Namespace string Cleanup bool RunAsUser int64 ContainerImage string K8sObjectReadyTimeout time.Duration PVCSize string } func (a *BlockMountCheckerArgs) Validate() error { if a.KubeCli == nil || a.DynCli == nil || a.StorageClass == "" || a.Namespace == "" { return fmt.Errorf("require fields are missing. (KubeCli, DynCli, StorageClass, Namespace)") } return nil } // BlockMountChecker tests if a storage class can provision volumes for block mounts. type BlockMountChecker interface { Mount(ctx context.Context) (*BlockMountCheckerResult, error) Cleanup() } type BlockMountCheckerResult struct { StorageClass *sv1.StorageClass } const ( blockMountCheckerPVCNameFmt = "kubestr-blockmount-%s-pvc" blockMountCheckerPodNameFmt = "kubestr-blockmount-%s-pod" blockModeCheckerPodCleanupTimeout = time.Second * 120 blockModeCheckerPVCCleanupTimeout = time.Second * 120 blockModeCheckerPVCDefaultSize = "1Gi" ) // blockMountChecker provides BlockMountChecker type blockMountChecker struct { args BlockMountCheckerArgs podName string pvcName string validator csi.ArgumentValidator appCreator csi.ApplicationCreator cleaner csi.Cleaner podCleanupTimeout time.Duration pvcCleanupTimeout time.Duration } func NewBlockMountChecker(args BlockMountCheckerArgs) (BlockMountChecker, error) { if err := args.Validate(); err != nil { return nil, err } b := &blockMountChecker{} b.args = args b.podName = fmt.Sprintf(blockMountCheckerPodNameFmt, b.args.StorageClass) b.pvcName = fmt.Sprintf(blockMountCheckerPVCNameFmt, b.args.StorageClass) b.validator = csi.NewArgumentValidator(b.args.KubeCli, b.args.DynCli) b.appCreator = csi.NewApplicationCreator(b.args.KubeCli, args.K8sObjectReadyTimeout) b.cleaner = csi.NewCleaner(b.args.KubeCli, b.args.DynCli) b.podCleanupTimeout = blockModeCheckerPodCleanupTimeout b.pvcCleanupTimeout = blockModeCheckerPVCCleanupTimeout return b, nil } func (b *blockMountChecker) Mount(ctx context.Context) (*BlockMountCheckerResult, error) { fmt.Printf("Fetching StorageClass %s ...\n", b.args.StorageClass) sc, err := b.validator.ValidateStorageClass(ctx, b.args.StorageClass) if err != nil { fmt.Printf(" -> Failed to fetch StorageClass(%s): (%v)\n", b.args.StorageClass, err) return nil, err } fmt.Printf(" -> Provisioner: %s\n", sc.Provisioner) if b.args.PVCSize == "" { b.args.PVCSize = blockModeCheckerPVCDefaultSize } restoreSize, err := resource.ParseQuantity(b.args.PVCSize) if err != nil { fmt.Printf(" -> Invalid PVC size %s: (%v)\n", b.args.PVCSize, err) return nil, err } blockMode := v1.PersistentVolumeBlock createPVCArgs := &types.CreatePVCArgs{ Name: b.pvcName, Namespace: b.args.Namespace, StorageClass: b.args.StorageClass, VolumeMode: &blockMode, RestoreSize: &restoreSize, } if b.args.Cleanup { defer b.Cleanup() } fmt.Printf("Provisioning a Volume (%s) for block mode access ...\n", b.args.PVCSize) tB := time.Now() _, err = b.appCreator.CreatePVC(ctx, createPVCArgs) if err != nil { fmt.Printf(" -> Failed to provision a Volume (%v)\n", err) return nil, err } fmt.Printf(" -> Created PVC %s/%s (%s)\n", b.args.Namespace, b.pvcName, time.Since(tB).Truncate(time.Millisecond).String()) fmt.Println("Creating a Pod with a volumeDevice ...") tB = time.Now() _, err = b.appCreator.CreatePod(ctx, &types.CreatePodArgs{ Name: b.podName, Namespace: b.args.Namespace, RunAsUser: b.args.RunAsUser, ContainerImage: b.args.ContainerImage, Command: []string{"/bin/sh"}, ContainerArgs: []string{"-c", "tail -f /dev/null"}, PVCMap: map[string]types.VolumePath{ b.pvcName: { DevicePath: "/mnt/block", }, }, }) if err != nil { fmt.Printf(" -> Failed to create Pod (%v)\n", err) return nil, err } fmt.Printf(" -> Created Pod %s/%s\n", b.args.Namespace, b.podName) fmt.Printf(" -> Waiting at most %s for the Pod to become ready ...\n", b.args.K8sObjectReadyTimeout.String()) if err = b.appCreator.WaitForPodReady(ctx, b.args.Namespace, b.podName); err != nil { fmt.Printf(" -> The Pod timed out (%v)\n", err) return nil, err } fmt.Printf(" -> The Pod is ready (%s)\n", time.Since(tB).Truncate(time.Millisecond).String()) return &BlockMountCheckerResult{ StorageClass: sc, }, nil } func (b *blockMountChecker) Cleanup() { var ( ctx = context.Background() err error ) // delete Pod fmt.Printf("Deleting Pod %s/%s ...\n", b.args.Namespace, b.podName) tB := time.Now() err = b.cleaner.DeletePod(ctx, b.podName, b.args.Namespace) if err != nil && !apierrors.IsNotFound(err) { fmt.Printf(" Error deleting Pod %s/%s - (%v)\n", b.args.Namespace, b.podName, err) } // Give it a chance to run ... podWaitCtx, podWaitCancelFn := context.WithTimeout(context.Background(), b.podCleanupTimeout) defer podWaitCancelFn() err = kankube.WaitForPodCompletion(podWaitCtx, b.args.KubeCli, b.args.Namespace, b.podName) if err == nil || (err != nil && apierrors.IsNotFound(err)) { fmt.Printf(" -> Deleted pod (%s)\n", time.Since(tB).Truncate(time.Millisecond).String()) } else { fmt.Printf(" -> Failed to delete Pod in %s\n", time.Since(tB).Truncate(time.Millisecond).String()) } // delete PVC fmt.Printf("Deleting PVC %s/%s ...\n", b.args.Namespace, b.pvcName) tB = time.Now() err = b.cleaner.DeletePVC(ctx, b.pvcName, b.args.Namespace) if err != nil && !apierrors.IsNotFound(err) { fmt.Printf(" Error deleting PVC %s/%s - (%v)\n", b.args.Namespace, b.pvcName, err) } err = b.pvcWaitForTermination(b.pvcCleanupTimeout) if err != nil { fmt.Printf(" -> PVC failed to delete in %s\n", time.Since(tB).Truncate(time.Millisecond).String()) } else { fmt.Printf(" -> Deleted PVC (%s)\n", time.Since(tB).Truncate(time.Millisecond).String()) } } func (b *blockMountChecker) pvcWaitForTermination(timeout time.Duration) error { pvcWaitCtx, pvcWaitCancelFn := context.WithTimeout(context.Background(), timeout) defer pvcWaitCancelFn() return poll.Wait(pvcWaitCtx, func(ctx context.Context) (bool, error) { _, err := b.validator.ValidatePVC(ctx, b.pvcName, b.args.Namespace) if err != nil && apierrors.IsNotFound(err) { return true, nil } return false, nil }) } ================================================ FILE: pkg/block/block_mount_test.go ================================================ package block import ( "context" "errors" "fmt" "testing" "time" qt "github.com/frankban/quicktest" "github.com/golang/mock/gomock" "github.com/kastenhq/kubestr/pkg/csi/mocks" "github.com/kastenhq/kubestr/pkg/csi/types" v1 "k8s.io/api/core/v1" sv1 "k8s.io/api/storage/v1" apierrors "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/api/resource" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/schema" fakedynamic "k8s.io/client-go/dynamic/fake" "k8s.io/client-go/kubernetes/fake" ) func TestBlockMountCheckerNew(t *testing.T) { kubeCli := fake.NewSimpleClientset() dynCli := fakedynamic.NewSimpleDynamicClient(runtime.NewScheme()) invalidArgs := []struct { name string args BlockMountCheckerArgs }{ {"args:empty", BlockMountCheckerArgs{}}, {"args:KubeCli", BlockMountCheckerArgs{ KubeCli: kubeCli, }}, {"args:KubeCli-DynCli", BlockMountCheckerArgs{ KubeCli: kubeCli, DynCli: dynCli, }}, {"args:KubeCli-DynCli-StorageClass", BlockMountCheckerArgs{ KubeCli: kubeCli, DynCli: dynCli, StorageClass: "sc", }}, } for _, tc := range invalidArgs { t.Run(tc.name, func(t *testing.T) { c := qt.New(t) bmt, err := NewBlockMountChecker(tc.args) c.Assert(err, qt.IsNotNil) c.Assert(bmt, qt.IsNil) }) } t.Run("success", func(t *testing.T) { c := qt.New(t) args := BlockMountCheckerArgs{ KubeCli: kubeCli, DynCli: dynCli, StorageClass: "sc", Namespace: "namespace", } bmt, err := NewBlockMountChecker(args) c.Assert(err, qt.IsNil) c.Assert(bmt, qt.IsNotNil) b, ok := bmt.(*blockMountChecker) c.Assert(ok, qt.IsTrue) c.Assert(b.args, qt.Equals, args) c.Assert(b.validator, qt.IsNotNil) c.Assert(b.appCreator, qt.IsNotNil) c.Assert(b.cleaner, qt.IsNotNil) c.Assert(b.podName, qt.Equals, fmt.Sprintf(blockMountCheckerPodNameFmt, args.StorageClass)) c.Assert(b.pvcName, qt.Equals, fmt.Sprintf(blockMountCheckerPVCNameFmt, args.StorageClass)) c.Assert(b.podCleanupTimeout, qt.Equals, blockModeCheckerPodCleanupTimeout) c.Assert(b.pvcCleanupTimeout, qt.Equals, blockModeCheckerPVCCleanupTimeout) }) } func TestBlockMountCheckerPvcWaitForTermination(t *testing.T) { type prepareArgs struct { b *blockMountChecker mockValidator *mocks.MockArgumentValidator } kubeCli := fake.NewSimpleClientset() dynCli := fakedynamic.NewSimpleDynamicClient(runtime.NewScheme()) tcs := []struct { name string pvcTimeout time.Duration prepare func(*prepareArgs) expErr error }{ { name: "success", pvcTimeout: time.Hour, prepare: func(pa *prepareArgs) { pa.mockValidator.EXPECT().ValidatePVC(gomock.Any(), pa.b.pvcName, pa.b.args.Namespace).Return(nil, apierrors.NewNotFound(schema.GroupResource{}, "")) }, }, { name: "timeout", pvcTimeout: time.Microsecond, // pvc wait will timeout prepare: func(pa *prepareArgs) { pa.mockValidator.EXPECT().ValidatePVC(gomock.Any(), pa.b.pvcName, pa.b.args.Namespace).Return(&v1.PersistentVolumeClaim{}, nil).AnyTimes() }, expErr: context.DeadlineExceeded, }, } for _, tc := range tcs { t.Run(tc.name, func(t *testing.T) { c := qt.New(t) args := BlockMountCheckerArgs{ KubeCli: kubeCli, DynCli: dynCli, StorageClass: "sc", Namespace: "namespace", } bmt, err := NewBlockMountChecker(args) c.Assert(err, qt.IsNil) c.Assert(bmt, qt.IsNotNil) b, ok := bmt.(*blockMountChecker) c.Assert(ok, qt.IsTrue) ctrl := gomock.NewController(t) defer ctrl.Finish() pa := &prepareArgs{ b: b, mockValidator: mocks.NewMockArgumentValidator(ctrl), } tc.prepare(pa) b.validator = pa.mockValidator err = b.pvcWaitForTermination(tc.pvcTimeout) if tc.expErr != nil { c.Assert(err, qt.ErrorIs, tc.expErr) } else { c.Assert(err, qt.IsNil) } }) } } func TestBlockMountCheckerCleanup(t *testing.T) { type prepareArgs struct { b *blockMountChecker mockCleaner *mocks.MockCleaner mockValidator *mocks.MockArgumentValidator } errNotFound := apierrors.NewNotFound(schema.GroupResource{}, "") someError := errors.New("test error") scName := "sc" namespace := "namespace" runningPod := &v1.Pod{ ObjectMeta: metav1.ObjectMeta{ Name: fmt.Sprintf(blockMountCheckerPodNameFmt, scName), Namespace: namespace, }, Spec: v1.PodSpec{ Containers: []v1.Container{ {Name: "container-0"}, }, }, Status: v1.PodStatus{ Phase: v1.PodRunning, }, } tcs := []struct { name string podTimeout time.Duration pvcTimeout time.Duration objs []runtime.Object prepare func(*prepareArgs) }{ { name: "nothing-found", podTimeout: time.Hour, pvcTimeout: time.Hour, prepare: func(pa *prepareArgs) { pa.mockCleaner.EXPECT().DeletePod(gomock.Any(), pa.b.podName, pa.b.args.Namespace).Return(errNotFound) pa.mockCleaner.EXPECT().DeletePVC(gomock.Any(), pa.b.pvcName, pa.b.args.Namespace).Return(errNotFound) pa.mockValidator.EXPECT().ValidatePVC(gomock.Any(), pa.b.pvcName, pa.b.args.Namespace).Return(nil, errNotFound) }, }, { name: "error-deleting-pod", podTimeout: time.Microsecond, // pod wait will timeout pvcTimeout: time.Hour, objs: []runtime.Object{runningPod}, prepare: func(pa *prepareArgs) { pa.mockCleaner.EXPECT().DeletePod(gomock.Any(), pa.b.podName, pa.b.args.Namespace).Return(someError) pa.mockCleaner.EXPECT().DeletePVC(gomock.Any(), pa.b.pvcName, pa.b.args.Namespace).Return(errNotFound) pa.mockValidator.EXPECT().ValidatePVC(gomock.Any(), pa.b.pvcName, pa.b.args.Namespace).Return(nil, errNotFound) }, }, { name: "error-deleting-pvc", podTimeout: time.Hour, pvcTimeout: time.Microsecond, // timeout prepare: func(pa *prepareArgs) { pa.mockCleaner.EXPECT().DeletePod(gomock.Any(), pa.b.podName, pa.b.args.Namespace).Return(errNotFound) pa.mockCleaner.EXPECT().DeletePVC(gomock.Any(), pa.b.pvcName, pa.b.args.Namespace).Return(someError) pa.mockValidator.EXPECT().ValidatePVC(gomock.Any(), pa.b.pvcName, pa.b.args.Namespace).Return(nil, someError).AnyTimes() }, }, } for _, tc := range tcs { t.Run(tc.name, func(t *testing.T) { c := qt.New(t) kubeCli := fake.NewSimpleClientset(tc.objs...) dynCli := fakedynamic.NewSimpleDynamicClient(runtime.NewScheme()) args := BlockMountCheckerArgs{ KubeCli: kubeCli, DynCli: dynCli, StorageClass: scName, Namespace: namespace, } bmt, err := NewBlockMountChecker(args) c.Assert(err, qt.IsNil) c.Assert(bmt, qt.IsNotNil) b, ok := bmt.(*blockMountChecker) c.Assert(ok, qt.IsTrue) ctrl := gomock.NewController(t) defer ctrl.Finish() pa := &prepareArgs{ b: b, mockCleaner: mocks.NewMockCleaner(ctrl), mockValidator: mocks.NewMockArgumentValidator(ctrl), } tc.prepare(pa) b.validator = pa.mockValidator b.cleaner = pa.mockCleaner b.podCleanupTimeout = tc.podTimeout b.pvcCleanupTimeout = tc.pvcTimeout b.Cleanup() }) } } func TestBlockMountCheckerMount(t *testing.T) { type prepareArgs struct { b *blockMountChecker mockCleaner *mocks.MockCleaner mockValidator *mocks.MockArgumentValidator mockAppCreator *mocks.MockApplicationCreator } errNotFound := apierrors.NewNotFound(schema.GroupResource{}, "") someError := errors.New("test error") scName := "sc" scProvisioner := "provisioenr" sc := &sv1.StorageClass{ ObjectMeta: metav1.ObjectMeta{ Name: scName, }, Provisioner: scProvisioner, } namespace := "namespace" cleanupCalls := func(pa *prepareArgs) { pa.mockCleaner.EXPECT().DeletePod(gomock.Any(), pa.b.podName, pa.b.args.Namespace).Return(errNotFound) pa.mockCleaner.EXPECT().DeletePVC(gomock.Any(), pa.b.pvcName, pa.b.args.Namespace).Return(errNotFound) pa.mockValidator.EXPECT().ValidatePVC(gomock.Any(), pa.b.pvcName, pa.b.args.Namespace).Return(nil, errNotFound) } createPVCArgs := func(b *blockMountChecker) *types.CreatePVCArgs { pvcSize := b.args.PVCSize if pvcSize == "" { pvcSize = blockModeCheckerPVCDefaultSize } restoreSize := resource.MustParse(pvcSize) blockMode := v1.PersistentVolumeBlock return &types.CreatePVCArgs{ Name: b.pvcName, Namespace: b.args.Namespace, StorageClass: b.args.StorageClass, VolumeMode: &blockMode, RestoreSize: &restoreSize, } } createPVC := func(b *blockMountChecker) *v1.PersistentVolumeClaim { return &v1.PersistentVolumeClaim{ ObjectMeta: metav1.ObjectMeta{ Namespace: b.args.Namespace, Name: b.pvcName, }, } } createPodArgs := func(b *blockMountChecker) *types.CreatePodArgs { return &types.CreatePodArgs{ Name: b.podName, Namespace: b.args.Namespace, RunAsUser: b.args.RunAsUser, ContainerImage: b.args.ContainerImage, Command: []string{"/bin/sh"}, ContainerArgs: []string{"-c", "tail -f /dev/null"}, PVCMap: map[string]types.VolumePath{ b.pvcName: { DevicePath: "/mnt/block", }, }, } } createPod := func(b *blockMountChecker) *v1.Pod { return &v1.Pod{ ObjectMeta: metav1.ObjectMeta{ Namespace: b.args.Namespace, Name: b.podName, }, } } tcs := []struct { name string podTimeout time.Duration pvcTimeout time.Duration noCleanup bool objs []runtime.Object prepare func(*prepareArgs) result *BlockMountCheckerResult }{ { name: "no-storage-class", podTimeout: time.Hour, pvcTimeout: time.Hour, prepare: func(pa *prepareArgs) { pa.mockValidator.EXPECT().ValidateStorageClass(gomock.Any(), pa.b.args.StorageClass).Return(nil, apierrors.NewNotFound(schema.GroupResource{}, pa.b.args.StorageClass)) }, }, { name: "invalid-pvc-size", podTimeout: time.Hour, pvcTimeout: time.Hour, prepare: func(pa *prepareArgs) { pa.b.args.PVCSize = "10Q" pa.mockValidator.EXPECT().ValidateStorageClass(gomock.Any(), pa.b.args.StorageClass).Return(sc, nil) }, }, { name: "create-pvc-error", podTimeout: time.Hour, pvcTimeout: time.Hour, prepare: func(pa *prepareArgs) { pa.mockValidator.EXPECT().ValidateStorageClass(gomock.Any(), pa.b.args.StorageClass).Return(sc, nil) pa.mockAppCreator.EXPECT().CreatePVC(gomock.Any(), createPVCArgs(pa.b)).Return(nil, someError) cleanupCalls(pa) }, }, { name: "create-pod-error", podTimeout: time.Hour, pvcTimeout: time.Hour, prepare: func(pa *prepareArgs) { pa.mockValidator.EXPECT().ValidateStorageClass(gomock.Any(), pa.b.args.StorageClass).Return(sc, nil) pa.mockAppCreator.EXPECT().CreatePVC(gomock.Any(), createPVCArgs(pa.b)).Return(createPVC(pa.b), nil) pa.mockAppCreator.EXPECT().CreatePod(gomock.Any(), createPodArgs(pa.b)).Return(nil, someError) cleanupCalls(pa) }, }, { name: "wait-for-pod-error", podTimeout: time.Hour, pvcTimeout: time.Hour, prepare: func(pa *prepareArgs) { pa.mockValidator.EXPECT().ValidateStorageClass(gomock.Any(), pa.b.args.StorageClass).Return(sc, nil) pa.mockAppCreator.EXPECT().CreatePVC(gomock.Any(), createPVCArgs(pa.b)).Return(createPVC(pa.b), nil) pa.mockAppCreator.EXPECT().CreatePod(gomock.Any(), createPodArgs(pa.b)).Return(createPod(pa.b), nil) pa.mockAppCreator.EXPECT().WaitForPodReady(gomock.Any(), pa.b.args.Namespace, pa.b.podName).Return(someError) cleanupCalls(pa) }, }, { name: "success-no-cleanup", podTimeout: time.Hour, pvcTimeout: time.Hour, noCleanup: true, prepare: func(pa *prepareArgs) { pa.mockValidator.EXPECT().ValidateStorageClass(gomock.Any(), pa.b.args.StorageClass).Return(sc, nil) pa.b.args.PVCSize = blockModeCheckerPVCDefaultSize pa.mockAppCreator.EXPECT().CreatePVC(gomock.Any(), createPVCArgs(pa.b)).Return(createPVC(pa.b), nil) pa.mockAppCreator.EXPECT().CreatePod(gomock.Any(), createPodArgs(pa.b)).Return(createPod(pa.b), nil) pa.mockAppCreator.EXPECT().WaitForPodReady(gomock.Any(), pa.b.args.Namespace, pa.b.podName).Return(nil) }, result: &BlockMountCheckerResult{ StorageClass: sc, }, }, } for _, tc := range tcs { t.Run(tc.name, func(t *testing.T) { c := qt.New(t) ctx := context.Background() kubeCli := fake.NewSimpleClientset(tc.objs...) dynCli := fakedynamic.NewSimpleDynamicClient(runtime.NewScheme()) args := BlockMountCheckerArgs{ KubeCli: kubeCli, DynCli: dynCli, StorageClass: scName, Namespace: namespace, Cleanup: !tc.noCleanup, } bmt, err := NewBlockMountChecker(args) c.Assert(err, qt.IsNil) c.Assert(bmt, qt.IsNotNil) b, ok := bmt.(*blockMountChecker) c.Assert(ok, qt.IsTrue) ctrl := gomock.NewController(t) defer ctrl.Finish() pa := &prepareArgs{ b: b, mockCleaner: mocks.NewMockCleaner(ctrl), mockValidator: mocks.NewMockArgumentValidator(ctrl), mockAppCreator: mocks.NewMockApplicationCreator(ctrl), } tc.prepare(pa) b.validator = pa.mockValidator b.cleaner = pa.mockCleaner b.appCreator = pa.mockAppCreator b.podCleanupTimeout = tc.podTimeout b.pvcCleanupTimeout = tc.pvcTimeout result, err := b.Mount(ctx) if tc.result != nil { c.Assert(result, qt.DeepEquals, tc.result) c.Assert(err, qt.IsNil) } else { c.Assert(result, qt.IsNil) c.Assert(err, qt.IsNotNil) } }) } } ================================================ FILE: pkg/common/common.go ================================================ package common const ( // VolSnapClassDriverKey describes the driver key in VolumeSnapshotClass resource VolSnapClassDriverKey = "driver" // DefaultPodImage the default pod image DefaultPodImage = "ghcr.io/kastenhq/kubestr:latest" // SnapGroupName describes the snapshot group name SnapGroupName = "snapshot.storage.k8s.io" // VolumeSnapshotClassResourcePlural describes volume snapshot classses VolumeSnapshotClassResourcePlural = "volumesnapshotclasses" // VolumeSnapshotResourcePlural is "volumesnapshots" VolumeSnapshotResourcePlural = "volumesnapshots" // SnapshotVersion is the apiversion of the VolumeSnapshot resource SnapshotVersion = "snapshot.storage.k8s.io/v1" ) ================================================ FILE: pkg/csi/csi.go ================================================ package csi import ( "context" "github.com/kastenhq/kubestr/pkg/csi/types" ) type CSI interface { RunSnapshotRestore(ctx context.Context, args *types.CSISnapshotRestoreArgs) (*types.CSISnapshotRestoreResults, error) } ================================================ FILE: pkg/csi/csi_ops.go ================================================ package csi // This file contains general Kubernetes operations, not just CSI related operations. import ( "context" "fmt" "log" "net/http" "net/url" "strings" "time" "k8s.io/apimachinery/pkg/runtime" "github.com/kanisterio/kanister/pkg/kube" kansnapshot "github.com/kanisterio/kanister/pkg/kube/snapshot" "github.com/kanisterio/kanister/pkg/poll" "github.com/kastenhq/kubestr/pkg/common" "github.com/kastenhq/kubestr/pkg/csi/types" snapv1 "github.com/kubernetes-csi/external-snapshotter/client/v4/apis/volumesnapshot/v1" "github.com/pkg/errors" v1 "k8s.io/api/core/v1" sv1 "k8s.io/api/storage/v1" "k8s.io/apimachinery/pkg/api/resource" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" "k8s.io/apimachinery/pkg/fields" "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/client-go/dynamic" "k8s.io/client-go/kubernetes" "k8s.io/client-go/rest" pf "k8s.io/client-go/tools/portforward" "k8s.io/client-go/transport/spdy" ) const ( defaultReadyWaitTimeout = 2 * time.Minute PVCKind = "PersistentVolumeClaim" PodKind = "Pod" // DefaultVolumeSnapshotClassAnnotation is an annotation used to denote a default VolumeSnapshotClass. DefaultVolumeSnapshotClassAnnotation = "snapshot.storage.kubernetes.io/is-default-class" ) //go:generate go run github.com/golang/mock/mockgen -destination=mocks/mock_argument_validator.go -package=mocks . ArgumentValidator type ArgumentValidator interface { //Rename ValidatePVC(ctx context.Context, pvcName, namespace string) (*v1.PersistentVolumeClaim, error) FetchPV(ctx context.Context, pvName string) (*v1.PersistentVolume, error) ValidateVolumeSnapshot(ctx context.Context, snapshotName, namespace string, groupVersion *metav1.GroupVersionForDiscovery) (*snapv1.VolumeSnapshot, error) ValidateNamespace(ctx context.Context, namespace string) error ValidateStorageClass(ctx context.Context, storageClass string) (*sv1.StorageClass, error) ValidateVolumeSnapshotClass(ctx context.Context, volumeSnapshotClass string, groupVersion *metav1.GroupVersionForDiscovery) (*unstructured.Unstructured, error) } type validateOperations struct { kubeCli kubernetes.Interface dynCli dynamic.Interface } func NewArgumentValidator(kubeCli kubernetes.Interface, dynCli dynamic.Interface) ArgumentValidator { return &validateOperations{ kubeCli: kubeCli, dynCli: dynCli, } } func (o *validateOperations) ValidatePVC(ctx context.Context, pvcName, namespace string) (*v1.PersistentVolumeClaim, error) { if o.kubeCli == nil { return nil, fmt.Errorf("kubeCli not initialized") } pvc, err := o.kubeCli.CoreV1().PersistentVolumeClaims(namespace).Get(ctx, pvcName, metav1.GetOptions{}) if err != nil { return nil, err } return pvc, nil } func (o *validateOperations) ValidateVolumeSnapshot(ctx context.Context, snapshotName, namespace string, groupVersion *metav1.GroupVersionForDiscovery) (*snapv1.VolumeSnapshot, error) { VolSnapGVR := schema.GroupVersionResource{Group: snapv1.GroupName, Version: groupVersion.Version, Resource: common.VolumeSnapshotResourcePlural} uVS, err := o.dynCli.Resource(VolSnapGVR).Namespace(namespace).Get(ctx, snapshotName, metav1.GetOptions{}) if err != nil { log.Fatalf("Failed to get VolumeSnapshot: %v", err) } volumeSnapshot := &snapv1.VolumeSnapshot{} err = runtime.DefaultUnstructuredConverter.FromUnstructured(uVS.UnstructuredContent(), volumeSnapshot) return volumeSnapshot, err } func (o *validateOperations) FetchPV(ctx context.Context, pvName string) (*v1.PersistentVolume, error) { if o.kubeCli == nil { return nil, fmt.Errorf("kubeCli not initialized") } pv, err := o.kubeCli.CoreV1().PersistentVolumes().Get(ctx, pvName, metav1.GetOptions{}) if err != nil { return nil, err } return pv, nil } func (o *validateOperations) ValidateNamespace(ctx context.Context, namespace string) error { if o.kubeCli == nil { return fmt.Errorf("kubeCli not initialized") } _, err := o.kubeCli.CoreV1().Namespaces().Get(ctx, namespace, metav1.GetOptions{}) return err } func (o *validateOperations) ValidateStorageClass(ctx context.Context, storageClass string) (*sv1.StorageClass, error) { if o.kubeCli == nil { return nil, fmt.Errorf("kubeCli not initialized") } sc, err := o.kubeCli.StorageV1().StorageClasses().Get(ctx, storageClass, metav1.GetOptions{}) if err != nil { return nil, err } return sc, nil } func (o *validateOperations) ValidateVolumeSnapshotClass(ctx context.Context, volumeSnapshotClass string, groupVersion *metav1.GroupVersionForDiscovery) (*unstructured.Unstructured, error) { if o.dynCli == nil { return nil, fmt.Errorf("dynCli not initialized") } VolSnapClassGVR := schema.GroupVersionResource{Group: common.SnapGroupName, Version: groupVersion.Version, Resource: common.VolumeSnapshotClassResourcePlural} return o.dynCli.Resource(VolSnapClassGVR).Get(ctx, volumeSnapshotClass, metav1.GetOptions{}) } //go:generate go run github.com/golang/mock/mockgen -destination=mocks/mock_application_creator.go -package=mocks . ApplicationCreator type ApplicationCreator interface { CreatePVC(ctx context.Context, args *types.CreatePVCArgs) (*v1.PersistentVolumeClaim, error) CreatePod(ctx context.Context, args *types.CreatePodArgs) (*v1.Pod, error) WaitForPVCReady(ctx context.Context, namespace string, pvcName string) error WaitForPodReady(ctx context.Context, namespace string, podName string) error } type applicationCreate struct { kubeCli kubernetes.Interface k8sObjectReadyTimeout time.Duration } func NewApplicationCreator(kubeCli kubernetes.Interface, k8sObjectReadyTimeout time.Duration) ApplicationCreator { return &applicationCreate{ kubeCli: kubeCli, k8sObjectReadyTimeout: k8sObjectReadyTimeout, } } func (c *applicationCreate) CreatePVC(ctx context.Context, args *types.CreatePVCArgs) (*v1.PersistentVolumeClaim, error) { if c.kubeCli == nil { return nil, fmt.Errorf("kubeCli not initialized") } if err := args.Validate(); err != nil { return nil, err } pvc := &v1.PersistentVolumeClaim{ ObjectMeta: metav1.ObjectMeta{ Name: args.Name, GenerateName: args.GenerateName, Namespace: args.Namespace, Labels: map[string]string{ createdByLabel: "yes", }, }, Spec: v1.PersistentVolumeClaimSpec{ AccessModes: []v1.PersistentVolumeAccessMode{v1.ReadWriteOnce}, StorageClassName: &args.StorageClass, VolumeMode: args.VolumeMode, Resources: v1.VolumeResourceRequirements{ Requests: v1.ResourceList{ v1.ResourceStorage: resource.MustParse("1Gi"), }, }, }, } if args.DataSource != nil { pvc.Spec.DataSource = args.DataSource } if args.RestoreSize != nil && !args.RestoreSize.IsZero() { pvc.Spec.Resources.Requests[v1.ResourceStorage] = *args.RestoreSize } pvcRes, err := c.kubeCli.CoreV1().PersistentVolumeClaims(args.Namespace).Create(ctx, pvc, metav1.CreateOptions{}) if err != nil { return pvc, err } return pvcRes, nil } func (c *applicationCreate) CreatePod(ctx context.Context, args *types.CreatePodArgs) (*v1.Pod, error) { if c.kubeCli == nil { return nil, fmt.Errorf("kubeCli not initialized") } if err := args.Validate(); err != nil { return nil, err } if args.ContainerImage == "" { args.ContainerImage = common.DefaultPodImage } volumeNameInPod := "persistent-storage" containerName := args.Name if containerName == "" { containerName = args.GenerateName } pod := &v1.Pod{ ObjectMeta: metav1.ObjectMeta{ Name: args.Name, GenerateName: args.GenerateName, Namespace: args.Namespace, Labels: map[string]string{ createdByLabel: "yes", }, }, Spec: v1.PodSpec{ Containers: []v1.Container{{ Name: containerName, Image: args.ContainerImage, Command: args.Command, Args: args.ContainerArgs, }}, }, } pvcCount := 1 for pvcName, path := range args.PVCMap { pod.Spec.Volumes = append(pod.Spec.Volumes, v1.Volume{ Name: fmt.Sprintf("%s-%d", volumeNameInPod, pvcCount), VolumeSource: v1.VolumeSource{ PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{ ClaimName: pvcName, }, }, }) if len(path.MountPath) != 0 { pod.Spec.Containers[0].VolumeMounts = append(pod.Spec.Containers[0].VolumeMounts, v1.VolumeMount{ Name: fmt.Sprintf("%s-%d", volumeNameInPod, pvcCount), MountPath: path.MountPath, }) } else { pod.Spec.Containers[0].VolumeDevices = append(pod.Spec.Containers[0].VolumeDevices, v1.VolumeDevice{ Name: fmt.Sprintf("%s-%d", volumeNameInPod, pvcCount), DevicePath: path.DevicePath, }) } pvcCount++ } if args.RunAsUser > 0 { pod.Spec.SecurityContext = &v1.PodSecurityContext{ RunAsUser: &args.RunAsUser, FSGroup: &args.RunAsUser, } } podRes, err := c.kubeCli.CoreV1().Pods(args.Namespace).Create(ctx, pod, metav1.CreateOptions{}) if err != nil { return pod, err } return podRes, nil } func (c *applicationCreate) WaitForPVCReady(ctx context.Context, namespace, name string) error { if c.kubeCli == nil { return fmt.Errorf("kubeCli not initialized") } err := c.waitForPVCReady(ctx, namespace, name) if err != nil { eventErr := c.getErrorFromEvents(ctx, namespace, name, PVCKind) if eventErr != nil { return errors.Wrapf(eventErr, "had issues creating PVC") } } return err } func (c *applicationCreate) waitForPVCReady(ctx context.Context, namespace string, name string) error { pvcReadyTimeout := c.k8sObjectReadyTimeout if pvcReadyTimeout == 0 { pvcReadyTimeout = defaultReadyWaitTimeout } timeoutCtx, waitCancel := context.WithTimeout(ctx, pvcReadyTimeout) defer waitCancel() return poll.Wait(timeoutCtx, func(ctx context.Context) (bool, error) { pvc, err := c.kubeCli.CoreV1().PersistentVolumeClaims(namespace).Get(timeoutCtx, name, metav1.GetOptions{}) if err != nil { return false, errors.Wrapf(err, "could not find PVC") } if pvc.Status.Phase == v1.ClaimLost { return false, fmt.Errorf("failed to create a PVC, ClaimLost") } return pvc.Status.Phase == v1.ClaimBound, nil }) } func (c *applicationCreate) WaitForPodReady(ctx context.Context, namespace string, podName string) error { if c.kubeCli == nil { return fmt.Errorf("kubeCli not initialized") } err := c.waitForPodReady(ctx, namespace, podName) if err != nil { eventErr := c.getErrorFromEvents(ctx, namespace, podName, PodKind) if eventErr != nil { return errors.Wrapf(eventErr, "had issues creating Pod") } } return err } func (c *applicationCreate) waitForPodReady(ctx context.Context, namespace string, podName string) error { podReadyTimeout := c.k8sObjectReadyTimeout if podReadyTimeout == 0 { podReadyTimeout = defaultReadyWaitTimeout } timeoutCtx, waitCancel := context.WithTimeout(ctx, podReadyTimeout) defer waitCancel() err := kube.WaitForPodReady(timeoutCtx, c.kubeCli, namespace, podName) return err } func (c *applicationCreate) getErrorFromEvents(ctx context.Context, namespace, name, kind string) error { fieldSelectors := fields.Set{ "involvedObject.kind": kind, "involvedObject.name": name, }.AsSelector().String() listOptions := metav1.ListOptions{ TypeMeta: metav1.TypeMeta{Kind: kind}, FieldSelector: fieldSelectors, } events, eventErr := c.kubeCli.CoreV1().Events(namespace).List(ctx, listOptions) if eventErr != nil { return errors.Wrapf(eventErr, "failed to retreieve events for %s of kind: %s", name, kind) } for _, event := range events.Items { if event.Type == v1.EventTypeWarning { return errors.New(event.Message) } } return nil } //go:generate go run github.com/golang/mock/mockgen -destination=mocks/mock_snapshot_creator.go -package=mocks . SnapshotCreator type SnapshotCreator interface { NewSnapshotter() (kansnapshot.Snapshotter, error) CreateSnapshot(ctx context.Context, snapshotter kansnapshot.Snapshotter, args *types.CreateSnapshotArgs) (*snapv1.VolumeSnapshot, error) CreateFromSourceCheck(ctx context.Context, snapshotter kansnapshot.Snapshotter, args *types.CreateFromSourceCheckArgs, SnapshotGroupVersion *metav1.GroupVersionForDiscovery) error } type snapshotCreate struct { kubeCli kubernetes.Interface dynCli dynamic.Interface } func (c *snapshotCreate) NewSnapshotter() (kansnapshot.Snapshotter, error) { if c.kubeCli == nil { return nil, fmt.Errorf("kubeCli not initialized") } if c.dynCli == nil { return nil, fmt.Errorf("dynCli not initialized") } return kansnapshot.NewSnapshotter(c.kubeCli, c.dynCli), nil } func (c *snapshotCreate) CreateSnapshot(ctx context.Context, snapshotter kansnapshot.Snapshotter, args *types.CreateSnapshotArgs) (*snapv1.VolumeSnapshot, error) { if snapshotter == nil || args == nil { return nil, fmt.Errorf("snapshotter or args are empty") } if err := args.Validate(); err != nil { return nil, err } snapshotMeta := kansnapshot.ObjectMeta{ Name: args.SnapshotName, Namespace: args.Namespace, } err := snapshotter.Create(ctx, args.PVCName, &args.VolumeSnapshotClass, true, snapshotMeta) if err != nil { return nil, errors.Wrapf(err, "CSI Driver failed to create snapshot for PVC (%s) in Namespace (%s)", args.PVCName, args.Namespace) } snap, err := snapshotter.Get(ctx, args.SnapshotName, args.Namespace) if err != nil { return nil, errors.Wrapf(err, "failed to get CSI snapshot (%s) in Namespace (%s)", args.SnapshotName, args.Namespace) } return snap, nil } func (c *snapshotCreate) CreateFromSourceCheck(ctx context.Context, snapshotter kansnapshot.Snapshotter, args *types.CreateFromSourceCheckArgs, SnapshotGroupVersion *metav1.GroupVersionForDiscovery) error { if c.dynCli == nil { return fmt.Errorf("dynCli not initialized") } if SnapshotGroupVersion == nil || SnapshotGroupVersion.Version == "" { return fmt.Errorf("snapshot group version not provided") } if snapshotter == nil || args == nil { return fmt.Errorf("snapshotter or args are nil") } if err := args.Validate(); err != nil { return err } targetSnapClassName := clonePrefix + args.VolumeSnapshotClass err := snapshotter.CloneVolumeSnapshotClass(ctx, args.VolumeSnapshotClass, targetSnapClassName, kansnapshot.DeletionPolicyRetain, []string{DefaultVolumeSnapshotClassAnnotation}) if err != nil { return errors.Wrapf(err, "failed to clone a VolumeSnapshotClass to use to restore the snapshot") } defer func() { VolSnapClassGVR := schema.GroupVersionResource{Group: common.SnapGroupName, Version: SnapshotGroupVersion.Version, Resource: common.VolumeSnapshotClassResourcePlural} err := c.dynCli.Resource(VolSnapClassGVR).Delete(ctx, targetSnapClassName, metav1.DeleteOptions{}) if err != nil { fmt.Printf("Delete VSC Error (%s) - (%v)\n", targetSnapClassName, err) } }() snapSrc, err := snapshotter.GetSource(ctx, args.SnapshotName, args.Namespace) if err != nil { return errors.Wrapf(err, "failed to get source snapshot source (%s)", args.SnapshotName) } snapshotCFSCloneName := clonePrefix + args.SnapshotName // test the CreateFromSource API defer func() { _, _ = snapshotter.Delete(context.Background(), snapshotCFSCloneName, args.Namespace) }() src := &kansnapshot.Source{ Handle: snapSrc.Handle, Driver: snapSrc.Driver, VolumeSnapshotClassName: targetSnapClassName, } snapshotMeta := kansnapshot.ObjectMeta{ Name: snapshotCFSCloneName, Namespace: args.Namespace, } err = snapshotter.CreateFromSource(ctx, src, true, snapshotMeta, kansnapshot.ObjectMeta{}) if err != nil { return errors.Wrapf(err, "failed to clone snapshot from source (%s)", snapshotCFSCloneName) } return nil } //go:generate go run github.com/golang/mock/mockgen -destination=mocks/mock_cleaner.go -package=mocks . Cleaner type Cleaner interface { DeletePVC(ctx context.Context, pvcName string, namespace string) error DeletePod(ctx context.Context, podName string, namespace string) error DeleteSnapshot(ctx context.Context, snapshotName string, namespace string, SnapshotGroupVersion *metav1.GroupVersionForDiscovery) error } type cleanse struct { kubeCli kubernetes.Interface dynCli dynamic.Interface } func NewCleaner(kubeCli kubernetes.Interface, dynCli dynamic.Interface) Cleaner { return &cleanse{ kubeCli: kubeCli, dynCli: dynCli, } } func (c *cleanse) DeletePVC(ctx context.Context, pvcName string, namespace string) error { if c.kubeCli == nil { return fmt.Errorf("kubeCli not initialized") } return c.kubeCli.CoreV1().PersistentVolumeClaims(namespace).Delete(ctx, pvcName, metav1.DeleteOptions{}) } func (c *cleanse) DeletePod(ctx context.Context, podName string, namespace string) error { if c.kubeCli == nil { return fmt.Errorf("kubeCli not initialized") } return c.kubeCli.CoreV1().Pods(namespace).Delete(ctx, podName, metav1.DeleteOptions{}) } func (c *cleanse) DeleteSnapshot(ctx context.Context, snapshotName string, namespace string, SnapshotGroupVersion *metav1.GroupVersionForDiscovery) error { if c.dynCli == nil { return fmt.Errorf("dynCli not initialized") } if SnapshotGroupVersion == nil || SnapshotGroupVersion.Version == "" { return fmt.Errorf("snapshot group version not provided") } VolSnapGVR := schema.GroupVersionResource{Group: common.SnapGroupName, Version: SnapshotGroupVersion.Version, Resource: common.VolumeSnapshotResourcePlural} return c.dynCli.Resource(VolSnapGVR).Namespace(namespace).Delete(ctx, snapshotName, metav1.DeleteOptions{}) } //go:generate go run github.com/golang/mock/mockgen -destination=mocks/mock_api_version_fetcher.go -package=mocks . ApiVersionFetcher type ApiVersionFetcher interface { GetCSISnapshotGroupVersion() (*metav1.GroupVersionForDiscovery, error) } type apiVersionFetch struct { kubeCli kubernetes.Interface } func (p *apiVersionFetch) GetCSISnapshotGroupVersion() (*metav1.GroupVersionForDiscovery, error) { if p.kubeCli == nil { return nil, fmt.Errorf("kubeCli not initialized") } groups, _, err := p.kubeCli.Discovery().ServerGroupsAndResources() if err != nil { return nil, err } for _, group := range groups { if group.Name == common.SnapGroupName { return &group.PreferredVersion, nil } } return nil, fmt.Errorf("snapshot API group not found") } //go:generate go run github.com/golang/mock/mockgen -destination=mocks/mock_data_validator.go -package=mocks . DataValidator type DataValidator interface { FetchPodData(ctx context.Context, podName string, podNamespace string) (string, error) } type validateData struct { kubeCli kubernetes.Interface } func (p *validateData) FetchPodData(ctx context.Context, podName string, podNamespace string) (string, error) { if p.kubeCli == nil { return "", fmt.Errorf("kubeCli not initialized") } stdout, _, err := kube.Exec(ctx, p.kubeCli, podNamespace, podName, "", []string{"sh", "-c", "cat /data/out.txt"}, nil) return stdout, err } //go:generate go run github.com/golang/mock/mockgen -destination=mocks/mock_port_forwarder.go -package=mocks . PortForwarder type PortForwarder interface { FetchRestConfig() (*rest.Config, error) PortForwardAPod(req *types.PortForwardAPodRequest) error } type portforward struct{} func (p *portforward) PortForwardAPod(req *types.PortForwardAPodRequest) error { path := fmt.Sprintf("/api/v1/namespaces/%s/pods/%s/portforward", req.Pod.Namespace, req.Pod.Name) hostIP := strings.TrimPrefix(req.RestConfig.Host, "https://") transport, upgrader, err := spdy.RoundTripperFor(req.RestConfig) if err != nil { return err } dialer := spdy.NewDialer(upgrader, &http.Client{Transport: transport}, http.MethodPost, &url.URL{Scheme: "https", Path: path, Host: hostIP}) fw, err := pf.New(dialer, []string{fmt.Sprintf("%d:%d", req.LocalPort, req.PodPort)}, req.StopCh, req.ReadyCh, &req.OutStream, &req.ErrOutStream) if err != nil { return err } return fw.ForwardPorts() } func (p *portforward) FetchRestConfig() (*rest.Config, error) { return kube.LoadConfig() } //go:generate go run github.com/golang/mock/mockgen -destination=mocks/mock_kube_executor.go -package=mocks . KubeExecutor type KubeExecutor interface { Exec(ctx context.Context, namespace string, podName string, ContainerName string, command []string) (string, error) } type kubeExec struct { kubeCli kubernetes.Interface } func (k *kubeExec) Exec(ctx context.Context, namespace string, podName string, ContainerName string, command []string) (string, error) { if k.kubeCli == nil { return "", fmt.Errorf("kubeCli not initialized") } stdout, _, err := kube.Exec(ctx, k.kubeCli, namespace, podName, ContainerName, command, nil) return stdout, err } ================================================ FILE: pkg/csi/csi_ops_test.go ================================================ package csi import ( "context" "errors" "fmt" "strings" kansnapshot "github.com/kanisterio/kanister/pkg/kube/snapshot" "github.com/kastenhq/kubestr/pkg/common" "github.com/kastenhq/kubestr/pkg/csi/types" snapv1 "github.com/kubernetes-csi/external-snapshotter/client/v4/apis/volumesnapshot/v1" pkgerrors "github.com/pkg/errors" . "gopkg.in/check.v1" v1 "k8s.io/api/core/v1" sv1 "k8s.io/api/storage/v1" "k8s.io/apimachinery/pkg/api/resource" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/schema" discoveryfake "k8s.io/client-go/discovery/fake" "k8s.io/client-go/dynamic" fakedynamic "k8s.io/client-go/dynamic/fake" "k8s.io/client-go/kubernetes" "k8s.io/client-go/kubernetes/fake" k8stesting "k8s.io/client-go/testing" ) func (s *CSITestSuite) TestGetDriverNameFromUVSC(c *C) { for _, tc := range []struct { vsc unstructured.Unstructured version string expOut string }{ { vsc: unstructured.Unstructured{ Object: map[string]interface{}{ common.VolSnapClassDriverKey: "p2", }, }, version: common.SnapshotVersion, expOut: "p2", }, { vsc: unstructured.Unstructured{ Object: map[string]interface{}{}, }, version: common.SnapshotVersion, expOut: "", }, { vsc: unstructured.Unstructured{ Object: map[string]interface{}{ common.VolSnapClassDriverKey: map[string]string{}, }, }, version: common.SnapshotVersion, expOut: "", }, } { driverName := getDriverNameFromUVSC(tc.vsc, tc.version) c.Assert(driverName, Equals, tc.expOut) } } func (s *CSITestSuite) TestGetCSISnapshotGroupVersion(c *C) { for _, tc := range []struct { cli kubernetes.Interface resources []*metav1.APIResourceList errChecker Checker gvChecker Checker }{ { cli: fake.NewSimpleClientset(), resources: []*metav1.APIResourceList{ { GroupVersion: "/////", }, }, errChecker: NotNil, gvChecker: IsNil, }, { cli: fake.NewSimpleClientset(), resources: []*metav1.APIResourceList{ { GroupVersion: "snapshot.storage.k8s.io/v1", }, }, errChecker: IsNil, gvChecker: NotNil, }, { cli: fake.NewSimpleClientset(), resources: []*metav1.APIResourceList{ { GroupVersion: "notrbac.authorization.k8s.io/v1", }, }, errChecker: NotNil, gvChecker: IsNil, }, { cli: nil, resources: nil, errChecker: NotNil, gvChecker: IsNil, }, } { cli := tc.cli if cli != nil { cli.Discovery().(*discoveryfake.FakeDiscovery).Resources = tc.resources } p := &apiVersionFetch{kubeCli: cli} gv, err := p.GetCSISnapshotGroupVersion() c.Check(err, tc.errChecker) c.Check(gv, tc.gvChecker) } } func (s *CSITestSuite) TestValidatePVC(c *C) { ctx := context.Background() ops := NewArgumentValidator(fake.NewSimpleClientset(), nil) pvc, err := ops.ValidatePVC(ctx, "pvc", "ns") c.Check(err, NotNil) c.Check(pvc, IsNil) ops = NewArgumentValidator(fake.NewSimpleClientset(&v1.PersistentVolumeClaim{ ObjectMeta: metav1.ObjectMeta{ Namespace: "ns", Name: "pvc", }, }), nil) pvc, err = ops.ValidatePVC(ctx, "pvc", "ns") c.Check(err, IsNil) c.Check(pvc, NotNil) ops = NewArgumentValidator(nil, nil) pvc, err = ops.ValidatePVC(ctx, "pvc", "ns") c.Check(err, NotNil) c.Check(pvc, IsNil) } func (s *CSITestSuite) TestFetchPV(c *C) { ctx := context.Background() ops := NewArgumentValidator(fake.NewSimpleClientset(), nil) pv, err := ops.FetchPV(ctx, "pv") c.Check(err, NotNil) c.Check(pv, IsNil) ops = NewArgumentValidator(fake.NewSimpleClientset(&v1.PersistentVolume{ ObjectMeta: metav1.ObjectMeta{ Name: "pv", }, }), nil) pv, err = ops.FetchPV(ctx, "pv") c.Check(err, IsNil) c.Check(pv, NotNil) ops = NewArgumentValidator(nil, nil) pv, err = ops.FetchPV(ctx, "pv") c.Check(err, NotNil) c.Check(pv, IsNil) } func (s *CSITestSuite) TestValidateNamespace(c *C) { ctx := context.Background() ops := NewArgumentValidator(fake.NewSimpleClientset(), nil) err := ops.ValidateNamespace(ctx, "ns") c.Check(err, NotNil) ops = NewArgumentValidator(fake.NewSimpleClientset(&v1.Namespace{ ObjectMeta: metav1.ObjectMeta{ Name: "ns", }, }), nil) err = ops.ValidateNamespace(ctx, "ns") c.Check(err, IsNil) ops = NewArgumentValidator(nil, nil) err = ops.ValidateNamespace(ctx, "ns") c.Check(err, NotNil) } func (s *CSITestSuite) TestValidateStorageClass(c *C) { ctx := context.Background() ops := &validateOperations{ kubeCli: fake.NewSimpleClientset(), } sc, err := ops.ValidateStorageClass(ctx, "sc") c.Check(err, NotNil) c.Check(sc, IsNil) ops = &validateOperations{ kubeCli: fake.NewSimpleClientset(&sv1.StorageClass{ ObjectMeta: metav1.ObjectMeta{ Name: "sc", }, }), } sc, err = ops.ValidateStorageClass(ctx, "sc") c.Check(err, IsNil) c.Check(sc, NotNil) ops = &validateOperations{ kubeCli: nil, } sc, err = ops.ValidateStorageClass(ctx, "sc") c.Check(err, NotNil) c.Check(sc, IsNil) } func (s *CSITestSuite) TestValidateVolumeSnapshotClass(c *C) { ctx := context.Background() for _, tc := range []struct { ops *validateOperations groupVersion string version string errChecker Checker uVCSChecker Checker }{ { ops: &validateOperations{ dynCli: fakedynamic.NewSimpleDynamicClient(runtime.NewScheme()), }, groupVersion: common.SnapshotVersion, errChecker: NotNil, uVCSChecker: IsNil, }, { ops: &validateOperations{ dynCli: fakedynamic.NewSimpleDynamicClient( runtime.NewScheme(), &unstructured.Unstructured{ Object: map[string]interface{}{ "apiVersion": fmt.Sprintf("%s/%s", kansnapshot.GroupName, kansnapshot.Version), "kind": "VolumeSnapshotClass", "metadata": map[string]interface{}{ "name": "vsc", }, "driver": "somesnapshotter", "deletionPolicy": "Delete", }, }, ), }, groupVersion: common.SnapshotVersion, version: kansnapshot.Version, errChecker: IsNil, uVCSChecker: NotNil, }, } { uVSC, err := tc.ops.ValidateVolumeSnapshotClass(ctx, "vsc", &metav1.GroupVersionForDiscovery{GroupVersion: tc.groupVersion, Version: tc.version}) c.Check(err, tc.errChecker) c.Check(uVSC, tc.uVCSChecker) } } func (s *CSITestSuite) TestCreatePVC(c *C) { ctx := context.Background() resourceQuantity := resource.MustParse("1Gi") for _, tc := range []struct { cli kubernetes.Interface args *types.CreatePVCArgs failCreates bool errChecker Checker pvcChecker Checker }{ { cli: fake.NewSimpleClientset(), args: &types.CreatePVCArgs{ GenerateName: "genName", StorageClass: "sc", Namespace: "ns", DataSource: &v1.TypedLocalObjectReference{ Name: "ds", }, RestoreSize: &resourceQuantity, }, errChecker: IsNil, pvcChecker: NotNil, }, { cli: fake.NewSimpleClientset(), args: &types.CreatePVCArgs{ GenerateName: "genName", StorageClass: "sc", Namespace: "ns", DataSource: &v1.TypedLocalObjectReference{ Name: "ds", }, }, errChecker: IsNil, pvcChecker: NotNil, }, { cli: fake.NewSimpleClientset(), args: &types.CreatePVCArgs{ GenerateName: "genName", StorageClass: "sc", Namespace: "ns", }, errChecker: IsNil, pvcChecker: NotNil, }, { cli: fake.NewSimpleClientset(), args: &types.CreatePVCArgs{ GenerateName: "genName", StorageClass: "sc", Namespace: "ns", }, failCreates: true, errChecker: NotNil, pvcChecker: NotNil, }, { cli: fake.NewSimpleClientset(), args: &types.CreatePVCArgs{ GenerateName: "", StorageClass: "sc", Namespace: "ns", }, errChecker: NotNil, pvcChecker: IsNil, }, { cli: fake.NewSimpleClientset(), args: &types.CreatePVCArgs{ GenerateName: "something", StorageClass: "", Namespace: "ns", }, errChecker: NotNil, pvcChecker: IsNil, }, { cli: fake.NewSimpleClientset(), args: &types.CreatePVCArgs{ GenerateName: "Something", StorageClass: "sc", Namespace: "", }, errChecker: NotNil, pvcChecker: IsNil, }, { cli: nil, args: &types.CreatePVCArgs{}, errChecker: NotNil, pvcChecker: IsNil, }, } { appCreator := NewApplicationCreator(tc.cli, 0) creator := appCreator.(*applicationCreate) if tc.failCreates { creator.kubeCli.(*fake.Clientset).PrependReactor("create", "persistentvolumeclaims", func(action k8stesting.Action) (handled bool, ret runtime.Object, err error) { return true, nil, errors.New("Error creating object") }) } pvc, err := creator.CreatePVC(ctx, tc.args) c.Check(pvc, tc.pvcChecker) c.Check(err, tc.errChecker) if pvc != nil && err == nil { _, ok := pvc.Labels[createdByLabel] c.Assert(ok, Equals, true) c.Assert(pvc.GenerateName, Equals, tc.args.GenerateName) c.Assert(pvc.Namespace, Equals, tc.args.Namespace) c.Assert(pvc.Spec.AccessModes, DeepEquals, []v1.PersistentVolumeAccessMode{v1.ReadWriteOnce}) c.Assert(*pvc.Spec.StorageClassName, Equals, tc.args.StorageClass) c.Assert(pvc.Spec.DataSource, DeepEquals, tc.args.DataSource) if tc.args.RestoreSize != nil { c.Assert(pvc.Spec.Resources, DeepEquals, v1.VolumeResourceRequirements{ Requests: v1.ResourceList{ v1.ResourceStorage: *tc.args.RestoreSize, }, }) } else { c.Assert(pvc.Spec.Resources, DeepEquals, v1.VolumeResourceRequirements{ Requests: v1.ResourceList{ v1.ResourceStorage: resource.MustParse("1Gi"), }, }) } } } } func (s *CSITestSuite) TestCreatePod(c *C) { ctx := context.Background() for _, tc := range []struct { description string cli kubernetes.Interface args *types.CreatePodArgs failCreates bool errChecker Checker podChecker Checker }{ { description: "pod with container image and runAsUser 1000 created", cli: fake.NewSimpleClientset(), args: &types.CreatePodArgs{ GenerateName: "name", Namespace: "ns", Command: []string{"somecommand"}, RunAsUser: 1000, ContainerImage: "containerimage", PVCMap: map[string]types.VolumePath{ "pvcname": { MountPath: "/mnt/fs", }, }, }, errChecker: IsNil, podChecker: NotNil, }, { description: "Pod creation error on kubeCli", cli: fake.NewSimpleClientset(), args: &types.CreatePodArgs{ GenerateName: "name", Namespace: "ns", Command: []string{"somecommand"}, PVCMap: map[string]types.VolumePath{ "pvcname": { MountPath: "/mnt/fs", }, }, }, failCreates: true, errChecker: NotNil, podChecker: NotNil, }, { description: "Neither Name nor GenerateName set", cli: fake.NewSimpleClientset(), args: &types.CreatePodArgs{ GenerateName: "", Namespace: "ns", Command: []string{"somecommand"}, PVCMap: map[string]types.VolumePath{ "pvcname": { MountPath: "/mnt/fs", }, }, }, errChecker: NotNil, podChecker: IsNil, }, { description: "Both Name and GenerateName set", cli: fake.NewSimpleClientset(), args: &types.CreatePodArgs{ GenerateName: "name", Name: "name", Namespace: "ns", Command: []string{"somecommand"}, PVCMap: map[string]types.VolumePath{ "pvcname": { MountPath: "/mnt/fs", }, }, }, errChecker: NotNil, podChecker: IsNil, }, { description: "Neither MountPath nor DevicePath set error", cli: fake.NewSimpleClientset(), args: &types.CreatePodArgs{ GenerateName: "name", Namespace: "ns", Command: []string{"somecommand"}, PVCMap: map[string]types.VolumePath{"pvcname": {}}, }, errChecker: NotNil, podChecker: IsNil, }, { description: "Both MountPath and DevicePath set error", cli: fake.NewSimpleClientset(), args: &types.CreatePodArgs{ GenerateName: "name", Namespace: "ns", Command: []string{"somecommand"}, PVCMap: map[string]types.VolumePath{ "pvcname": { MountPath: "/mnt/fs", DevicePath: "/mnt/dev", }, }, }, errChecker: NotNil, podChecker: IsNil, }, { description: "PVC name not set error", cli: fake.NewSimpleClientset(), args: &types.CreatePodArgs{ GenerateName: "name", Namespace: "ns", Command: []string{"somecommand"}, PVCMap: map[string]types.VolumePath{"": {MountPath: "/mnt/fs"}}, }, errChecker: NotNil, podChecker: IsNil, }, { description: "default namespace pod is created", cli: fake.NewSimpleClientset(), args: &types.CreatePodArgs{ GenerateName: "name", Namespace: "", Command: []string{"somecommand"}, PVCMap: map[string]types.VolumePath{ "pvcname": { MountPath: "/mnt/fs", }, }, }, errChecker: NotNil, podChecker: IsNil, }, { description: "ns namespace pod is created (GenerateName/MountPath)", cli: fake.NewSimpleClientset(), args: &types.CreatePodArgs{ GenerateName: "name", Namespace: "ns", Command: []string{"somecommand"}, PVCMap: map[string]types.VolumePath{ "pvcname": { MountPath: "/mnt/fs", }, }, }, errChecker: IsNil, podChecker: NotNil, }, { description: "ns namespace pod is created (Name/DevicePath)", cli: fake.NewSimpleClientset(), args: &types.CreatePodArgs{ Name: "name", Namespace: "ns", Command: []string{"somecommand"}, PVCMap: map[string]types.VolumePath{ "pvcname": { DevicePath: "/mnt/dev", }, }, }, errChecker: IsNil, podChecker: NotNil, }, { description: "kubeCli not initialized", cli: nil, args: &types.CreatePodArgs{}, errChecker: NotNil, podChecker: IsNil, }, } { fmt.Println("test:", tc.description) creator := &applicationCreate{kubeCli: tc.cli} if tc.failCreates { creator.kubeCli.(*fake.Clientset).PrependReactor("create", "pods", func(action k8stesting.Action) (handled bool, ret runtime.Object, err error) { return true, nil, errors.New("Error creating object") }) } pod, err := creator.CreatePod(ctx, tc.args) c.Check(pod, tc.podChecker) c.Check(err, tc.errChecker) if pod != nil && err == nil { _, ok := pod.Labels[createdByLabel] c.Assert(ok, Equals, true) if tc.args.GenerateName != "" { c.Assert(pod.GenerateName, Equals, tc.args.GenerateName) c.Assert(pod.Spec.Containers[0].Name, Equals, tc.args.GenerateName) } else { c.Assert(pod.Name, Equals, tc.args.Name) c.Assert(pod.Spec.Containers[0].Name, Equals, tc.args.Name) } c.Assert(pod.Namespace, Equals, tc.args.Namespace) c.Assert(len(pod.Spec.Containers), Equals, 1) c.Assert(pod.Spec.Containers[0].Command, DeepEquals, tc.args.Command) c.Assert(pod.Spec.Containers[0].Args, DeepEquals, tc.args.ContainerArgs) index := 0 pvcCount := 1 for pvcName, path := range tc.args.PVCMap { if len(path.MountPath) != 0 { c.Assert(pod.Spec.Containers[0].VolumeMounts[index], DeepEquals, v1.VolumeMount{ Name: fmt.Sprintf("persistent-storage-%d", pvcCount), MountPath: path.MountPath, }) c.Assert(pod.Spec.Containers[0].VolumeDevices, IsNil) } else { c.Assert(pod.Spec.Containers[0].VolumeDevices[index], DeepEquals, v1.VolumeDevice{ Name: fmt.Sprintf("persistent-storage-%d", pvcCount), DevicePath: path.DevicePath, }) c.Assert(pod.Spec.Containers[0].VolumeMounts, IsNil) } c.Assert(pod.Spec.Volumes[index], DeepEquals, v1.Volume{ Name: fmt.Sprintf("persistent-storage-%d", pvcCount), VolumeSource: v1.VolumeSource{ PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{ ClaimName: pvcName, }, }, }) index++ pvcCount++ } if tc.args.ContainerImage == "" { c.Assert(pod.Spec.Containers[0].Image, Equals, common.DefaultPodImage) } else { c.Assert(pod.Spec.Containers[0].Image, Equals, tc.args.ContainerImage) } if tc.args.RunAsUser > 0 { c.Assert(pod.Spec.SecurityContext, DeepEquals, &v1.PodSecurityContext{ RunAsUser: &tc.args.RunAsUser, FSGroup: &tc.args.RunAsUser, }) } else { c.Check(pod.Spec.SecurityContext, IsNil) } } } } func (s *CSITestSuite) TestCreateSnapshot(c *C) { ctx := context.Background() for _, tc := range []struct { snapshotter kansnapshot.Snapshotter args *types.CreateSnapshotArgs snapChecker Checker errChecker Checker }{ { snapshotter: &fakeSnapshotter{ getSnap: &snapv1.VolumeSnapshot{ ObjectMeta: metav1.ObjectMeta{ Name: "createdName", }, }, }, args: &types.CreateSnapshotArgs{ Namespace: "ns", PVCName: "pvc", VolumeSnapshotClass: "vsc", SnapshotName: "snap1", }, snapChecker: NotNil, errChecker: IsNil, }, { snapshotter: &fakeSnapshotter{ getErr: fmt.Errorf("get Error"), }, args: &types.CreateSnapshotArgs{ Namespace: "ns", PVCName: "pvc", VolumeSnapshotClass: "vsc", SnapshotName: "snap1", }, snapChecker: IsNil, errChecker: NotNil, }, { snapshotter: &fakeSnapshotter{ createErr: fmt.Errorf("create Error"), }, args: &types.CreateSnapshotArgs{ Namespace: "ns", PVCName: "pvc", VolumeSnapshotClass: "vsc", SnapshotName: "snap1", }, snapChecker: IsNil, errChecker: NotNil, }, { snapshotter: &fakeSnapshotter{ createErr: fmt.Errorf("create Error"), }, args: &types.CreateSnapshotArgs{ Namespace: "", PVCName: "pvc", VolumeSnapshotClass: "vsc", SnapshotName: "snap1", }, snapChecker: IsNil, errChecker: NotNil, }, { snapshotter: &fakeSnapshotter{ createErr: fmt.Errorf("create Error"), }, args: &types.CreateSnapshotArgs{ Namespace: "ns", PVCName: "", VolumeSnapshotClass: "vsc", SnapshotName: "snap1", }, snapChecker: IsNil, errChecker: NotNil, }, { snapshotter: &fakeSnapshotter{ createErr: fmt.Errorf("create Error"), }, args: &types.CreateSnapshotArgs{ Namespace: "ns", PVCName: "pvc", VolumeSnapshotClass: "", SnapshotName: "snap1", }, snapChecker: IsNil, errChecker: NotNil, }, { snapshotter: &fakeSnapshotter{ createErr: fmt.Errorf("create Error"), }, args: &types.CreateSnapshotArgs{ Namespace: "ns", PVCName: "pvc", VolumeSnapshotClass: "vsc", SnapshotName: "", }, snapChecker: IsNil, errChecker: NotNil, }, { snapshotter: &fakeSnapshotter{}, snapChecker: IsNil, errChecker: NotNil, }, { snapChecker: IsNil, errChecker: NotNil, }, } { snapCreator := &snapshotCreate{} snapshot, err := snapCreator.CreateSnapshot(ctx, tc.snapshotter, tc.args) c.Check(snapshot, tc.snapChecker) c.Check(err, tc.errChecker) } } func (s *CSITestSuite) TestCreateFromSourceCheck(c *C) { ctx := context.Background() gv := &metav1.GroupVersionForDiscovery{Version: kansnapshot.Version} for _, tc := range []struct { dyncli dynamic.Interface snapshotter kansnapshot.Snapshotter args *types.CreateFromSourceCheckArgs groupVersion *metav1.GroupVersionForDiscovery errChecker Checker }{ { dyncli: fakedynamic.NewSimpleDynamicClient(runtime.NewScheme()), snapshotter: &fakeSnapshotter{ gsSrc: &kansnapshot.Source{ Handle: "handle", Driver: "driver", }, }, args: &types.CreateFromSourceCheckArgs{ VolumeSnapshotClass: "vsc", SnapshotName: "snapshot", Namespace: "ns", }, groupVersion: gv, errChecker: IsNil, }, { dyncli: fakedynamic.NewSimpleDynamicClient(runtime.NewScheme()), snapshotter: &fakeSnapshotter{ gsSrc: &kansnapshot.Source{ Handle: "handle", Driver: "driver", }, cfsErr: fmt.Errorf("cfs error"), }, args: &types.CreateFromSourceCheckArgs{ VolumeSnapshotClass: "vsc", SnapshotName: "snapshot", Namespace: "ns", }, groupVersion: gv, errChecker: NotNil, }, { dyncli: fakedynamic.NewSimpleDynamicClient(runtime.NewScheme()), snapshotter: &fakeSnapshotter{ gsErr: fmt.Errorf("gs error"), }, args: &types.CreateFromSourceCheckArgs{ VolumeSnapshotClass: "vsc", SnapshotName: "snapshot", Namespace: "ns", }, groupVersion: gv, errChecker: NotNil, }, { dyncli: fakedynamic.NewSimpleDynamicClient(runtime.NewScheme()), snapshotter: &fakeSnapshotter{ cvsErr: fmt.Errorf("cvs error"), }, args: &types.CreateFromSourceCheckArgs{ VolumeSnapshotClass: "vsc", SnapshotName: "snapshot", Namespace: "ns", }, groupVersion: gv, errChecker: NotNil, }, { dyncli: fakedynamic.NewSimpleDynamicClient(runtime.NewScheme()), snapshotter: &fakeSnapshotter{}, args: &types.CreateFromSourceCheckArgs{ VolumeSnapshotClass: "", SnapshotName: "snapshot", Namespace: "ns", }, groupVersion: gv, errChecker: NotNil, }, { dyncli: fakedynamic.NewSimpleDynamicClient(runtime.NewScheme()), snapshotter: &fakeSnapshotter{}, args: &types.CreateFromSourceCheckArgs{ VolumeSnapshotClass: "vsc", SnapshotName: "", Namespace: "ns", }, groupVersion: gv, errChecker: NotNil, }, { dyncli: fakedynamic.NewSimpleDynamicClient(runtime.NewScheme()), snapshotter: &fakeSnapshotter{}, args: &types.CreateFromSourceCheckArgs{ VolumeSnapshotClass: "vsc", SnapshotName: "snapshot", Namespace: "", }, groupVersion: gv, errChecker: NotNil, }, { dyncli: fakedynamic.NewSimpleDynamicClient(runtime.NewScheme()), snapshotter: &fakeSnapshotter{}, groupVersion: gv, errChecker: NotNil, }, { dyncli: fakedynamic.NewSimpleDynamicClient(runtime.NewScheme()), groupVersion: gv, errChecker: NotNil, }, { dyncli: fakedynamic.NewSimpleDynamicClient(runtime.NewScheme()), groupVersion: nil, errChecker: NotNil, }, { dyncli: nil, errChecker: NotNil, }, } { snapCreator := &snapshotCreate{ dynCli: tc.dyncli, } err := snapCreator.CreateFromSourceCheck(ctx, tc.snapshotter, tc.args, tc.groupVersion) c.Check(err, tc.errChecker) } } type fakeSnapshotter struct { name string createErr error getSnap *snapv1.VolumeSnapshot getErr error cvsErr error gsSrc *kansnapshot.Source gsErr error cfsErr error } func (f *fakeSnapshotter) GroupVersion(ctx context.Context) schema.GroupVersion { return schema.GroupVersion{ Group: common.SnapGroupName, Version: "v1", } } func (f *fakeSnapshotter) GetVolumeSnapshotClass(ctx context.Context, annotationKey, annotationValue, storageClassName string) (string, error) { return "", nil } func (f *fakeSnapshotter) CloneVolumeSnapshotClass(ctx context.Context, sourceClassName, targetClassName, newDeletionPolicy string, excludeAnnotations []string) error { return f.cvsErr } func (f *fakeSnapshotter) Create(ctx context.Context, pvcName string, snapshotClass *string, waitForReady bool, snapshotMeta kansnapshot.ObjectMeta) error { return f.createErr } func (f *fakeSnapshotter) Get(ctx context.Context, name, namespace string) (*snapv1.VolumeSnapshot, error) { return f.getSnap, f.getErr } func (f *fakeSnapshotter) Delete(ctx context.Context, name, namespace string) (*snapv1.VolumeSnapshot, error) { return nil, nil } func (f *fakeSnapshotter) DeleteContent(ctx context.Context, name string) error { return nil } func (f *fakeSnapshotter) Clone(ctx context.Context, name, namespace string, waitForReady bool, snapshotMeta, contentMeta kansnapshot.ObjectMeta) error { return nil } func (f *fakeSnapshotter) GetSource(ctx context.Context, snapshotName, namespace string) (*kansnapshot.Source, error) { return f.gsSrc, f.gsErr } func (f *fakeSnapshotter) CreateFromSource(ctx context.Context, source *kansnapshot.Source, waitForReady bool, snapshotMeta, contentMeta kansnapshot.ObjectMeta) error { return f.cfsErr } func (f *fakeSnapshotter) CreateContentFromSource(ctx context.Context, source *kansnapshot.Source, snapshotName, snapshotNs, deletionPolicy string, contentMeta kansnapshot.ObjectMeta) error { return nil } func (f *fakeSnapshotter) WaitOnReadyToUse(ctx context.Context, snapshotName, namespace string) error { return nil } func (f *fakeSnapshotter) List(ctx context.Context, namespace string, labels map[string]string) (*snapv1.VolumeSnapshotList, error) { return nil, nil } func (s *CSITestSuite) TestDeletePVC(c *C) { ctx := context.Background() for _, tc := range []struct { cli kubernetes.Interface pvcName string namespace string errChecker Checker }{ { cli: fake.NewSimpleClientset(), pvcName: "pvc", namespace: "ns", errChecker: NotNil, }, { cli: fake.NewSimpleClientset(&v1.PersistentVolumeClaim{ ObjectMeta: metav1.ObjectMeta{ Name: "pvc", Namespace: "notns", }, }), pvcName: "pvc", namespace: "ns", errChecker: NotNil, }, { cli: fake.NewSimpleClientset(&v1.PersistentVolumeClaim{ ObjectMeta: metav1.ObjectMeta{ Name: "pvc", Namespace: "ns", }, }), pvcName: "pvc", namespace: "ns", errChecker: IsNil, }, { cli: nil, pvcName: "pvc", namespace: "ns", errChecker: NotNil, }, } { cleaner := NewCleaner(tc.cli, nil) err := cleaner.DeletePVC(ctx, tc.pvcName, tc.namespace) c.Check(err, tc.errChecker) } } func (s *CSITestSuite) TestDeletePod(c *C) { ctx := context.Background() for _, tc := range []struct { cli kubernetes.Interface podName string namespace string errChecker Checker }{ { cli: fake.NewSimpleClientset(), podName: "pod", namespace: "ns", errChecker: NotNil, }, { cli: fake.NewSimpleClientset(&v1.Pod{ ObjectMeta: metav1.ObjectMeta{ Name: "pod", Namespace: "notns", }, }), podName: "pod", namespace: "ns", errChecker: NotNil, }, { cli: fake.NewSimpleClientset(&v1.Pod{ ObjectMeta: metav1.ObjectMeta{ Name: "pod", Namespace: "ns", }, }), podName: "pod", namespace: "ns", errChecker: IsNil, }, { cli: nil, podName: "pod", namespace: "ns", errChecker: NotNil, }, } { cleaner := &cleanse{ kubeCli: tc.cli, } err := cleaner.DeletePod(ctx, tc.podName, tc.namespace) c.Check(err, tc.errChecker) } } func (s *CSITestSuite) TestDeleteSnapshot(c *C) { ctx := context.Background() for _, tc := range []struct { cli dynamic.Interface snapshotName string namespace string groupVersion *metav1.GroupVersionForDiscovery errChecker Checker }{ { cli: fakedynamic.NewSimpleDynamicClient(runtime.NewScheme()), snapshotName: "snap1", namespace: "ns", groupVersion: &metav1.GroupVersionForDiscovery{ Version: kansnapshot.Version, }, errChecker: NotNil, }, { cli: fakedynamic.NewSimpleDynamicClient(runtime.NewScheme(), &unstructured.Unstructured{ Object: map[string]interface{}{ "apiVersion": fmt.Sprintf("%s/%s", kansnapshot.GroupName, "v1beta1"), "kind": "VolumeSnapshot", "metadata": map[string]interface{}{ "name": "snap1", "namespace": "ns", }, }, }), snapshotName: "snap1", namespace: "ns", errChecker: NotNil, groupVersion: &metav1.GroupVersionForDiscovery{ Version: kansnapshot.Version, }, }, { cli: fakedynamic.NewSimpleDynamicClient(runtime.NewScheme(), &unstructured.Unstructured{ Object: map[string]interface{}{ "apiVersion": fmt.Sprintf("%s/%s", kansnapshot.GroupName, kansnapshot.Version), "kind": "VolumeSnapshot", "metadata": map[string]interface{}{ "name": "snap1", "namespace": "ns", }, }, }), snapshotName: "snap1", namespace: "ns", errChecker: IsNil, groupVersion: &metav1.GroupVersionForDiscovery{ Version: kansnapshot.Version, }, }, { cli: fakedynamic.NewSimpleDynamicClient(runtime.NewScheme()), snapshotName: "pod", namespace: "ns", errChecker: NotNil, }, { cli: nil, snapshotName: "pod", namespace: "ns", errChecker: NotNil, }, } { cleaner := NewCleaner(nil, tc.cli) err := cleaner.DeleteSnapshot(ctx, tc.snapshotName, tc.namespace, tc.groupVersion) c.Check(err, tc.errChecker) } } func (s *CSITestSuite) TestWaitForPVCReady(c *C) { ctx := context.Background() const ns = "ns" const pvc = "pvc" boundPVC := s.getPVC(ns, pvc, v1.ClaimBound) claimLostPVC := s.getPVC(ns, pvc, v1.ClaimLost) stuckPVC := s.getPVC(ns, pvc, "") normalGetFunc := func(action k8stesting.Action) (handled bool, ret runtime.Object, err error) { return } deadlineExceededGetFunc := func(action k8stesting.Action) (handled bool, ret runtime.Object, err error) { return true, nil, pkgerrors.Wrapf(context.DeadlineExceeded, "some wrapped error") } warningEvent := v1.Event{ Type: v1.EventTypeWarning, Message: "waiting for a volume to be created, either by external provisioner \"ceph.com/rbd\" or manually created by system administrator", } for _, tc := range []struct { description string cli kubernetes.Interface pvcGetFunc func(action k8stesting.Action) (handled bool, ret runtime.Object, err error) eventsList []v1.Event errChecker Checker errString string }{ { description: "Happy path", cli: fake.NewSimpleClientset(boundPVC), pvcGetFunc: normalGetFunc, errChecker: IsNil, }, { description: "Missing PVC", cli: fake.NewSimpleClientset(), pvcGetFunc: normalGetFunc, errChecker: NotNil, errString: "could not find PVC", }, { description: "PVC ClaimLost", cli: fake.NewSimpleClientset(claimLostPVC), pvcGetFunc: normalGetFunc, errChecker: NotNil, errString: "ClaimLost", }, { description: "context.DeadlineExceeded but no event warnings", cli: fake.NewSimpleClientset(stuckPVC), pvcGetFunc: deadlineExceededGetFunc, errChecker: NotNil, errString: context.DeadlineExceeded.Error(), }, { description: "context.DeadlineExceeded, unable to provision PVC", cli: fake.NewSimpleClientset(stuckPVC), pvcGetFunc: deadlineExceededGetFunc, eventsList: []v1.Event{warningEvent}, errChecker: NotNil, errString: warningEvent.Message, }, } { fmt.Println("test:", tc.description) creator := &applicationCreate{kubeCli: tc.cli} creator.kubeCli.(*fake.Clientset).PrependReactor("get", "persistentvolumeclaims", tc.pvcGetFunc) creator.kubeCli.(*fake.Clientset).PrependReactor("list", "events", func(action k8stesting.Action) (handled bool, ret runtime.Object, err error) { return true, &v1.EventList{Items: tc.eventsList}, nil }) err := creator.WaitForPVCReady(ctx, ns, pvc) c.Check(err, tc.errChecker) if err != nil { c.Assert(strings.Contains(err.Error(), tc.errString), Equals, true) } } } func (s *CSITestSuite) getPVC(ns, pvc string, phase v1.PersistentVolumeClaimPhase) *v1.PersistentVolumeClaim { return &v1.PersistentVolumeClaim{ ObjectMeta: metav1.ObjectMeta{ Name: pvc, Namespace: ns, }, Status: v1.PersistentVolumeClaimStatus{ Phase: phase, }, } } func (s *CSITestSuite) TestWaitForPodReady(c *C) { ctx := context.Background() const ns = "ns" const podName = "pod" readyPod := &v1.Pod{ ObjectMeta: metav1.ObjectMeta{ Namespace: ns, Name: podName, }, Spec: v1.PodSpec{ Containers: []v1.Container{ {Name: "container-0"}, }, }, Status: v1.PodStatus{ Phase: v1.PodRunning, }, } warningEvent := v1.Event{ Type: v1.EventTypeWarning, Message: "warning event", } for _, tc := range []struct { description string cli kubernetes.Interface eventsList []v1.Event errChecker Checker errString string }{ { description: "Happy path", cli: fake.NewSimpleClientset(readyPod), errChecker: IsNil, }, { description: "Not found", cli: fake.NewSimpleClientset(), errChecker: NotNil, errString: "not found", }, { description: "Pod events", cli: fake.NewSimpleClientset(), errChecker: NotNil, errString: "had issues creating Pod", eventsList: []v1.Event{warningEvent}, }, { description: "No CLI", errChecker: NotNil, errString: "kubeCli not initialized", }, } { fmt.Println("TestWaitForPodReady:", tc.description) creator := &applicationCreate{kubeCli: tc.cli} if len(tc.eventsList) > 0 { creator.kubeCli.(*fake.Clientset).PrependReactor("list", "events", func(action k8stesting.Action) (handled bool, ret runtime.Object, err error) { return true, &v1.EventList{Items: tc.eventsList}, nil }) } err := creator.WaitForPodReady(ctx, ns, podName) c.Check(err, tc.errChecker) if err != nil { c.Assert(strings.Contains(err.Error(), tc.errString), Equals, true) } } } ================================================ FILE: pkg/csi/file_restore_inspector.go ================================================ package csi import ( "bytes" "context" "fmt" "os" "os/signal" "sync" "syscall" "github.com/kastenhq/kubestr/pkg/csi/types" snapv1 "github.com/kubernetes-csi/external-snapshotter/client/v4/apis/volumesnapshot/v1" "github.com/pkg/errors" v1 "k8s.io/api/core/v1" sv1 "k8s.io/api/storage/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/client-go/dynamic" "k8s.io/client-go/kubernetes" ) type FileRestoreRunner struct { KubeCli kubernetes.Interface DynCli dynamic.Interface restoreSteps FileRestoreStepper restorePVC *v1.PersistentVolumeClaim pod *v1.Pod snapshot *snapv1.VolumeSnapshot } func (f *FileRestoreRunner) RunFileRestore(ctx context.Context, args *types.FileRestoreArgs) error { f.restoreSteps = &fileRestoreSteps{ validateOps: &validateOperations{ kubeCli: f.KubeCli, dynCli: f.DynCli, }, versionFetchOps: &apiVersionFetch{ kubeCli: f.KubeCli, }, createAppOps: &applicationCreate{ kubeCli: f.KubeCli, }, portForwardOps: &portforward{}, kubeExecutor: &kubeExec{ kubeCli: f.KubeCli, }, cleanerOps: &cleanse{ kubeCli: f.KubeCli, dynCli: f.DynCli, }, } return f.RunFileRestoreHelper(ctx, args) } func (f *FileRestoreRunner) RunFileRestoreHelper(ctx context.Context, args *types.FileRestoreArgs) error { defer func() { f.restoreSteps.Cleanup(ctx, args, f.restorePVC, f.pod) }() if f.KubeCli == nil || f.DynCli == nil { return fmt.Errorf("cli uninitialized") } fmt.Println("Fetching the snapshot or PVC.") vs, restorePVC, sourcePVC, sc, err := f.restoreSteps.ValidateArgs(ctx, args) if err != nil { return errors.Wrap(err, "failed to validate arguments.") } f.snapshot = vs fmt.Println("Creating the browser pod & mounting the PVCs.") var restoreMountPath string f.pod, f.restorePVC, restoreMountPath, err = f.restoreSteps.CreateInspectorApplication(ctx, args, f.snapshot, restorePVC, sourcePVC, sc) if err != nil { return errors.Wrap(err, "failed to create inspector application.") } if args.Path != "" { fmt.Printf("Restoring the file %s\n", args.Path) _, err := f.restoreSteps.ExecuteCopyCommand(ctx, args, f.pod, restoreMountPath) if err != nil { return errors.Wrap(err, "failed to execute cp command in pod.") } if args.FromSnapshotName != "" { fmt.Printf("File restored from VolumeSnapshot %s to Source PVC %s.\n", f.snapshot.Name, sourcePVC.Name) } else { fmt.Printf("File restored from PVC %s to Source PVC %s.\n", f.restorePVC.Name, sourcePVC.Name) } return nil } fmt.Println("Forwarding the port.") err = f.restoreSteps.PortForwardAPod(f.pod, args.LocalPort) if err != nil { return errors.Wrap(err, "failed to port forward Pod.") } return nil } //go:generate go run github.com/golang/mock/mockgen -destination=mocks/mock_file_restore_stepper.go -package=mocks . FileRestoreStepper type FileRestoreStepper interface { ValidateArgs(ctx context.Context, args *types.FileRestoreArgs) (*snapv1.VolumeSnapshot, *v1.PersistentVolumeClaim, *v1.PersistentVolumeClaim, *sv1.StorageClass, error) CreateInspectorApplication(ctx context.Context, args *types.FileRestoreArgs, snapshot *snapv1.VolumeSnapshot, restorePVC *v1.PersistentVolumeClaim, sourcePVC *v1.PersistentVolumeClaim, storageClass *sv1.StorageClass) (*v1.Pod, *v1.PersistentVolumeClaim, string, error) ExecuteCopyCommand(ctx context.Context, args *types.FileRestoreArgs, pod *v1.Pod, restoreMountPath string) (string, error) PortForwardAPod(pod *v1.Pod, localPort int) error Cleanup(ctx context.Context, args *types.FileRestoreArgs, restorePVC *v1.PersistentVolumeClaim, pod *v1.Pod) } type fileRestoreSteps struct { validateOps ArgumentValidator versionFetchOps ApiVersionFetcher createAppOps ApplicationCreator portForwardOps PortForwarder cleanerOps Cleaner kubeExecutor KubeExecutor SnapshotGroupVersion *metav1.GroupVersionForDiscovery } func (f *fileRestoreSteps) ValidateArgs(ctx context.Context, args *types.FileRestoreArgs) (*snapv1.VolumeSnapshot, *v1.PersistentVolumeClaim, *v1.PersistentVolumeClaim, *sv1.StorageClass, error) { if err := args.Validate(); err != nil { return nil, nil, nil, nil, errors.Wrap(err, "failed to validate input arguments") } if err := f.validateOps.ValidateNamespace(ctx, args.Namespace); err != nil { return nil, nil, nil, nil, errors.Wrap(err, "failed to validate Namespace") } groupVersion, err := f.versionFetchOps.GetCSISnapshotGroupVersion() if err != nil { return nil, nil, nil, nil, errors.Wrap(err, "failed to fetch groupVersion") } f.SnapshotGroupVersion = groupVersion var snapshot *snapv1.VolumeSnapshot var restorePVC, sourcePVC *v1.PersistentVolumeClaim var sc *sv1.StorageClass if args.FromSnapshotName != "" { fmt.Println("Fetching the snapshot.") snapshot, err := f.validateOps.ValidateVolumeSnapshot(ctx, args.FromSnapshotName, args.Namespace, groupVersion) if err != nil { return nil, nil, nil, nil, errors.Wrap(err, "failed to validate VolumeSnapshot") } if args.ToPVCName == "" { fmt.Println("Fetching the source PVC from snapshot.") if *snapshot.Spec.Source.PersistentVolumeClaimName == "" { return nil, nil, nil, nil, errors.Wrap(err, "failed to fetch source PVC. VolumeSnapshot does not have a PVC as it's source") } sourcePVC, err = f.validateOps.ValidatePVC(ctx, *snapshot.Spec.Source.PersistentVolumeClaimName, args.Namespace) if err != nil { return nil, nil, nil, nil, errors.Wrap(err, "failed to validate source PVC") } } else { fmt.Println("Fetching the source PVC.") sourcePVC, err = f.validateOps.ValidatePVC(ctx, args.ToPVCName, args.Namespace) if err != nil { return nil, nil, nil, nil, errors.Wrap(err, "failed to validate source PVC") } } sc, err = f.validateOps.ValidateStorageClass(ctx, *sourcePVC.Spec.StorageClassName) if err != nil { return nil, nil, nil, nil, errors.Wrap(err, "failed to validate StorageClass for source PVC") } uVSC, err := f.validateOps.ValidateVolumeSnapshotClass(ctx, *snapshot.Spec.VolumeSnapshotClassName, groupVersion) if err != nil { return nil, nil, nil, nil, errors.Wrap(err, "failed to validate VolumeSnapshotClass") } vscDriver := getDriverNameFromUVSC(*uVSC, groupVersion.GroupVersion) if sc.Provisioner != vscDriver { return nil, nil, nil, nil, fmt.Errorf("provisioner for StorageClass (%s) and VolumeSnapshotClass driver (%s) are different", sc.Provisioner, vscDriver) } } else { fmt.Println("Fetching the restore PVC.") restorePVC, err = f.validateOps.ValidatePVC(ctx, args.FromPVCName, args.Namespace) if err != nil { return nil, nil, nil, nil, errors.Wrap(err, "failed to validate restore PVC") } fmt.Println("Fetching the source PVC.") sourcePVC, err = f.validateOps.ValidatePVC(ctx, args.ToPVCName, args.Namespace) if err != nil { return nil, nil, nil, nil, errors.Wrap(err, "failed to validate source PVC") } _, err = f.validateOps.ValidateStorageClass(ctx, *restorePVC.Spec.StorageClassName) if err != nil { return nil, nil, nil, nil, errors.Wrap(err, "failed to validate StorageClass for restore PVC") } sc, err = f.validateOps.ValidateStorageClass(ctx, *sourcePVC.Spec.StorageClassName) if err != nil { return nil, nil, nil, nil, errors.Wrap(err, "failed to validate StorageClass for source PVC") } } for _, sourceAccessMode := range sourcePVC.Spec.AccessModes { if sourceAccessMode == v1.ReadWriteOncePod { return nil, nil, nil, nil, fmt.Errorf("unsupported %s AccessMode found in source PVC. Supported AccessModes are ReadOnlyMany & ReadWriteMany", sourceAccessMode) } } return snapshot, restorePVC, sourcePVC, sc, nil } func (f *fileRestoreSteps) CreateInspectorApplication(ctx context.Context, args *types.FileRestoreArgs, snapshot *snapv1.VolumeSnapshot, restorePVC *v1.PersistentVolumeClaim, sourcePVC *v1.PersistentVolumeClaim, storageClass *sv1.StorageClass) (*v1.Pod, *v1.PersistentVolumeClaim, string, error) { restoreMountPath := "/restore-pvc-data" if args.FromSnapshotName != "" { snapshotAPIGroup := "snapshot.storage.k8s.io" snapshotKind := "VolumeSnapshot" dataSource := &v1.TypedLocalObjectReference{ APIGroup: &snapshotAPIGroup, Kind: snapshotKind, Name: snapshot.Name, } pvcArgs := &types.CreatePVCArgs{ GenerateName: clonedPVCGenerateName, StorageClass: storageClass.Name, Namespace: args.Namespace, DataSource: dataSource, RestoreSize: snapshot.Status.RestoreSize, } var err error restorePVC, err = f.createAppOps.CreatePVC(ctx, pvcArgs) if err != nil { return nil, nil, "", errors.Wrap(err, "failed to restore PVC") } restoreMountPath = "/snapshot-data" } podArgs := &types.CreatePodArgs{ GenerateName: clonedPodGenerateName, Namespace: args.Namespace, RunAsUser: args.RunAsUser, ContainerImage: "filebrowser/filebrowser:v2", ContainerArgs: []string{"--noauth"}, PVCMap: map[string]types.VolumePath{ restorePVC.Name: { MountPath: fmt.Sprintf("/srv%s", restoreMountPath), }, sourcePVC.Name: { MountPath: "/srv/source-data", }, }, } if args.Path != "" { podArgs = &types.CreatePodArgs{ GenerateName: clonedPodGenerateName, Namespace: args.Namespace, RunAsUser: args.RunAsUser, ContainerImage: "alpine:3.19", Command: []string{"/bin/sh"}, ContainerArgs: []string{"-c", "while true; do sleep 3600; done"}, PVCMap: map[string]types.VolumePath{ restorePVC.Name: { MountPath: restoreMountPath, }, sourcePVC.Name: { MountPath: "/source-data", }, }, } } pod, err := f.createAppOps.CreatePod(ctx, podArgs) if err != nil { return nil, restorePVC, "", errors.Wrap(err, "failed to create browse Pod") } if err = f.createAppOps.WaitForPodReady(ctx, args.Namespace, pod.Name); err != nil { return pod, restorePVC, "", errors.Wrap(err, "pod failed to become ready") } return pod, restorePVC, restoreMountPath, nil } func (f *fileRestoreSteps) ExecuteCopyCommand(ctx context.Context, args *types.FileRestoreArgs, pod *v1.Pod, restoreMountPath string) (string, error) { command := []string{"cp", "-rf", fmt.Sprintf("%s%s", restoreMountPath, args.Path), fmt.Sprintf("/source-data%s", args.Path)} stdout, err := f.kubeExecutor.Exec(ctx, args.Namespace, pod.Name, pod.Spec.Containers[0].Name, command) if err != nil { return "", errors.Wrapf(err, "error running command:(%v)", command) } return stdout, nil } func (f *fileRestoreSteps) PortForwardAPod(pod *v1.Pod, localPort int) error { var wg sync.WaitGroup wg.Add(1) stopChan, readyChan, errChan := make(chan struct{}, 1), make(chan struct{}, 1), make(chan string) out, errOut := new(bytes.Buffer), new(bytes.Buffer) cfg, err := f.portForwardOps.FetchRestConfig() if err != nil { return errors.New("failed to fetch rest config") } sigs := make(chan os.Signal, 1) signal.Notify(sigs, syscall.SIGINT, syscall.SIGTERM) go func() { <-sigs fmt.Println("\nStopping port forward.") close(stopChan) wg.Done() }() go func() { pfArgs := &types.PortForwardAPodRequest{ RestConfig: cfg, Pod: pod, LocalPort: localPort, PodPort: 80, OutStream: bytes.Buffer(*out), ErrOutStream: bytes.Buffer(*errOut), StopCh: stopChan, ReadyCh: readyChan, } err = f.portForwardOps.PortForwardAPod(pfArgs) if err != nil { errChan <- fmt.Sprintf("Failed to port forward (%s)", err.Error()) } }() select { case <-readyChan: url := fmt.Sprintf("http://localhost:%d/", localPort) fmt.Printf("Port forwarding is ready to get traffic. visit %s\n", url) openbrowser(url) wg.Wait() case msg := <-errChan: return errors.New(msg) } return nil } func (f *fileRestoreSteps) Cleanup(ctx context.Context, args *types.FileRestoreArgs, restorePVC *v1.PersistentVolumeClaim, pod *v1.Pod) { if args.FromSnapshotName != "" { fmt.Println("Cleaning up restore PVC.") if restorePVC != nil { err := f.cleanerOps.DeletePVC(ctx, restorePVC.Name, restorePVC.Namespace) if err != nil { fmt.Println("Failed to delete restore PVC", restorePVC) } } } fmt.Println("Cleaning up browser pod.") if pod != nil { err := f.cleanerOps.DeletePod(ctx, pod.Name, pod.Namespace) if err != nil { fmt.Println("Failed to delete Pod", pod) } } } ================================================ FILE: pkg/csi/file_restore_inspector_steps_test.go ================================================ package csi import ( "context" "fmt" "k8s.io/apimachinery/pkg/api/resource" "github.com/golang/mock/gomock" "github.com/kastenhq/kubestr/pkg/common" "github.com/kastenhq/kubestr/pkg/csi/mocks" "github.com/kastenhq/kubestr/pkg/csi/types" snapv1 "github.com/kubernetes-csi/external-snapshotter/client/v4/apis/volumesnapshot/v1" . "gopkg.in/check.v1" v1 "k8s.io/api/core/v1" sv1 "k8s.io/api/storage/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" ) func (s *CSITestSuite) TestFileRestoreValidateArgs(c *C) { ctx := context.Background() scName := "sc" vscName := "vsc" pvcName := "pvc" type fields struct { validateOps *mocks.MockArgumentValidator versionOps *mocks.MockApiVersionFetcher } for _, tc := range []struct { args *types.FileRestoreArgs prepare func(f *fields) errChecker Checker }{ { // valid args args: &types.FileRestoreArgs{ FromSnapshotName: "vs", Namespace: "ns", }, prepare: func(f *fields) { gomock.InOrder( f.validateOps.EXPECT().ValidateNamespace(gomock.Any(), "ns").Return(nil), f.versionOps.EXPECT().GetCSISnapshotGroupVersion().Return( &metav1.GroupVersionForDiscovery{ GroupVersion: common.SnapshotVersion, }, nil), f.validateOps.EXPECT().ValidateVolumeSnapshot(gomock.Any(), "vs", "ns", gomock.Any()).Return( &snapv1.VolumeSnapshot{ ObjectMeta: metav1.ObjectMeta{ Name: "vs", Namespace: "ns", }, Spec: snapv1.VolumeSnapshotSpec{ Source: snapv1.VolumeSnapshotSource{ PersistentVolumeClaimName: &pvcName, }, VolumeSnapshotClassName: &vscName, }, }, nil, ), f.validateOps.EXPECT().ValidatePVC(gomock.Any(), "pvc", "ns").Return( &v1.PersistentVolumeClaim{ ObjectMeta: metav1.ObjectMeta{ Name: "pvc", Namespace: "ns", }, Spec: v1.PersistentVolumeClaimSpec{ VolumeName: "vol", StorageClassName: &scName, }, }, nil, ), f.validateOps.EXPECT().ValidateStorageClass(gomock.Any(), scName).Return( &sv1.StorageClass{ Provisioner: "p1", }, nil), f.validateOps.EXPECT().ValidateVolumeSnapshotClass(gomock.Any(), "vsc", &metav1.GroupVersionForDiscovery{ GroupVersion: common.SnapshotVersion, }).Return(&unstructured.Unstructured{ Object: map[string]interface{}{ common.VolSnapClassDriverKey: "p1", }, }, nil), ) }, errChecker: IsNil, }, { // valid args args: &types.FileRestoreArgs{ FromPVCName: "restorePVC", ToPVCName: "sourcePVC", Namespace: "ns", }, prepare: func(f *fields) { gomock.InOrder( f.validateOps.EXPECT().ValidateNamespace(gomock.Any(), "ns").Return(nil), f.versionOps.EXPECT().GetCSISnapshotGroupVersion().Return( &metav1.GroupVersionForDiscovery{ GroupVersion: common.SnapshotVersion, }, nil), f.validateOps.EXPECT().ValidatePVC(gomock.Any(), "restorePVC", "ns").Return( &v1.PersistentVolumeClaim{ ObjectMeta: metav1.ObjectMeta{ Name: "restorePVC", Namespace: "ns", }, Spec: v1.PersistentVolumeClaimSpec{ VolumeName: "vol", StorageClassName: &scName, }, }, nil, ), f.validateOps.EXPECT().ValidatePVC(gomock.Any(), "sourcePVC", "ns").Return( &v1.PersistentVolumeClaim{ ObjectMeta: metav1.ObjectMeta{ Name: "sourcePVC", Namespace: "ns", }, Spec: v1.PersistentVolumeClaimSpec{ VolumeName: "vol", StorageClassName: &scName, }, }, nil, ), f.validateOps.EXPECT().ValidateStorageClass(gomock.Any(), scName).Return( &sv1.StorageClass{ Provisioner: "p1", }, nil), f.validateOps.EXPECT().ValidateStorageClass(gomock.Any(), scName).Return( &sv1.StorageClass{ Provisioner: "p1", }, nil), ) }, errChecker: IsNil, }, { // driver mismatch args: &types.FileRestoreArgs{ FromSnapshotName: "vs", Namespace: "ns", }, prepare: func(f *fields) { gomock.InOrder( f.validateOps.EXPECT().ValidateNamespace(gomock.Any(), "ns").Return(nil), f.versionOps.EXPECT().GetCSISnapshotGroupVersion().Return( &metav1.GroupVersionForDiscovery{ GroupVersion: common.SnapshotVersion, }, nil), f.validateOps.EXPECT().ValidateVolumeSnapshot(gomock.Any(), "vs", "ns", gomock.Any()).Return( &snapv1.VolumeSnapshot{ ObjectMeta: metav1.ObjectMeta{ Name: "vs", Namespace: "ns", }, Spec: snapv1.VolumeSnapshotSpec{ Source: snapv1.VolumeSnapshotSource{ PersistentVolumeClaimName: &pvcName, }, VolumeSnapshotClassName: &vscName, }, }, nil, ), f.validateOps.EXPECT().ValidatePVC(gomock.Any(), "pvc", "ns").Return( &v1.PersistentVolumeClaim{ ObjectMeta: metav1.ObjectMeta{ Name: "pvc", Namespace: "ns", }, Spec: v1.PersistentVolumeClaimSpec{ VolumeName: "vol", StorageClassName: &scName, }, }, nil, ), f.validateOps.EXPECT().ValidateStorageClass(gomock.Any(), gomock.Any()).Return( &sv1.StorageClass{ Provisioner: "p1", }, nil), f.validateOps.EXPECT().ValidateVolumeSnapshotClass(gomock.Any(), "vsc", &metav1.GroupVersionForDiscovery{ GroupVersion: common.SnapshotVersion, }).Return(&unstructured.Unstructured{ Object: map[string]interface{}{ common.VolSnapClassDriverKey: "p2", }, }, nil), ) }, errChecker: NotNil, }, { // vsc error args: &types.FileRestoreArgs{ FromSnapshotName: "vs", Namespace: "ns", }, prepare: func(f *fields) { gomock.InOrder( f.validateOps.EXPECT().ValidateNamespace(gomock.Any(), "ns").Return(nil), f.versionOps.EXPECT().GetCSISnapshotGroupVersion().Return(nil, nil), f.validateOps.EXPECT().ValidateVolumeSnapshot(gomock.Any(), "vs", "ns", gomock.Any()).Return( &snapv1.VolumeSnapshot{ ObjectMeta: metav1.ObjectMeta{ Name: "vs", Namespace: "ns", }, Spec: snapv1.VolumeSnapshotSpec{ Source: snapv1.VolumeSnapshotSource{ PersistentVolumeClaimName: &pvcName, }, VolumeSnapshotClassName: &vscName, }, }, nil, ), f.validateOps.EXPECT().ValidatePVC(gomock.Any(), "pvc", "ns").Return( &v1.PersistentVolumeClaim{ ObjectMeta: metav1.ObjectMeta{ Name: "pvc", Namespace: "ns", }, Spec: v1.PersistentVolumeClaimSpec{ VolumeName: "vol", StorageClassName: &scName, }, }, nil, ), f.validateOps.EXPECT().ValidateStorageClass(gomock.Any(), gomock.Any()).Return(nil, nil), f.validateOps.EXPECT().ValidateVolumeSnapshotClass(gomock.Any(), gomock.Any(), gomock.Any()).Return(nil, fmt.Errorf("vsc error")), ) }, errChecker: NotNil, }, { // get driver versionn error args: &types.FileRestoreArgs{ FromSnapshotName: "vs", Namespace: "ns", }, prepare: func(f *fields) { gomock.InOrder( f.validateOps.EXPECT().ValidateNamespace(gomock.Any(), "ns").Return(nil), f.versionOps.EXPECT().GetCSISnapshotGroupVersion().Return(nil, fmt.Errorf("driver version error")), ) }, errChecker: NotNil, }, { // sc error args: &types.FileRestoreArgs{ FromSnapshotName: "vs", Namespace: "ns", }, prepare: func(f *fields) { gomock.InOrder( f.validateOps.EXPECT().ValidateNamespace(gomock.Any(), "ns").Return(nil), f.versionOps.EXPECT().GetCSISnapshotGroupVersion().Return(nil, nil), f.validateOps.EXPECT().ValidateVolumeSnapshot(gomock.Any(), "vs", "ns", gomock.Any()).Return( &snapv1.VolumeSnapshot{ ObjectMeta: metav1.ObjectMeta{ Name: "vs", Namespace: "ns", }, Spec: snapv1.VolumeSnapshotSpec{ Source: snapv1.VolumeSnapshotSource{ PersistentVolumeClaimName: &pvcName, }, VolumeSnapshotClassName: &vscName, }, }, nil, ), f.validateOps.EXPECT().ValidatePVC(gomock.Any(), "pvc", "ns").Return( &v1.PersistentVolumeClaim{ ObjectMeta: metav1.ObjectMeta{ Name: "pvc", Namespace: "ns", }, Spec: v1.PersistentVolumeClaimSpec{ VolumeName: "vol", StorageClassName: &scName, }, }, nil, ), f.validateOps.EXPECT().ValidateStorageClass(gomock.Any(), gomock.Any()).Return(nil, fmt.Errorf("sc error")), ) }, errChecker: NotNil, }, { // validate vs error args: &types.FileRestoreArgs{ FromSnapshotName: "vs", Namespace: "ns", }, prepare: func(f *fields) { gomock.InOrder( f.validateOps.EXPECT().ValidateNamespace(gomock.Any(), "ns").Return(nil), f.versionOps.EXPECT().GetCSISnapshotGroupVersion().Return(nil, nil), f.validateOps.EXPECT().ValidateVolumeSnapshot(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).Return(nil, fmt.Errorf("validate vs error")), ) }, errChecker: NotNil, }, { // validate ns error args: &types.FileRestoreArgs{ FromSnapshotName: "vs", Namespace: "ns", }, prepare: func(f *fields) { gomock.InOrder( f.validateOps.EXPECT().ValidateNamespace(gomock.Any(), "ns").Return(fmt.Errorf("validate ns error")), ) }, errChecker: NotNil, }, { // validate vs error args: &types.FileRestoreArgs{ FromSnapshotName: "", Namespace: "ns", }, errChecker: NotNil, }, { // validate ns error args: &types.FileRestoreArgs{ FromSnapshotName: "dfd", Namespace: "", }, errChecker: NotNil, }, } { ctrl := gomock.NewController(c) defer ctrl.Finish() f := fields{ validateOps: mocks.NewMockArgumentValidator(ctrl), versionOps: mocks.NewMockApiVersionFetcher(ctrl), } if tc.prepare != nil { tc.prepare(&f) } stepper := &fileRestoreSteps{ validateOps: f.validateOps, versionFetchOps: f.versionOps, } _, _, _, _, err := stepper.ValidateArgs(ctx, tc.args) c.Check(err, tc.errChecker) } } func (s *CSITestSuite) TestCreateInspectorApplicationForFileRestore(c *C) { ctx := context.Background() resourceQuantity := resource.MustParse("1Gi") snapshotAPIGroup := "snapshot.storage.k8s.io" type fields struct { createAppOps *mocks.MockApplicationCreator } for _, tc := range []struct { args *types.FileRestoreArgs fromSnapshot *snapv1.VolumeSnapshot fromPVC *v1.PersistentVolumeClaim sc *sv1.StorageClass prepare func(f *fields) errChecker Checker podChecker Checker pvcChecker Checker }{ { args: &types.FileRestoreArgs{ Namespace: "ns", RunAsUser: 100, FromSnapshotName: "vs", }, sc: &sv1.StorageClass{ ObjectMeta: metav1.ObjectMeta{ Name: "sc", }, }, fromSnapshot: &snapv1.VolumeSnapshot{ ObjectMeta: metav1.ObjectMeta{ Name: "vs", }, Status: &snapv1.VolumeSnapshotStatus{ RestoreSize: &resourceQuantity, }, }, fromPVC: &v1.PersistentVolumeClaim{}, prepare: func(f *fields) { gomock.InOrder( f.createAppOps.EXPECT().CreatePVC(gomock.Any(), &types.CreatePVCArgs{ GenerateName: clonedPVCGenerateName, StorageClass: "sc", Namespace: "ns", DataSource: &v1.TypedLocalObjectReference{ APIGroup: &snapshotAPIGroup, Kind: "VolumeSnapshot", Name: "vs", }, RestoreSize: &resourceQuantity, }).Return(&v1.PersistentVolumeClaim{ ObjectMeta: metav1.ObjectMeta{ Name: "restorePVC", }, }, nil), f.createAppOps.EXPECT().CreatePod(gomock.Any(), &types.CreatePodArgs{ GenerateName: clonedPodGenerateName, Namespace: "ns", ContainerArgs: []string{"--noauth"}, RunAsUser: 100, ContainerImage: "filebrowser/filebrowser:v2", PVCMap: map[string]types.VolumePath{ "restorePVC": { MountPath: "/srv/snapshot-data", }, "sourcePVC": { MountPath: "/srv/source-data", }, }, }).Return(&v1.Pod{ ObjectMeta: metav1.ObjectMeta{ Name: "pod", }, }, nil), f.createAppOps.EXPECT().WaitForPodReady(gomock.Any(), "ns", "pod").Return(nil), ) }, errChecker: IsNil, podChecker: NotNil, pvcChecker: NotNil, }, { args: &types.FileRestoreArgs{ Namespace: "ns", RunAsUser: 100, FromPVCName: "restorePVC", }, sc: &sv1.StorageClass{ ObjectMeta: metav1.ObjectMeta{ Name: "sc", }, }, fromSnapshot: &snapv1.VolumeSnapshot{}, fromPVC: &v1.PersistentVolumeClaim{ ObjectMeta: metav1.ObjectMeta{ Name: "restorePVC", }, }, prepare: func(f *fields) { gomock.InOrder( f.createAppOps.EXPECT().CreatePod(gomock.Any(), &types.CreatePodArgs{ GenerateName: clonedPodGenerateName, Namespace: "ns", ContainerArgs: []string{"--noauth"}, RunAsUser: 100, ContainerImage: "filebrowser/filebrowser:v2", PVCMap: map[string]types.VolumePath{ "restorePVC": { MountPath: "/srv/restore-pvc-data", }, "sourcePVC": { MountPath: "/srv/source-data", }, }, }).Return(&v1.Pod{ ObjectMeta: metav1.ObjectMeta{ Name: "pod", }, }, nil), f.createAppOps.EXPECT().WaitForPodReady(gomock.Any(), "ns", "pod").Return(nil), ) }, errChecker: IsNil, podChecker: NotNil, pvcChecker: NotNil, }, { args: &types.FileRestoreArgs{ Namespace: "ns", RunAsUser: 100, FromSnapshotName: "vs", }, sc: &sv1.StorageClass{ ObjectMeta: metav1.ObjectMeta{ Name: "sc", }, }, fromSnapshot: &snapv1.VolumeSnapshot{ ObjectMeta: metav1.ObjectMeta{ Name: "vs", }, Status: &snapv1.VolumeSnapshotStatus{ RestoreSize: &resourceQuantity, }, }, fromPVC: &v1.PersistentVolumeClaim{}, prepare: func(f *fields) { gomock.InOrder( f.createAppOps.EXPECT().CreatePVC(gomock.Any(), &types.CreatePVCArgs{ GenerateName: clonedPVCGenerateName, StorageClass: "sc", Namespace: "ns", DataSource: &v1.TypedLocalObjectReference{ APIGroup: &snapshotAPIGroup, Kind: "VolumeSnapshot", Name: "vs", }, RestoreSize: &resourceQuantity, }).Return(&v1.PersistentVolumeClaim{ ObjectMeta: metav1.ObjectMeta{ Name: "restorePVC", }, }, nil), f.createAppOps.EXPECT().CreatePod(gomock.Any(), &types.CreatePodArgs{ GenerateName: clonedPodGenerateName, Namespace: "ns", ContainerArgs: []string{"--noauth"}, RunAsUser: 100, ContainerImage: "filebrowser/filebrowser:v2", PVCMap: map[string]types.VolumePath{ "restorePVC": { MountPath: "/srv/snapshot-data", }, "sourcePVC": { MountPath: "/srv/source-data", }, }, }).Return(&v1.Pod{ ObjectMeta: metav1.ObjectMeta{ Name: "pod", }, }, nil), f.createAppOps.EXPECT().WaitForPodReady(gomock.Any(), "ns", "pod").Return(fmt.Errorf("pod ready error")), ) }, errChecker: NotNil, podChecker: NotNil, pvcChecker: NotNil, }, { args: &types.FileRestoreArgs{ Namespace: "ns", RunAsUser: 100, FromSnapshotName: "vs", }, sc: &sv1.StorageClass{ ObjectMeta: metav1.ObjectMeta{ Name: "sc", }, }, fromSnapshot: &snapv1.VolumeSnapshot{ ObjectMeta: metav1.ObjectMeta{ Name: "vs", }, Status: &snapv1.VolumeSnapshotStatus{ RestoreSize: &resourceQuantity, }, }, fromPVC: &v1.PersistentVolumeClaim{}, prepare: func(f *fields) { gomock.InOrder( f.createAppOps.EXPECT().CreatePVC(gomock.Any(), gomock.Any()).Return(&v1.PersistentVolumeClaim{ ObjectMeta: metav1.ObjectMeta{ Name: "restorePVC", }, }, nil), f.createAppOps.EXPECT().CreatePod(gomock.Any(), gomock.Any()).Return(nil, fmt.Errorf("pod error")), ) }, errChecker: NotNil, podChecker: IsNil, pvcChecker: NotNil, }, { args: &types.FileRestoreArgs{ Namespace: "ns", RunAsUser: 100, FromSnapshotName: "vs", }, sc: &sv1.StorageClass{ ObjectMeta: metav1.ObjectMeta{ Name: "sc", }, }, fromSnapshot: &snapv1.VolumeSnapshot{ ObjectMeta: metav1.ObjectMeta{ Name: "vs", }, Status: &snapv1.VolumeSnapshotStatus{ RestoreSize: &resourceQuantity, }, }, fromPVC: &v1.PersistentVolumeClaim{}, prepare: func(f *fields) { gomock.InOrder( f.createAppOps.EXPECT().CreatePVC(gomock.Any(), gomock.Any()).Return(nil, fmt.Errorf("error")), ) }, errChecker: NotNil, podChecker: IsNil, pvcChecker: IsNil, }, } { ctrl := gomock.NewController(c) defer ctrl.Finish() f := fields{ createAppOps: mocks.NewMockApplicationCreator(ctrl), } if tc.prepare != nil { tc.prepare(&f) } stepper := &fileRestoreSteps{ createAppOps: f.createAppOps, } sourcePVC := v1.PersistentVolumeClaim{ ObjectMeta: metav1.ObjectMeta{ Name: "sourcePVC", Namespace: tc.args.Namespace, }, Spec: v1.PersistentVolumeClaimSpec{ AccessModes: []v1.PersistentVolumeAccessMode{ v1.ReadWriteOnce, }, Resources: v1.VolumeResourceRequirements{ Requests: v1.ResourceList{ v1.ResourceStorage: resource.MustParse("1Gi"), }, }, }, } pod, pvc, _, err := stepper.CreateInspectorApplication(ctx, tc.args, tc.fromSnapshot, tc.fromPVC, &sourcePVC, tc.sc) c.Check(err, tc.errChecker) c.Check(pod, tc.podChecker) c.Check(pvc, tc.pvcChecker) } } func (s *CSITestSuite) TestFileRestoreCleanup(c *C) { ctx := context.Background() groupversion := &metav1.GroupVersionForDiscovery{ GroupVersion: "gv", Version: "v", } type fields struct { cleanerOps *mocks.MockCleaner } for _, tc := range []struct { args *types.FileRestoreArgs restorePVC *v1.PersistentVolumeClaim pod *v1.Pod prepare func(f *fields) }{ { args: &types.FileRestoreArgs{ Namespace: "ns", RunAsUser: 100, FromSnapshotName: "vs", }, restorePVC: &v1.PersistentVolumeClaim{ ObjectMeta: metav1.ObjectMeta{ Name: "restorePVC", Namespace: "ns", }, }, pod: &v1.Pod{ ObjectMeta: metav1.ObjectMeta{ Name: "pod", Namespace: "ns", }, }, prepare: func(f *fields) { gomock.InOrder( f.cleanerOps.EXPECT().DeletePVC(ctx, "restorePVC", "ns").Return(nil), f.cleanerOps.EXPECT().DeletePod(ctx, "pod", "ns").Return(nil), ) }, }, { args: &types.FileRestoreArgs{ Namespace: "ns", RunAsUser: 100, FromSnapshotName: "", FromPVCName: "restorePVC", ToPVCName: "sourcePVC", }, restorePVC: &v1.PersistentVolumeClaim{ ObjectMeta: metav1.ObjectMeta{ Name: "restorePVC", Namespace: "ns", }, }, pod: &v1.Pod{ ObjectMeta: metav1.ObjectMeta{ Name: "pod", Namespace: "ns", }, }, prepare: func(f *fields) { gomock.InOrder( f.cleanerOps.EXPECT().DeletePod(ctx, "pod", "ns").Return(nil), ) }, }, { args: &types.FileRestoreArgs{ Namespace: "ns", RunAsUser: 100, FromSnapshotName: "vs", }, restorePVC: &v1.PersistentVolumeClaim{ ObjectMeta: metav1.ObjectMeta{ Name: "restorePVC", Namespace: "ns", }, }, pod: &v1.Pod{ ObjectMeta: metav1.ObjectMeta{ Name: "pod", Namespace: "ns", }, }, prepare: func(f *fields) { gomock.InOrder( f.cleanerOps.EXPECT().DeletePVC(ctx, "restorePVC", "ns").Return(fmt.Errorf("err")), f.cleanerOps.EXPECT().DeletePod(ctx, "pod", "ns").Return(fmt.Errorf("err")), ) }, }, } { ctrl := gomock.NewController(c) defer ctrl.Finish() f := fields{ cleanerOps: mocks.NewMockCleaner(ctrl), } if tc.prepare != nil { tc.prepare(&f) } stepper := &fileRestoreSteps{ cleanerOps: f.cleanerOps, SnapshotGroupVersion: groupversion, } stepper.Cleanup(ctx, tc.args, tc.restorePVC, tc.pod) } } ================================================ FILE: pkg/csi/file_restore_inspector_test.go ================================================ package csi import ( "context" "fmt" "github.com/golang/mock/gomock" "github.com/kastenhq/kubestr/pkg/csi/mocks" "github.com/kastenhq/kubestr/pkg/csi/types" snapv1 "github.com/kubernetes-csi/external-snapshotter/client/v4/apis/volumesnapshot/v1" . "gopkg.in/check.v1" v1 "k8s.io/api/core/v1" sv1 "k8s.io/api/storage/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" "k8s.io/client-go/dynamic" fakedynamic "k8s.io/client-go/dynamic/fake" "k8s.io/client-go/kubernetes" "k8s.io/client-go/kubernetes/fake" ) func (s *CSITestSuite) TestRunFileRestoreHelper(c *C) { ctx := context.Background() type fields struct { stepperOps *mocks.MockFileRestoreStepper } for _, tc := range []struct { kubeCli kubernetes.Interface dynCli dynamic.Interface args *types.FileRestoreArgs prepare func(f *fields) errChecker Checker }{ { // success kubeCli: fake.NewSimpleClientset(), dynCli: fakedynamic.NewSimpleDynamicClient(runtime.NewScheme()), args: &types.FileRestoreArgs{}, prepare: func(f *fields) { gomock.InOrder( f.stepperOps.EXPECT().ValidateArgs(gomock.Any(), gomock.Any()).Return( &snapv1.VolumeSnapshot{}, &v1.PersistentVolumeClaim{}, &v1.PersistentVolumeClaim{}, &sv1.StorageClass{}, nil, ), f.stepperOps.EXPECT().CreateInspectorApplication(gomock.Any(), gomock.Any(), &snapv1.VolumeSnapshot{}, &v1.PersistentVolumeClaim{}, &v1.PersistentVolumeClaim{}, &sv1.StorageClass{}, ).Return( &v1.Pod{ ObjectMeta: metav1.ObjectMeta{ Name: "pod1", Namespace: "ns", }, }, &v1.PersistentVolumeClaim{ ObjectMeta: metav1.ObjectMeta{ Name: "pvc1", Namespace: "ns", }, }, "", nil, ), f.stepperOps.EXPECT().PortForwardAPod( &v1.Pod{ ObjectMeta: metav1.ObjectMeta{ Name: "pod1", Namespace: "ns", }, }, gomock.Any(), ).Return(nil), f.stepperOps.EXPECT().Cleanup(gomock.Any(), gomock.Any(), &v1.PersistentVolumeClaim{ ObjectMeta: metav1.ObjectMeta{ Name: "pvc1", Namespace: "ns", }, }, &v1.Pod{ ObjectMeta: metav1.ObjectMeta{ Name: "pod1", Namespace: "ns", }, }, ), ) }, errChecker: IsNil, }, { // portforward failure kubeCli: fake.NewSimpleClientset(), dynCli: fakedynamic.NewSimpleDynamicClient(runtime.NewScheme()), args: &types.FileRestoreArgs{}, prepare: func(f *fields) { gomock.InOrder( f.stepperOps.EXPECT().ValidateArgs(gomock.Any(), gomock.Any()).Return(nil, nil, nil, nil, nil), f.stepperOps.EXPECT().CreateInspectorApplication(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).Return(nil, nil, "", nil), f.stepperOps.EXPECT().PortForwardAPod(gomock.Any(), gomock.Any()).Return(fmt.Errorf("portforward error")), f.stepperOps.EXPECT().Cleanup(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()), ) }, errChecker: NotNil, }, { // createapp failure kubeCli: fake.NewSimpleClientset(), dynCli: fakedynamic.NewSimpleDynamicClient(runtime.NewScheme()), args: &types.FileRestoreArgs{}, prepare: func(f *fields) { gomock.InOrder( f.stepperOps.EXPECT().ValidateArgs(gomock.Any(), gomock.Any()).Return(nil, nil, nil, nil, nil), f.stepperOps.EXPECT().CreateInspectorApplication(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).Return(nil, nil, "", fmt.Errorf("createapp error")), f.stepperOps.EXPECT().Cleanup(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()), ) }, errChecker: NotNil, }, { // fetch snapshot failure kubeCli: fake.NewSimpleClientset(), dynCli: fakedynamic.NewSimpleDynamicClient(runtime.NewScheme()), args: &types.FileRestoreArgs{}, prepare: func(f *fields) { gomock.InOrder( f.stepperOps.EXPECT().ValidateArgs(gomock.Any(), gomock.Any()).Return(nil, nil, nil, nil, fmt.Errorf("snapshot error")), f.stepperOps.EXPECT().Cleanup(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()), ) }, errChecker: NotNil, }, { // validate failure kubeCli: fake.NewSimpleClientset(), dynCli: fakedynamic.NewSimpleDynamicClient(runtime.NewScheme()), args: &types.FileRestoreArgs{}, prepare: func(f *fields) { gomock.InOrder( f.stepperOps.EXPECT().ValidateArgs(gomock.Any(), gomock.Any()).Return(nil, nil, nil, nil, fmt.Errorf("validate error")), f.stepperOps.EXPECT().Cleanup(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()), ) }, errChecker: NotNil, }, { // emptycli failure kubeCli: nil, dynCli: fakedynamic.NewSimpleDynamicClient(runtime.NewScheme()), args: &types.FileRestoreArgs{}, prepare: func(f *fields) { gomock.InOrder( f.stepperOps.EXPECT().Cleanup(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()), ) }, errChecker: NotNil, }, { // emptydyncli failure kubeCli: fake.NewSimpleClientset(), dynCli: nil, args: &types.FileRestoreArgs{}, prepare: func(f *fields) { gomock.InOrder( f.stepperOps.EXPECT().Cleanup(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()), ) }, errChecker: NotNil, }, } { ctrl := gomock.NewController(c) defer ctrl.Finish() f := fields{ stepperOps: mocks.NewMockFileRestoreStepper(ctrl), } if tc.prepare != nil { tc.prepare(&f) } runner := &FileRestoreRunner{ KubeCli: tc.kubeCli, DynCli: tc.dynCli, restoreSteps: f.stepperOps, } err := runner.RunFileRestoreHelper(ctx, tc.args) c.Check(err, tc.errChecker) } } func (s *CSITestSuite) TestFileRestoreRunner(c *C) { ctx := context.Background() r := &FileRestoreRunner{ restoreSteps: &fileRestoreSteps{}, } args := types.FileRestoreArgs{} err := r.RunFileRestoreHelper(ctx, &args) c.Check(err, NotNil) } ================================================ FILE: pkg/csi/mocks/mock_api_version_fetcher.go ================================================ // Code generated by MockGen. DO NOT EDIT. // Source: github.com/kastenhq/kubestr/pkg/csi (interfaces: ApiVersionFetcher) // Package mocks is a generated GoMock package. package mocks import ( reflect "reflect" gomock "github.com/golang/mock/gomock" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) // MockApiVersionFetcher is a mock of ApiVersionFetcher interface. type MockApiVersionFetcher struct { ctrl *gomock.Controller recorder *MockApiVersionFetcherMockRecorder } // MockApiVersionFetcherMockRecorder is the mock recorder for MockApiVersionFetcher. type MockApiVersionFetcherMockRecorder struct { mock *MockApiVersionFetcher } // NewMockApiVersionFetcher creates a new mock instance. func NewMockApiVersionFetcher(ctrl *gomock.Controller) *MockApiVersionFetcher { mock := &MockApiVersionFetcher{ctrl: ctrl} mock.recorder = &MockApiVersionFetcherMockRecorder{mock} return mock } // EXPECT returns an object that allows the caller to indicate expected use. func (m *MockApiVersionFetcher) EXPECT() *MockApiVersionFetcherMockRecorder { return m.recorder } // GetCSISnapshotGroupVersion mocks base method. func (m *MockApiVersionFetcher) GetCSISnapshotGroupVersion() (*v1.GroupVersionForDiscovery, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "GetCSISnapshotGroupVersion") ret0, _ := ret[0].(*v1.GroupVersionForDiscovery) ret1, _ := ret[1].(error) return ret0, ret1 } // GetCSISnapshotGroupVersion indicates an expected call of GetCSISnapshotGroupVersion. func (mr *MockApiVersionFetcherMockRecorder) GetCSISnapshotGroupVersion() *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetCSISnapshotGroupVersion", reflect.TypeOf((*MockApiVersionFetcher)(nil).GetCSISnapshotGroupVersion)) } ================================================ FILE: pkg/csi/mocks/mock_application_creator.go ================================================ // Code generated by MockGen. DO NOT EDIT. // Source: github.com/kastenhq/kubestr/pkg/csi (interfaces: ApplicationCreator) // Package mocks is a generated GoMock package. package mocks import ( context "context" reflect "reflect" gomock "github.com/golang/mock/gomock" types "github.com/kastenhq/kubestr/pkg/csi/types" v1 "k8s.io/api/core/v1" ) // MockApplicationCreator is a mock of ApplicationCreator interface. type MockApplicationCreator struct { ctrl *gomock.Controller recorder *MockApplicationCreatorMockRecorder } // MockApplicationCreatorMockRecorder is the mock recorder for MockApplicationCreator. type MockApplicationCreatorMockRecorder struct { mock *MockApplicationCreator } // NewMockApplicationCreator creates a new mock instance. func NewMockApplicationCreator(ctrl *gomock.Controller) *MockApplicationCreator { mock := &MockApplicationCreator{ctrl: ctrl} mock.recorder = &MockApplicationCreatorMockRecorder{mock} return mock } // EXPECT returns an object that allows the caller to indicate expected use. func (m *MockApplicationCreator) EXPECT() *MockApplicationCreatorMockRecorder { return m.recorder } // CreatePVC mocks base method. func (m *MockApplicationCreator) CreatePVC(arg0 context.Context, arg1 *types.CreatePVCArgs) (*v1.PersistentVolumeClaim, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "CreatePVC", arg0, arg1) ret0, _ := ret[0].(*v1.PersistentVolumeClaim) ret1, _ := ret[1].(error) return ret0, ret1 } // CreatePVC indicates an expected call of CreatePVC. func (mr *MockApplicationCreatorMockRecorder) CreatePVC(arg0, arg1 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CreatePVC", reflect.TypeOf((*MockApplicationCreator)(nil).CreatePVC), arg0, arg1) } // CreatePod mocks base method. func (m *MockApplicationCreator) CreatePod(arg0 context.Context, arg1 *types.CreatePodArgs) (*v1.Pod, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "CreatePod", arg0, arg1) ret0, _ := ret[0].(*v1.Pod) ret1, _ := ret[1].(error) return ret0, ret1 } // CreatePod indicates an expected call of CreatePod. func (mr *MockApplicationCreatorMockRecorder) CreatePod(arg0, arg1 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CreatePod", reflect.TypeOf((*MockApplicationCreator)(nil).CreatePod), arg0, arg1) } // WaitForPodReady mocks base method. func (m *MockApplicationCreator) WaitForPodReady(arg0 context.Context, arg1, arg2 string) error { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "WaitForPodReady", arg0, arg1, arg2) ret0, _ := ret[0].(error) return ret0 } // WaitForPodReady indicates an expected call of WaitForPodReady. func (mr *MockApplicationCreatorMockRecorder) WaitForPodReady(arg0, arg1, arg2 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "WaitForPodReady", reflect.TypeOf((*MockApplicationCreator)(nil).WaitForPodReady), arg0, arg1, arg2) } func (m *MockApplicationCreator) WaitForPVCReady(ctx context.Context, namespace string, pvcName string) error { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "WaitForPVCReady", ctx, namespace, pvcName) err, _ := ret[0].(error) return err } // WaitForPodReady indicates an expected call of WaitForPVCReady. func (mr *MockApplicationCreatorMockRecorder) WaitForPVCReady(ctx, namespace, pvcName interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "WaitForPVCReady", reflect.TypeOf((*MockApplicationCreator)(nil).WaitForPVCReady), ctx, namespace, pvcName) } ================================================ FILE: pkg/csi/mocks/mock_argument_validator.go ================================================ // Code generated by MockGen. DO NOT EDIT. // Source: github.com/kastenhq/kubestr/pkg/csi (interfaces: ArgumentValidator) // Package mocks is a generated GoMock package. package mocks import ( context "context" reflect "reflect" gomock "github.com/golang/mock/gomock" v1 "github.com/kubernetes-csi/external-snapshotter/client/v4/apis/volumesnapshot/v1" v10 "k8s.io/api/core/v1" v11 "k8s.io/api/storage/v1" v12 "k8s.io/apimachinery/pkg/apis/meta/v1" unstructured "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" ) // MockArgumentValidator is a mock of ArgumentValidator interface. type MockArgumentValidator struct { ctrl *gomock.Controller recorder *MockArgumentValidatorMockRecorder } // MockArgumentValidatorMockRecorder is the mock recorder for MockArgumentValidator. type MockArgumentValidatorMockRecorder struct { mock *MockArgumentValidator } // NewMockArgumentValidator creates a new mock instance. func NewMockArgumentValidator(ctrl *gomock.Controller) *MockArgumentValidator { mock := &MockArgumentValidator{ctrl: ctrl} mock.recorder = &MockArgumentValidatorMockRecorder{mock} return mock } // EXPECT returns an object that allows the caller to indicate expected use. func (m *MockArgumentValidator) EXPECT() *MockArgumentValidatorMockRecorder { return m.recorder } // FetchPV mocks base method. func (m *MockArgumentValidator) FetchPV(arg0 context.Context, arg1 string) (*v10.PersistentVolume, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "FetchPV", arg0, arg1) ret0, _ := ret[0].(*v10.PersistentVolume) ret1, _ := ret[1].(error) return ret0, ret1 } // FetchPV indicates an expected call of FetchPV. func (mr *MockArgumentValidatorMockRecorder) FetchPV(arg0, arg1 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "FetchPV", reflect.TypeOf((*MockArgumentValidator)(nil).FetchPV), arg0, arg1) } // ValidateNamespace mocks base method. func (m *MockArgumentValidator) ValidateNamespace(arg0 context.Context, arg1 string) error { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "ValidateNamespace", arg0, arg1) ret0, _ := ret[0].(error) return ret0 } // ValidateNamespace indicates an expected call of ValidateNamespace. func (mr *MockArgumentValidatorMockRecorder) ValidateNamespace(arg0, arg1 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ValidateNamespace", reflect.TypeOf((*MockArgumentValidator)(nil).ValidateNamespace), arg0, arg1) } // ValidatePVC mocks base method. func (m *MockArgumentValidator) ValidatePVC(arg0 context.Context, arg1, arg2 string) (*v10.PersistentVolumeClaim, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "ValidatePVC", arg0, arg1, arg2) ret0, _ := ret[0].(*v10.PersistentVolumeClaim) ret1, _ := ret[1].(error) return ret0, ret1 } // ValidatePVC indicates an expected call of ValidatePVC. func (mr *MockArgumentValidatorMockRecorder) ValidatePVC(arg0, arg1, arg2 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ValidatePVC", reflect.TypeOf((*MockArgumentValidator)(nil).ValidatePVC), arg0, arg1, arg2) } // ValidateStorageClass mocks base method. func (m *MockArgumentValidator) ValidateStorageClass(arg0 context.Context, arg1 string) (*v11.StorageClass, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "ValidateStorageClass", arg0, arg1) ret0, _ := ret[0].(*v11.StorageClass) ret1, _ := ret[1].(error) return ret0, ret1 } // ValidateStorageClass indicates an expected call of ValidateStorageClass. func (mr *MockArgumentValidatorMockRecorder) ValidateStorageClass(arg0, arg1 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ValidateStorageClass", reflect.TypeOf((*MockArgumentValidator)(nil).ValidateStorageClass), arg0, arg1) } // ValidateVolumeSnapshot mocks base method. func (m *MockArgumentValidator) ValidateVolumeSnapshot(arg0 context.Context, arg1, arg2 string, arg3 *v12.GroupVersionForDiscovery) (*v1.VolumeSnapshot, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "ValidateVolumeSnapshot", arg0, arg1, arg2, arg3) ret0, _ := ret[0].(*v1.VolumeSnapshot) ret1, _ := ret[1].(error) return ret0, ret1 } // ValidateVolumeSnapshot indicates an expected call of ValidateVolumeSnapshot. func (mr *MockArgumentValidatorMockRecorder) ValidateVolumeSnapshot(arg0, arg1, arg2, arg3 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ValidateVolumeSnapshot", reflect.TypeOf((*MockArgumentValidator)(nil).ValidateVolumeSnapshot), arg0, arg1, arg2, arg3) } // ValidateVolumeSnapshotClass mocks base method. func (m *MockArgumentValidator) ValidateVolumeSnapshotClass(arg0 context.Context, arg1 string, arg2 *v12.GroupVersionForDiscovery) (*unstructured.Unstructured, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "ValidateVolumeSnapshotClass", arg0, arg1, arg2) ret0, _ := ret[0].(*unstructured.Unstructured) ret1, _ := ret[1].(error) return ret0, ret1 } // ValidateVolumeSnapshotClass indicates an expected call of ValidateVolumeSnapshotClass. func (mr *MockArgumentValidatorMockRecorder) ValidateVolumeSnapshotClass(arg0, arg1, arg2 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ValidateVolumeSnapshotClass", reflect.TypeOf((*MockArgumentValidator)(nil).ValidateVolumeSnapshotClass), arg0, arg1, arg2) } ================================================ FILE: pkg/csi/mocks/mock_cleaner.go ================================================ // Code generated by MockGen. DO NOT EDIT. // Source: github.com/kastenhq/kubestr/pkg/csi (interfaces: Cleaner) // Package mocks is a generated GoMock package. package mocks import ( context "context" reflect "reflect" gomock "github.com/golang/mock/gomock" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) // MockCleaner is a mock of Cleaner interface. type MockCleaner struct { ctrl *gomock.Controller recorder *MockCleanerMockRecorder } // MockCleanerMockRecorder is the mock recorder for MockCleaner. type MockCleanerMockRecorder struct { mock *MockCleaner } // NewMockCleaner creates a new mock instance. func NewMockCleaner(ctrl *gomock.Controller) *MockCleaner { mock := &MockCleaner{ctrl: ctrl} mock.recorder = &MockCleanerMockRecorder{mock} return mock } // EXPECT returns an object that allows the caller to indicate expected use. func (m *MockCleaner) EXPECT() *MockCleanerMockRecorder { return m.recorder } // DeletePVC mocks base method. func (m *MockCleaner) DeletePVC(arg0 context.Context, arg1, arg2 string) error { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "DeletePVC", arg0, arg1, arg2) ret0, _ := ret[0].(error) return ret0 } // DeletePVC indicates an expected call of DeletePVC. func (mr *MockCleanerMockRecorder) DeletePVC(arg0, arg1, arg2 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeletePVC", reflect.TypeOf((*MockCleaner)(nil).DeletePVC), arg0, arg1, arg2) } // DeletePod mocks base method. func (m *MockCleaner) DeletePod(arg0 context.Context, arg1, arg2 string) error { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "DeletePod", arg0, arg1, arg2) ret0, _ := ret[0].(error) return ret0 } // DeletePod indicates an expected call of DeletePod. func (mr *MockCleanerMockRecorder) DeletePod(arg0, arg1, arg2 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeletePod", reflect.TypeOf((*MockCleaner)(nil).DeletePod), arg0, arg1, arg2) } // DeleteSnapshot mocks base method. func (m *MockCleaner) DeleteSnapshot(arg0 context.Context, arg1, arg2 string, arg3 *v1.GroupVersionForDiscovery) error { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "DeleteSnapshot", arg0, arg1, arg2, arg3) ret0, _ := ret[0].(error) return ret0 } // DeleteSnapshot indicates an expected call of DeleteSnapshot. func (mr *MockCleanerMockRecorder) DeleteSnapshot(arg0, arg1, arg2, arg3 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteSnapshot", reflect.TypeOf((*MockCleaner)(nil).DeleteSnapshot), arg0, arg1, arg2, arg3) } ================================================ FILE: pkg/csi/mocks/mock_data_validator.go ================================================ // Code generated by MockGen. DO NOT EDIT. // Source: github.com/kastenhq/kubestr/pkg/csi (interfaces: DataValidator) // Package mocks is a generated GoMock package. package mocks import ( context "context" reflect "reflect" gomock "github.com/golang/mock/gomock" ) // MockDataValidator is a mock of DataValidator interface. type MockDataValidator struct { ctrl *gomock.Controller recorder *MockDataValidatorMockRecorder } // MockDataValidatorMockRecorder is the mock recorder for MockDataValidator. type MockDataValidatorMockRecorder struct { mock *MockDataValidator } // NewMockDataValidator creates a new mock instance. func NewMockDataValidator(ctrl *gomock.Controller) *MockDataValidator { mock := &MockDataValidator{ctrl: ctrl} mock.recorder = &MockDataValidatorMockRecorder{mock} return mock } // EXPECT returns an object that allows the caller to indicate expected use. func (m *MockDataValidator) EXPECT() *MockDataValidatorMockRecorder { return m.recorder } // FetchPodData mocks base method. func (m *MockDataValidator) FetchPodData(arg0 context.Context, arg1, arg2 string) (string, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "FetchPodData", arg0, arg1, arg2) ret0, _ := ret[0].(string) ret1, _ := ret[1].(error) return ret0, ret1 } // FetchPodData indicates an expected call of FetchPodData. func (mr *MockDataValidatorMockRecorder) FetchPodData(arg0, arg1, arg2 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "FetchPodData", reflect.TypeOf((*MockDataValidator)(nil).FetchPodData), arg0, arg1, arg2) } ================================================ FILE: pkg/csi/mocks/mock_file_restore_stepper.go ================================================ // Code generated by MockGen. DO NOT EDIT. // Source: github.com/kastenhq/kubestr/pkg/csi (interfaces: FileRestoreStepper) // Package mocks is a generated GoMock package. package mocks import ( context "context" reflect "reflect" gomock "github.com/golang/mock/gomock" types "github.com/kastenhq/kubestr/pkg/csi/types" v1 "github.com/kubernetes-csi/external-snapshotter/client/v4/apis/volumesnapshot/v1" v10 "k8s.io/api/core/v1" v11 "k8s.io/api/storage/v1" ) // MockFileRestoreStepper is a mock of FileRestoreStepper interface. type MockFileRestoreStepper struct { ctrl *gomock.Controller recorder *MockFileRestoreStepperMockRecorder } // MockFileRestoreStepperMockRecorder is the mock recorder for MockFileRestoreStepper. type MockFileRestoreStepperMockRecorder struct { mock *MockFileRestoreStepper } // NewMockFileRestoreStepper creates a new mock instance. func NewMockFileRestoreStepper(ctrl *gomock.Controller) *MockFileRestoreStepper { mock := &MockFileRestoreStepper{ctrl: ctrl} mock.recorder = &MockFileRestoreStepperMockRecorder{mock} return mock } // EXPECT returns an object that allows the caller to indicate expected use. func (m *MockFileRestoreStepper) EXPECT() *MockFileRestoreStepperMockRecorder { return m.recorder } // Cleanup mocks base method. func (m *MockFileRestoreStepper) Cleanup(arg0 context.Context, arg1 *types.FileRestoreArgs, arg2 *v10.PersistentVolumeClaim, arg3 *v10.Pod) { m.ctrl.T.Helper() m.ctrl.Call(m, "Cleanup", arg0, arg1, arg2, arg3) } // Cleanup indicates an expected call of Cleanup. func (mr *MockFileRestoreStepperMockRecorder) Cleanup(arg0, arg1, arg2, arg3 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Cleanup", reflect.TypeOf((*MockFileRestoreStepper)(nil).Cleanup), arg0, arg1, arg2, arg3) } // CreateInspectorApplication mocks base method. func (m *MockFileRestoreStepper) CreateInspectorApplication(arg0 context.Context, arg1 *types.FileRestoreArgs, arg2 *v1.VolumeSnapshot, arg3, arg4 *v10.PersistentVolumeClaim, arg5 *v11.StorageClass) (*v10.Pod, *v10.PersistentVolumeClaim, string, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "CreateInspectorApplication", arg0, arg1, arg2, arg3, arg4, arg5) ret0, _ := ret[0].(*v10.Pod) ret1, _ := ret[1].(*v10.PersistentVolumeClaim) ret2, _ := ret[2].(string) ret3, _ := ret[3].(error) return ret0, ret1, ret2, ret3 } // CreateInspectorApplication indicates an expected call of CreateInspectorApplication. func (mr *MockFileRestoreStepperMockRecorder) CreateInspectorApplication(arg0, arg1, arg2, arg3, arg4, arg5 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CreateInspectorApplication", reflect.TypeOf((*MockFileRestoreStepper)(nil).CreateInspectorApplication), arg0, arg1, arg2, arg3, arg4, arg5) } // ExecuteCopyCommand mocks base method. func (m *MockFileRestoreStepper) ExecuteCopyCommand(arg0 context.Context, arg1 *types.FileRestoreArgs, arg2 *v10.Pod, arg3 string) (string, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "ExecuteCopyCommand", arg0, arg1, arg2, arg3) ret0, _ := ret[0].(string) ret1, _ := ret[1].(error) return ret0, ret1 } // ExecuteCopyCommand indicates an expected call of ExecuteCopyCommand. func (mr *MockFileRestoreStepperMockRecorder) ExecuteCopyCommand(arg0, arg1, arg2, arg3 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ExecuteCopyCommand", reflect.TypeOf((*MockFileRestoreStepper)(nil).ExecuteCopyCommand), arg0, arg1, arg2, arg3) } // PortForwardAPod mocks base method. func (m *MockFileRestoreStepper) PortForwardAPod(arg0 *v10.Pod, arg1 int) error { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "PortForwardAPod", arg0, arg1) ret0, _ := ret[0].(error) return ret0 } // PortForwardAPod indicates an expected call of PortForwardAPod. func (mr *MockFileRestoreStepperMockRecorder) PortForwardAPod(arg0, arg1 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PortForwardAPod", reflect.TypeOf((*MockFileRestoreStepper)(nil).PortForwardAPod), arg0, arg1) } // ValidateArgs mocks base method. func (m *MockFileRestoreStepper) ValidateArgs(arg0 context.Context, arg1 *types.FileRestoreArgs) (*v1.VolumeSnapshot, *v10.PersistentVolumeClaim, *v10.PersistentVolumeClaim, *v11.StorageClass, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "ValidateArgs", arg0, arg1) ret0, _ := ret[0].(*v1.VolumeSnapshot) ret1, _ := ret[1].(*v10.PersistentVolumeClaim) ret2, _ := ret[2].(*v10.PersistentVolumeClaim) ret3, _ := ret[3].(*v11.StorageClass) ret4, _ := ret[4].(error) return ret0, ret1, ret2, ret3, ret4 } // ValidateArgs indicates an expected call of ValidateArgs. func (mr *MockFileRestoreStepperMockRecorder) ValidateArgs(arg0, arg1 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ValidateArgs", reflect.TypeOf((*MockFileRestoreStepper)(nil).ValidateArgs), arg0, arg1) } ================================================ FILE: pkg/csi/mocks/mock_kube_executor.go ================================================ // Code generated by MockGen. DO NOT EDIT. // Source: github.com/kastenhq/kubestr/pkg/csi (interfaces: KubeExecutor) // Package mocks is a generated GoMock package. package mocks import ( context "context" reflect "reflect" gomock "github.com/golang/mock/gomock" ) // MockKubeExecutor is a mock of KubeExecutor interface. type MockKubeExecutor struct { ctrl *gomock.Controller recorder *MockKubeExecutorMockRecorder } // MockKubeExecutorMockRecorder is the mock recorder for MockKubeExecutor. type MockKubeExecutorMockRecorder struct { mock *MockKubeExecutor } // NewMockKubeExecutor creates a new mock instance. func NewMockKubeExecutor(ctrl *gomock.Controller) *MockKubeExecutor { mock := &MockKubeExecutor{ctrl: ctrl} mock.recorder = &MockKubeExecutorMockRecorder{mock} return mock } // EXPECT returns an object that allows the caller to indicate expected use. func (m *MockKubeExecutor) EXPECT() *MockKubeExecutorMockRecorder { return m.recorder } // Exec mocks base method. func (m *MockKubeExecutor) Exec(arg0 context.Context, arg1, arg2, arg3 string, arg4 []string) (string, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "Exec", arg0, arg1, arg2, arg3, arg4) ret0, _ := ret[0].(string) ret1, _ := ret[1].(error) return ret0, ret1 } // Exec indicates an expected call of Exec. func (mr *MockKubeExecutorMockRecorder) Exec(arg0, arg1, arg2, arg3, arg4 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Exec", reflect.TypeOf((*MockKubeExecutor)(nil).Exec), arg0, arg1, arg2, arg3, arg4) } ================================================ FILE: pkg/csi/mocks/mock_port_forwarder.go ================================================ // Code generated by MockGen. DO NOT EDIT. // Source: github.com/kastenhq/kubestr/pkg/csi (interfaces: PortForwarder) // Package mocks is a generated GoMock package. package mocks import ( reflect "reflect" gomock "github.com/golang/mock/gomock" types "github.com/kastenhq/kubestr/pkg/csi/types" rest "k8s.io/client-go/rest" ) // MockPortForwarder is a mock of PortForwarder interface. type MockPortForwarder struct { ctrl *gomock.Controller recorder *MockPortForwarderMockRecorder } // MockPortForwarderMockRecorder is the mock recorder for MockPortForwarder. type MockPortForwarderMockRecorder struct { mock *MockPortForwarder } // NewMockPortForwarder creates a new mock instance. func NewMockPortForwarder(ctrl *gomock.Controller) *MockPortForwarder { mock := &MockPortForwarder{ctrl: ctrl} mock.recorder = &MockPortForwarderMockRecorder{mock} return mock } // EXPECT returns an object that allows the caller to indicate expected use. func (m *MockPortForwarder) EXPECT() *MockPortForwarderMockRecorder { return m.recorder } // FetchRestConfig mocks base method. func (m *MockPortForwarder) FetchRestConfig() (*rest.Config, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "FetchRestConfig") ret0, _ := ret[0].(*rest.Config) ret1, _ := ret[1].(error) return ret0, ret1 } // FetchRestConfig indicates an expected call of FetchRestConfig. func (mr *MockPortForwarderMockRecorder) FetchRestConfig() *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "FetchRestConfig", reflect.TypeOf((*MockPortForwarder)(nil).FetchRestConfig)) } // PortForwardAPod mocks base method. func (m *MockPortForwarder) PortForwardAPod(arg0 *types.PortForwardAPodRequest) error { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "PortForwardAPod", arg0) ret0, _ := ret[0].(error) return ret0 } // PortForwardAPod indicates an expected call of PortForwardAPod. func (mr *MockPortForwarderMockRecorder) PortForwardAPod(arg0 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PortForwardAPod", reflect.TypeOf((*MockPortForwarder)(nil).PortForwardAPod), arg0) } ================================================ FILE: pkg/csi/mocks/mock_pvc_browser_stepper.go ================================================ // Code generated by MockGen. DO NOT EDIT. // Source: github.com/kastenhq/kubestr/pkg/csi (interfaces: PVCBrowserStepper) // Package mocks is a generated GoMock package. package mocks import ( context "context" reflect "reflect" gomock "github.com/golang/mock/gomock" types "github.com/kastenhq/kubestr/pkg/csi/types" v1 "github.com/kubernetes-csi/external-snapshotter/client/v4/apis/volumesnapshot/v1" v10 "k8s.io/api/core/v1" v11 "k8s.io/api/storage/v1" ) // MockPVCBrowserStepper is a mock of PVCBrowserStepper interface. type MockPVCBrowserStepper struct { ctrl *gomock.Controller recorder *MockPVCBrowserStepperMockRecorder } // MockPVCBrowserStepperMockRecorder is the mock recorder for MockPVCBrowserStepper. type MockPVCBrowserStepperMockRecorder struct { mock *MockPVCBrowserStepper } // NewMockPVCBrowserStepper creates a new mock instance. func NewMockPVCBrowserStepper(ctrl *gomock.Controller) *MockPVCBrowserStepper { mock := &MockPVCBrowserStepper{ctrl: ctrl} mock.recorder = &MockPVCBrowserStepperMockRecorder{mock} return mock } // EXPECT returns an object that allows the caller to indicate expected use. func (m *MockPVCBrowserStepper) EXPECT() *MockPVCBrowserStepperMockRecorder { return m.recorder } // Cleanup mocks base method. func (m *MockPVCBrowserStepper) Cleanup(arg0 context.Context, arg1 *v10.PersistentVolumeClaim, arg2 *v10.Pod, arg3 *v1.VolumeSnapshot) { m.ctrl.T.Helper() m.ctrl.Call(m, "Cleanup", arg0, arg1, arg2, arg3) } // Cleanup indicates an expected call of Cleanup. func (mr *MockPVCBrowserStepperMockRecorder) Cleanup(arg0, arg1, arg2, arg3 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Cleanup", reflect.TypeOf((*MockPVCBrowserStepper)(nil).Cleanup), arg0, arg1, arg2, arg3) } // CreateInspectorApplication mocks base method. func (m *MockPVCBrowserStepper) CreateInspectorApplication(arg0 context.Context, arg1 *types.PVCBrowseArgs, arg2 *v1.VolumeSnapshot, arg3 *v11.StorageClass) (*v10.Pod, *v10.PersistentVolumeClaim, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "CreateInspectorApplication", arg0, arg1, arg2, arg3) ret0, _ := ret[0].(*v10.Pod) ret1, _ := ret[1].(*v10.PersistentVolumeClaim) ret2, _ := ret[2].(error) return ret0, ret1, ret2 } // CreateInspectorApplication indicates an expected call of CreateInspectorApplication. func (mr *MockPVCBrowserStepperMockRecorder) CreateInspectorApplication(arg0, arg1, arg2, arg3 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CreateInspectorApplication", reflect.TypeOf((*MockPVCBrowserStepper)(nil).CreateInspectorApplication), arg0, arg1, arg2, arg3) } // ExecuteTreeCommand mocks base method. func (m *MockPVCBrowserStepper) ExecuteTreeCommand(arg0 context.Context, arg1 *types.PVCBrowseArgs, arg2 *v10.Pod) (string, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "ExecuteTreeCommand", arg0, arg1, arg2) ret0, _ := ret[0].(string) ret1, _ := ret[1].(error) return ret0, ret1 } // ExecuteTreeCommand indicates an expected call of ExecuteTreeCommand. func (mr *MockPVCBrowserStepperMockRecorder) ExecuteTreeCommand(arg0, arg1, arg2 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ExecuteTreeCommand", reflect.TypeOf((*MockPVCBrowserStepper)(nil).ExecuteTreeCommand), arg0, arg1, arg2) } // PortForwardAPod mocks base method. func (m *MockPVCBrowserStepper) PortForwardAPod(arg0 context.Context, arg1 *v10.Pod, arg2 int) error { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "PortForwardAPod", arg0, arg1, arg2) ret0, _ := ret[0].(error) return ret0 } // PortForwardAPod indicates an expected call of PortForwardAPod. func (mr *MockPVCBrowserStepperMockRecorder) PortForwardAPod(arg0, arg1, arg2 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PortForwardAPod", reflect.TypeOf((*MockPVCBrowserStepper)(nil).PortForwardAPod), arg0, arg1, arg2) } // SnapshotPVC mocks base method. func (m *MockPVCBrowserStepper) SnapshotPVC(arg0 context.Context, arg1 *types.PVCBrowseArgs, arg2 string) (*v1.VolumeSnapshot, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "SnapshotPVC", arg0, arg1, arg2) ret0, _ := ret[0].(*v1.VolumeSnapshot) ret1, _ := ret[1].(error) return ret0, ret1 } // SnapshotPVC indicates an expected call of SnapshotPVC. func (mr *MockPVCBrowserStepperMockRecorder) SnapshotPVC(arg0, arg1, arg2 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SnapshotPVC", reflect.TypeOf((*MockPVCBrowserStepper)(nil).SnapshotPVC), arg0, arg1, arg2) } // ValidateArgs mocks base method. func (m *MockPVCBrowserStepper) ValidateArgs(arg0 context.Context, arg1 *types.PVCBrowseArgs) (*v11.StorageClass, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "ValidateArgs", arg0, arg1) ret0, _ := ret[0].(*v11.StorageClass) ret1, _ := ret[1].(error) return ret0, ret1 } // ValidateArgs indicates an expected call of ValidateArgs. func (mr *MockPVCBrowserStepperMockRecorder) ValidateArgs(arg0, arg1 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ValidateArgs", reflect.TypeOf((*MockPVCBrowserStepper)(nil).ValidateArgs), arg0, arg1) } ================================================ FILE: pkg/csi/mocks/mock_snapshot_browser_stepper.go ================================================ // Code generated by MockGen. DO NOT EDIT. // Source: github.com/kastenhq/kubestr/pkg/csi (interfaces: SnapshotBrowserStepper) // Package mocks is a generated GoMock package. package mocks import ( context "context" reflect "reflect" gomock "github.com/golang/mock/gomock" types "github.com/kastenhq/kubestr/pkg/csi/types" v1 "github.com/kubernetes-csi/external-snapshotter/client/v4/apis/volumesnapshot/v1" v10 "k8s.io/api/core/v1" v11 "k8s.io/api/storage/v1" ) // MockSnapshotBrowserStepper is a mock of SnapshotBrowserStepper interface. type MockSnapshotBrowserStepper struct { ctrl *gomock.Controller recorder *MockSnapshotBrowserStepperMockRecorder } // MockSnapshotBrowserStepperMockRecorder is the mock recorder for MockSnapshotBrowserStepper. type MockSnapshotBrowserStepperMockRecorder struct { mock *MockSnapshotBrowserStepper } // NewMockSnapshotBrowserStepper creates a new mock instance. func NewMockSnapshotBrowserStepper(ctrl *gomock.Controller) *MockSnapshotBrowserStepper { mock := &MockSnapshotBrowserStepper{ctrl: ctrl} mock.recorder = &MockSnapshotBrowserStepperMockRecorder{mock} return mock } // EXPECT returns an object that allows the caller to indicate expected use. func (m *MockSnapshotBrowserStepper) EXPECT() *MockSnapshotBrowserStepperMockRecorder { return m.recorder } // Cleanup mocks base method. func (m *MockSnapshotBrowserStepper) Cleanup(arg0 context.Context, arg1 *v10.PersistentVolumeClaim, arg2 *v10.Pod) { m.ctrl.T.Helper() m.ctrl.Call(m, "Cleanup", arg0, arg1, arg2) } // Cleanup indicates an expected call of Cleanup. func (mr *MockSnapshotBrowserStepperMockRecorder) Cleanup(arg0, arg1, arg2 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Cleanup", reflect.TypeOf((*MockSnapshotBrowserStepper)(nil).Cleanup), arg0, arg1, arg2) } // CreateInspectorApplication mocks base method. func (m *MockSnapshotBrowserStepper) CreateInspectorApplication(arg0 context.Context, arg1 *types.SnapshotBrowseArgs, arg2 *v1.VolumeSnapshot, arg3 *v11.StorageClass) (*v10.Pod, *v10.PersistentVolumeClaim, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "CreateInspectorApplication", arg0, arg1, arg2, arg3) ret0, _ := ret[0].(*v10.Pod) ret1, _ := ret[1].(*v10.PersistentVolumeClaim) ret2, _ := ret[2].(error) return ret0, ret1, ret2 } // CreateInspectorApplication indicates an expected call of CreateInspectorApplication. func (mr *MockSnapshotBrowserStepperMockRecorder) CreateInspectorApplication(arg0, arg1, arg2, arg3 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CreateInspectorApplication", reflect.TypeOf((*MockSnapshotBrowserStepper)(nil).CreateInspectorApplication), arg0, arg1, arg2, arg3) } // ExecuteTreeCommand mocks base method. func (m *MockSnapshotBrowserStepper) ExecuteTreeCommand(arg0 context.Context, arg1 *types.SnapshotBrowseArgs, arg2 *v10.Pod) (string, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "ExecuteTreeCommand", arg0, arg1, arg2) ret0, _ := ret[0].(string) ret1, _ := ret[1].(error) return ret0, ret1 } // ExecuteTreeCommand indicates an expected call of ExecuteTreeCommand. func (mr *MockSnapshotBrowserStepperMockRecorder) ExecuteTreeCommand(arg0, arg1, arg2 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ExecuteTreeCommand", reflect.TypeOf((*MockSnapshotBrowserStepper)(nil).ExecuteTreeCommand), arg0, arg1, arg2) } // PortForwardAPod mocks base method. func (m *MockSnapshotBrowserStepper) PortForwardAPod(arg0 context.Context, arg1 *v10.Pod, arg2 int) error { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "PortForwardAPod", arg0, arg1, arg2) ret0, _ := ret[0].(error) return ret0 } // PortForwardAPod indicates an expected call of PortForwardAPod. func (mr *MockSnapshotBrowserStepperMockRecorder) PortForwardAPod(arg0, arg1, arg2 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PortForwardAPod", reflect.TypeOf((*MockSnapshotBrowserStepper)(nil).PortForwardAPod), arg0, arg1, arg2) } // ValidateArgs mocks base method. func (m *MockSnapshotBrowserStepper) ValidateArgs(arg0 context.Context, arg1 *types.SnapshotBrowseArgs) (*v1.VolumeSnapshot, *v11.StorageClass, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "ValidateArgs", arg0, arg1) ret0, _ := ret[0].(*v1.VolumeSnapshot) ret1, _ := ret[1].(*v11.StorageClass) ret2, _ := ret[2].(error) return ret0, ret1, ret2 } // ValidateArgs indicates an expected call of ValidateArgs. func (mr *MockSnapshotBrowserStepperMockRecorder) ValidateArgs(arg0, arg1 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ValidateArgs", reflect.TypeOf((*MockSnapshotBrowserStepper)(nil).ValidateArgs), arg0, arg1) } ================================================ FILE: pkg/csi/mocks/mock_snapshot_creator.go ================================================ // Code generated by MockGen. DO NOT EDIT. // Source: github.com/kastenhq/kubestr/pkg/csi (interfaces: SnapshotCreator) // Package mocks is a generated GoMock package. package mocks import ( context "context" reflect "reflect" gomock "github.com/golang/mock/gomock" snapshot "github.com/kanisterio/kanister/pkg/kube/snapshot" types "github.com/kastenhq/kubestr/pkg/csi/types" v1 "github.com/kubernetes-csi/external-snapshotter/client/v4/apis/volumesnapshot/v1" v10 "k8s.io/apimachinery/pkg/apis/meta/v1" ) // MockSnapshotCreator is a mock of SnapshotCreator interface. type MockSnapshotCreator struct { ctrl *gomock.Controller recorder *MockSnapshotCreatorMockRecorder } // MockSnapshotCreatorMockRecorder is the mock recorder for MockSnapshotCreator. type MockSnapshotCreatorMockRecorder struct { mock *MockSnapshotCreator } // NewMockSnapshotCreator creates a new mock instance. func NewMockSnapshotCreator(ctrl *gomock.Controller) *MockSnapshotCreator { mock := &MockSnapshotCreator{ctrl: ctrl} mock.recorder = &MockSnapshotCreatorMockRecorder{mock} return mock } // EXPECT returns an object that allows the caller to indicate expected use. func (m *MockSnapshotCreator) EXPECT() *MockSnapshotCreatorMockRecorder { return m.recorder } // CreateFromSourceCheck mocks base method. func (m *MockSnapshotCreator) CreateFromSourceCheck(arg0 context.Context, arg1 snapshot.Snapshotter, arg2 *types.CreateFromSourceCheckArgs, arg3 *v10.GroupVersionForDiscovery) error { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "CreateFromSourceCheck", arg0, arg1, arg2, arg3) ret0, _ := ret[0].(error) return ret0 } // CreateFromSourceCheck indicates an expected call of CreateFromSourceCheck. func (mr *MockSnapshotCreatorMockRecorder) CreateFromSourceCheck(arg0, arg1, arg2, arg3 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CreateFromSourceCheck", reflect.TypeOf((*MockSnapshotCreator)(nil).CreateFromSourceCheck), arg0, arg1, arg2, arg3) } // CreateSnapshot mocks base method. func (m *MockSnapshotCreator) CreateSnapshot(arg0 context.Context, arg1 snapshot.Snapshotter, arg2 *types.CreateSnapshotArgs) (*v1.VolumeSnapshot, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "CreateSnapshot", arg0, arg1, arg2) ret0, _ := ret[0].(*v1.VolumeSnapshot) ret1, _ := ret[1].(error) return ret0, ret1 } // CreateSnapshot indicates an expected call of CreateSnapshot. func (mr *MockSnapshotCreatorMockRecorder) CreateSnapshot(arg0, arg1, arg2 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CreateSnapshot", reflect.TypeOf((*MockSnapshotCreator)(nil).CreateSnapshot), arg0, arg1, arg2) } // NewSnapshotter mocks base method. func (m *MockSnapshotCreator) NewSnapshotter() (snapshot.Snapshotter, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "NewSnapshotter") ret0, _ := ret[0].(snapshot.Snapshotter) ret1, _ := ret[1].(error) return ret0, ret1 } // NewSnapshotter indicates an expected call of NewSnapshotter. func (mr *MockSnapshotCreatorMockRecorder) NewSnapshotter() *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "NewSnapshotter", reflect.TypeOf((*MockSnapshotCreator)(nil).NewSnapshotter)) } ================================================ FILE: pkg/csi/mocks/mock_snapshot_restore_stepper.go ================================================ // Code generated by MockGen. DO NOT EDIT. // Source: github.com/kastenhq/kubestr/pkg/csi (interfaces: SnapshotRestoreStepper) // Package mocks is a generated GoMock package. package mocks import ( context "context" reflect "reflect" gomock "github.com/golang/mock/gomock" types "github.com/kastenhq/kubestr/pkg/csi/types" v1 "github.com/kubernetes-csi/external-snapshotter/client/v4/apis/volumesnapshot/v1" v10 "k8s.io/api/core/v1" ) // MockSnapshotRestoreStepper is a mock of SnapshotRestoreStepper interface. type MockSnapshotRestoreStepper struct { ctrl *gomock.Controller recorder *MockSnapshotRestoreStepperMockRecorder } // MockSnapshotRestoreStepperMockRecorder is the mock recorder for MockSnapshotRestoreStepper. type MockSnapshotRestoreStepperMockRecorder struct { mock *MockSnapshotRestoreStepper } // NewMockSnapshotRestoreStepper creates a new mock instance. func NewMockSnapshotRestoreStepper(ctrl *gomock.Controller) *MockSnapshotRestoreStepper { mock := &MockSnapshotRestoreStepper{ctrl: ctrl} mock.recorder = &MockSnapshotRestoreStepperMockRecorder{mock} return mock } // EXPECT returns an object that allows the caller to indicate expected use. func (m *MockSnapshotRestoreStepper) EXPECT() *MockSnapshotRestoreStepperMockRecorder { return m.recorder } // Cleanup mocks base method. func (m *MockSnapshotRestoreStepper) Cleanup(arg0 context.Context, arg1 *types.CSISnapshotRestoreResults) { m.ctrl.T.Helper() m.ctrl.Call(m, "Cleanup", arg0, arg1) } // Cleanup indicates an expected call of Cleanup. func (mr *MockSnapshotRestoreStepperMockRecorder) Cleanup(arg0, arg1 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Cleanup", reflect.TypeOf((*MockSnapshotRestoreStepper)(nil).Cleanup), arg0, arg1) } // CreateApplication mocks base method. func (m *MockSnapshotRestoreStepper) CreateApplication(arg0 context.Context, arg1 *types.CSISnapshotRestoreArgs, arg2 string) (*v10.Pod, *v10.PersistentVolumeClaim, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "CreateApplication", arg0, arg1, arg2) ret0, _ := ret[0].(*v10.Pod) ret1, _ := ret[1].(*v10.PersistentVolumeClaim) ret2, _ := ret[2].(error) return ret0, ret1, ret2 } // CreateApplication indicates an expected call of CreateApplication. func (mr *MockSnapshotRestoreStepperMockRecorder) CreateApplication(arg0, arg1, arg2 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CreateApplication", reflect.TypeOf((*MockSnapshotRestoreStepper)(nil).CreateApplication), arg0, arg1, arg2) } // RestoreApplication mocks base method. func (m *MockSnapshotRestoreStepper) RestoreApplication(arg0 context.Context, arg1 *types.CSISnapshotRestoreArgs, arg2 *v1.VolumeSnapshot) (*v10.Pod, *v10.PersistentVolumeClaim, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "RestoreApplication", arg0, arg1, arg2) ret0, _ := ret[0].(*v10.Pod) ret1, _ := ret[1].(*v10.PersistentVolumeClaim) ret2, _ := ret[2].(error) return ret0, ret1, ret2 } // RestoreApplication indicates an expected call of RestoreApplication. func (mr *MockSnapshotRestoreStepperMockRecorder) RestoreApplication(arg0, arg1, arg2 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RestoreApplication", reflect.TypeOf((*MockSnapshotRestoreStepper)(nil).RestoreApplication), arg0, arg1, arg2) } // SnapshotApplication mocks base method. func (m *MockSnapshotRestoreStepper) SnapshotApplication(arg0 context.Context, arg1 *types.CSISnapshotRestoreArgs, arg2 *v10.PersistentVolumeClaim, arg3 string) (*v1.VolumeSnapshot, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "SnapshotApplication", arg0, arg1, arg2, arg3) ret0, _ := ret[0].(*v1.VolumeSnapshot) ret1, _ := ret[1].(error) return ret0, ret1 } // SnapshotApplication indicates an expected call of SnapshotApplication. func (mr *MockSnapshotRestoreStepperMockRecorder) SnapshotApplication(arg0, arg1, arg2, arg3 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SnapshotApplication", reflect.TypeOf((*MockSnapshotRestoreStepper)(nil).SnapshotApplication), arg0, arg1, arg2, arg3) } // ValidateArgs mocks base method. func (m *MockSnapshotRestoreStepper) ValidateArgs(arg0 context.Context, arg1 *types.CSISnapshotRestoreArgs) error { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "ValidateArgs", arg0, arg1) ret0, _ := ret[0].(error) return ret0 } // ValidateArgs indicates an expected call of ValidateArgs. func (mr *MockSnapshotRestoreStepperMockRecorder) ValidateArgs(arg0, arg1 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ValidateArgs", reflect.TypeOf((*MockSnapshotRestoreStepper)(nil).ValidateArgs), arg0, arg1) } // ValidateData mocks base method. func (m *MockSnapshotRestoreStepper) ValidateData(arg0 context.Context, arg1 *v10.Pod, arg2 string) error { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "ValidateData", arg0, arg1, arg2) ret0, _ := ret[0].(error) return ret0 } // ValidateData indicates an expected call of ValidateData. func (mr *MockSnapshotRestoreStepperMockRecorder) ValidateData(arg0, arg1, arg2 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ValidateData", reflect.TypeOf((*MockSnapshotRestoreStepper)(nil).ValidateData), arg0, arg1, arg2) } ================================================ FILE: pkg/csi/pvc_inspector.go ================================================ package csi import ( "bytes" "context" "fmt" "log" "os" "os/exec" "os/signal" "runtime" "sync" "syscall" "time" "github.com/kastenhq/kubestr/pkg/csi/types" snapv1 "github.com/kubernetes-csi/external-snapshotter/client/v4/apis/volumesnapshot/v1" "github.com/pkg/errors" v1 "k8s.io/api/core/v1" sv1 "k8s.io/api/storage/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/client-go/dynamic" "k8s.io/client-go/kubernetes" ) type PVCBrowseRunner struct { KubeCli kubernetes.Interface DynCli dynamic.Interface browserSteps PVCBrowserStepper pvc *v1.PersistentVolumeClaim pod *v1.Pod snapshot *snapv1.VolumeSnapshot } func (r *PVCBrowseRunner) RunPVCBrowse(ctx context.Context, args *types.PVCBrowseArgs) error { r.browserSteps = &pvcBrowserSteps{ validateOps: &validateOperations{ kubeCli: r.KubeCli, dynCli: r.DynCli, }, versionFetchOps: &apiVersionFetch{ kubeCli: r.KubeCli, }, createAppOps: &applicationCreate{ kubeCli: r.KubeCli, }, snapshotCreateOps: &snapshotCreate{ kubeCli: r.KubeCli, dynCli: r.DynCli, }, portForwardOps: &portforward{}, kubeExecutor: &kubeExec{ kubeCli: r.KubeCli, }, cleanerOps: &cleanse{ kubeCli: r.KubeCli, dynCli: r.DynCli, }, } if args.ShowTree { fmt.Println("Show Tree works for PVC!") return nil } return r.RunPVCBrowseHelper(ctx, args) } func (r *PVCBrowseRunner) RunPVCBrowseHelper(ctx context.Context, args *types.PVCBrowseArgs) error { defer func() { fmt.Println("Cleaning up resources") r.browserSteps.Cleanup(ctx, r.pvc, r.pod, r.snapshot) }() if r.KubeCli == nil || r.DynCli == nil { return fmt.Errorf("cli uninitialized") } sc, err := r.browserSteps.ValidateArgs(ctx, args) if err != nil { return errors.Wrap(err, "failed to validate arguments") } fmt.Println("Taking a snapshot.") snapName := snapshotPrefix + time.Now().Format("20060102150405") r.snapshot, err = r.browserSteps.SnapshotPVC(ctx, args, snapName) if err != nil { return errors.Wrap(err, "failed to snapshot PVC") } fmt.Println("Creating the browser pod.") r.pod, r.pvc, err = r.browserSteps.CreateInspectorApplication(ctx, args, r.snapshot, sc) if err != nil { return errors.Wrap(err, "failed to create inspector application") } if args.ShowTree { fmt.Println("Printing the tree structure from root directory.") stdout, err := r.browserSteps.ExecuteTreeCommand(ctx, args, r.pod) if err != nil { return errors.Wrap(err, "failed to execute tree command in pod") } fmt.Printf("\n%s\n\n", stdout) return nil } fmt.Println("Forwarding the port.") err = r.browserSteps.PortForwardAPod(ctx, r.pod, args.LocalPort) if err != nil { return errors.Wrap(err, "failed to forward pod port") } return nil } //go:generate go run github.com/golang/mock/mockgen -destination=mocks/mock_pvc_browser_stepper.go -package=mocks . PVCBrowserStepper type PVCBrowserStepper interface { ValidateArgs(ctx context.Context, args *types.PVCBrowseArgs) (*sv1.StorageClass, error) SnapshotPVC(ctx context.Context, args *types.PVCBrowseArgs, snapshotName string) (*snapv1.VolumeSnapshot, error) CreateInspectorApplication(ctx context.Context, args *types.PVCBrowseArgs, snapshot *snapv1.VolumeSnapshot, storageClass *sv1.StorageClass) (*v1.Pod, *v1.PersistentVolumeClaim, error) ExecuteTreeCommand(ctx context.Context, args *types.PVCBrowseArgs, pod *v1.Pod) (string, error) PortForwardAPod(ctx context.Context, pod *v1.Pod, localPort int) error Cleanup(ctx context.Context, pvc *v1.PersistentVolumeClaim, pod *v1.Pod, snapshot *snapv1.VolumeSnapshot) } type pvcBrowserSteps struct { validateOps ArgumentValidator versionFetchOps ApiVersionFetcher createAppOps ApplicationCreator snapshotCreateOps SnapshotCreator portForwardOps PortForwarder cleanerOps Cleaner kubeExecutor KubeExecutor SnapshotGroupVersion *metav1.GroupVersionForDiscovery } func (p *pvcBrowserSteps) ValidateArgs(ctx context.Context, args *types.PVCBrowseArgs) (*sv1.StorageClass, error) { if err := args.Validate(); err != nil { return nil, errors.Wrap(err, "failed to validate input arguments") } if err := p.validateOps.ValidateNamespace(ctx, args.Namespace); err != nil { return nil, errors.Wrap(err, "failed to validate Namespace") } pvc, err := p.validateOps.ValidatePVC(ctx, args.PVCName, args.Namespace) if err != nil { return nil, errors.Wrap(err, "failed to validate PVC") } pvName := pvc.Spec.VolumeName if pvName == "" { return nil, errors.Errorf("PVC (%s) not bound. namespace - (%s)", pvc.Name, pvc.Namespace) } pv, err := p.validateOps.FetchPV(ctx, pvName) if err != nil { return nil, errors.Wrap(err, "failed to fetch PV") } if pv.Spec.CSI == nil { return nil, errors.New("PVC is not using a CSI volume") } sc, err := p.validateOps.ValidateStorageClass(ctx, *pvc.Spec.StorageClassName) if err != nil { return nil, errors.Wrap(err, "failed to validate SC") } groupVersion, err := p.versionFetchOps.GetCSISnapshotGroupVersion() if err != nil { return nil, errors.Wrap(err, "failed to fetch groupVersion") } p.SnapshotGroupVersion = groupVersion uVSC, err := p.validateOps.ValidateVolumeSnapshotClass(ctx, args.VolumeSnapshotClass, groupVersion) if err != nil { return nil, errors.Wrap(err, "failed to validate VolumeSnapshotClass") } vscDriver := getDriverNameFromUVSC(*uVSC, groupVersion.GroupVersion) if sc.Provisioner != vscDriver { return nil, fmt.Errorf("provisioner for StorageClass (%s) and VolumeSnapshotClass driver (%s) are different", sc.Provisioner, vscDriver) } return sc, nil } func (p *pvcBrowserSteps) SnapshotPVC(ctx context.Context, args *types.PVCBrowseArgs, snapshotName string) (*snapv1.VolumeSnapshot, error) { snapshotter, err := p.snapshotCreateOps.NewSnapshotter() if err != nil { return nil, errors.Wrap(err, "failed to load snapshotter") } createSnapshotArgs := &types.CreateSnapshotArgs{ Namespace: args.Namespace, PVCName: args.PVCName, VolumeSnapshotClass: args.VolumeSnapshotClass, SnapshotName: snapshotName, } return p.snapshotCreateOps.CreateSnapshot(ctx, snapshotter, createSnapshotArgs) } func (p *pvcBrowserSteps) CreateInspectorApplication(ctx context.Context, args *types.PVCBrowseArgs, snapshot *snapv1.VolumeSnapshot, storageClass *sv1.StorageClass) (*v1.Pod, *v1.PersistentVolumeClaim, error) { snapshotAPIGroup := "snapshot.storage.k8s.io" snapshotKind := "VolumeSnapshot" dataSource := &v1.TypedLocalObjectReference{ APIGroup: &snapshotAPIGroup, Kind: snapshotKind, Name: snapshot.Name, } pvcArgs := &types.CreatePVCArgs{ GenerateName: clonedPVCGenerateName, StorageClass: storageClass.Name, Namespace: args.Namespace, DataSource: dataSource, RestoreSize: snapshot.Status.RestoreSize, } pvc, err := p.createAppOps.CreatePVC(ctx, pvcArgs) if err != nil { return nil, nil, errors.Wrap(err, "failed to restore PVC") } podArgs := &types.CreatePodArgs{ GenerateName: clonedPodGenerateName, Namespace: args.Namespace, RunAsUser: args.RunAsUser, ContainerImage: "filebrowser/filebrowser:v2", ContainerArgs: []string{"--noauth", "-r", "/pvc-data"}, PVCMap: map[string]types.VolumePath{ pvc.Name: { MountPath: "/pvc-data", }, }, } if args.ShowTree { podArgs = &types.CreatePodArgs{ GenerateName: clonedPodGenerateName, Namespace: args.Namespace, RunAsUser: args.RunAsUser, ContainerImage: "alpine:3.19", Command: []string{"/bin/sh"}, ContainerArgs: []string{"-c", "while true; do sleep 3600; done"}, PVCMap: map[string]types.VolumePath{ pvc.Name: { MountPath: "/pvc-data", }, }, } } pod, err := p.createAppOps.CreatePod(ctx, podArgs) if err != nil { return nil, pvc, errors.Wrap(err, "failed to create browse pod") } if err = p.createAppOps.WaitForPodReady(ctx, args.Namespace, pod.Name); err != nil { return pod, pvc, errors.Wrap(err, "pod failed to become ready") } return pod, pvc, nil } func (p *pvcBrowserSteps) ExecuteTreeCommand(ctx context.Context, args *types.PVCBrowseArgs, pod *v1.Pod) (string, error) { command := []string{"tree", "/pvc-data"} stdout, err := p.kubeExecutor.Exec(ctx, args.Namespace, pod.Name, pod.Spec.Containers[0].Name, command) if err != nil { return "", errors.Wrapf(err, "error running command:(%v)", command) } return stdout, nil } func (p *pvcBrowserSteps) PortForwardAPod(ctx context.Context, pod *v1.Pod, localPort int) error { var wg sync.WaitGroup wg.Add(1) stopChan, readyChan, errChan := make(chan struct{}, 1), make(chan struct{}, 1), make(chan string) out, errOut := new(bytes.Buffer), new(bytes.Buffer) cfg, err := p.portForwardOps.FetchRestConfig() if err != nil { return errors.New("Failed to fetch rest config") } sigs := make(chan os.Signal, 1) signal.Notify(sigs, syscall.SIGINT, syscall.SIGTERM) go func() { <-sigs fmt.Println("Stopping port forward") close(stopChan) wg.Done() }() go func() { pfArgs := &types.PortForwardAPodRequest{ RestConfig: cfg, Pod: pod, LocalPort: localPort, PodPort: 80, OutStream: bytes.Buffer(*out), ErrOutStream: bytes.Buffer(*errOut), StopCh: stopChan, ReadyCh: readyChan, } err = p.portForwardOps.PortForwardAPod(pfArgs) if err != nil { errChan <- fmt.Sprintf("Failed to port forward (%s)", err.Error()) } }() select { case <-readyChan: url := fmt.Sprintf("http://localhost:%d/", localPort) fmt.Printf("Port forwarding is ready to get traffic. visit %s\n", url) openbrowser(url) wg.Wait() case msg := <-errChan: return errors.New(msg) } return nil } func (p *pvcBrowserSteps) Cleanup(ctx context.Context, pvc *v1.PersistentVolumeClaim, pod *v1.Pod, snapshot *snapv1.VolumeSnapshot) { if pvc != nil { err := p.cleanerOps.DeletePVC(ctx, pvc.Name, pvc.Namespace) if err != nil { fmt.Println("Failed to delete PVC", pvc) } } if pod != nil { err := p.cleanerOps.DeletePod(ctx, pod.Name, pod.Namespace) if err != nil { fmt.Println("Failed to delete Pod", pod) } } if snapshot != nil { err := p.cleanerOps.DeleteSnapshot(ctx, snapshot.Name, snapshot.Namespace, p.SnapshotGroupVersion) if err != nil { fmt.Println("Failed to delete Snapshot", snapshot) } } } func openbrowser(url string) { var err error switch runtime.GOOS { case "linux": err = exec.Command("xdg-open", url).Start() case "windows": err = exec.Command("rundll32", "url.dll,FileProtocolHandler", url).Start() case "darwin": err = exec.Command("open", url).Start() default: err = fmt.Errorf("unsupported platform") } if err != nil { log.Fatal(err) } } ================================================ FILE: pkg/csi/pvc_inspector_steps_test.go ================================================ package csi import ( "context" "fmt" "github.com/golang/mock/gomock" "github.com/kastenhq/kubestr/pkg/common" "github.com/kastenhq/kubestr/pkg/csi/mocks" "github.com/kastenhq/kubestr/pkg/csi/types" snapv1 "github.com/kubernetes-csi/external-snapshotter/client/v4/apis/volumesnapshot/v1" . "gopkg.in/check.v1" v1 "k8s.io/api/core/v1" sv1 "k8s.io/api/storage/v1" "k8s.io/apimachinery/pkg/api/resource" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" ) func (s *CSITestSuite) TestPvcBrowseValidateArgs(c *C) { ctx := context.Background() scName := "sc" type fields struct { validateOps *mocks.MockArgumentValidator versionOps *mocks.MockApiVersionFetcher } for _, tc := range []struct { args *types.PVCBrowseArgs prepare func(f *fields) errChecker Checker }{ { // valid args args: &types.PVCBrowseArgs{ PVCName: "pvc", VolumeSnapshotClass: "vsc", Namespace: "ns", }, prepare: func(f *fields) { gomock.InOrder( f.validateOps.EXPECT().ValidateNamespace(gomock.Any(), "ns").Return(nil), f.validateOps.EXPECT().ValidatePVC(gomock.Any(), "pvc", "ns").Return( &v1.PersistentVolumeClaim{ ObjectMeta: metav1.ObjectMeta{ Name: "pvc", Namespace: "ns", }, Spec: v1.PersistentVolumeClaimSpec{ VolumeName: "vol", StorageClassName: &scName, }, }, nil, ), f.validateOps.EXPECT().FetchPV(gomock.Any(), "vol").Return( &v1.PersistentVolume{ ObjectMeta: metav1.ObjectMeta{ Name: "vol", }, Spec: v1.PersistentVolumeSpec{ PersistentVolumeSource: v1.PersistentVolumeSource{ CSI: &v1.CSIPersistentVolumeSource{}, }, }, }, nil, ), f.validateOps.EXPECT().ValidateStorageClass(gomock.Any(), "sc").Return( &sv1.StorageClass{ Provisioner: "p1", }, nil), f.versionOps.EXPECT().GetCSISnapshotGroupVersion().Return( &metav1.GroupVersionForDiscovery{ GroupVersion: common.SnapshotVersion, }, nil), f.validateOps.EXPECT().ValidateVolumeSnapshotClass(gomock.Any(), "vsc", &metav1.GroupVersionForDiscovery{ GroupVersion: common.SnapshotVersion, }).Return(&unstructured.Unstructured{ Object: map[string]interface{}{ common.VolSnapClassDriverKey: "p1", }, }, nil), ) }, errChecker: IsNil, }, { // driver mismatch args: &types.PVCBrowseArgs{ PVCName: "pvc", VolumeSnapshotClass: "vsc", Namespace: "ns", }, prepare: func(f *fields) { gomock.InOrder( f.validateOps.EXPECT().ValidateNamespace(gomock.Any(), "ns").Return(nil), f.validateOps.EXPECT().ValidatePVC(gomock.Any(), "pvc", "ns").Return( &v1.PersistentVolumeClaim{ ObjectMeta: metav1.ObjectMeta{ Name: "pvc", Namespace: "ns", }, Spec: v1.PersistentVolumeClaimSpec{ VolumeName: "vol", StorageClassName: &scName, }, }, nil, ), f.validateOps.EXPECT().FetchPV(gomock.Any(), "vol").Return( &v1.PersistentVolume{ ObjectMeta: metav1.ObjectMeta{ Name: "vol", }, Spec: v1.PersistentVolumeSpec{ PersistentVolumeSource: v1.PersistentVolumeSource{ CSI: &v1.CSIPersistentVolumeSource{}, }, }, }, nil, ), f.validateOps.EXPECT().ValidateStorageClass(gomock.Any(), gomock.Any()).Return( &sv1.StorageClass{ Provisioner: "p1", }, nil), f.versionOps.EXPECT().GetCSISnapshotGroupVersion().Return( &metav1.GroupVersionForDiscovery{ GroupVersion: common.SnapshotVersion, }, nil), f.validateOps.EXPECT().ValidateVolumeSnapshotClass(gomock.Any(), "vsc", &metav1.GroupVersionForDiscovery{ GroupVersion: common.SnapshotVersion, }).Return(&unstructured.Unstructured{ Object: map[string]interface{}{ common.VolSnapClassDriverKey: "p2", }, }, nil), ) }, errChecker: NotNil, }, { // vsc error args: &types.PVCBrowseArgs{ PVCName: "pvc", VolumeSnapshotClass: "vsc", Namespace: "ns", }, prepare: func(f *fields) { gomock.InOrder( f.validateOps.EXPECT().ValidateNamespace(gomock.Any(), "ns").Return(nil), f.validateOps.EXPECT().ValidatePVC(gomock.Any(), gomock.Any(), gomock.Any()).Return( &v1.PersistentVolumeClaim{ ObjectMeta: metav1.ObjectMeta{ Name: "pvc", Namespace: "ns", }, Spec: v1.PersistentVolumeClaimSpec{ VolumeName: "vol", StorageClassName: &scName, }, }, nil, ), f.validateOps.EXPECT().FetchPV(gomock.Any(), "vol").Return( &v1.PersistentVolume{ ObjectMeta: metav1.ObjectMeta{ Name: "vol", }, Spec: v1.PersistentVolumeSpec{ PersistentVolumeSource: v1.PersistentVolumeSource{ CSI: &v1.CSIPersistentVolumeSource{}, }, }, }, nil, ), f.validateOps.EXPECT().ValidateStorageClass(gomock.Any(), gomock.Any()).Return(nil, nil), f.versionOps.EXPECT().GetCSISnapshotGroupVersion().Return(nil, nil), f.validateOps.EXPECT().ValidateVolumeSnapshotClass(gomock.Any(), gomock.Any(), gomock.Any()).Return(nil, fmt.Errorf("vsc error")), ) }, errChecker: NotNil, }, { // get driver versionn error args: &types.PVCBrowseArgs{ PVCName: "pvc", VolumeSnapshotClass: "vsc", Namespace: "ns", }, prepare: func(f *fields) { gomock.InOrder( f.validateOps.EXPECT().ValidateNamespace(gomock.Any(), "ns").Return(nil), f.validateOps.EXPECT().ValidatePVC(gomock.Any(), gomock.Any(), gomock.Any()).Return( &v1.PersistentVolumeClaim{ ObjectMeta: metav1.ObjectMeta{ Name: "pvc", Namespace: "ns", }, Spec: v1.PersistentVolumeClaimSpec{ VolumeName: "vol", StorageClassName: &scName, }, }, nil, ), f.validateOps.EXPECT().FetchPV(gomock.Any(), "vol").Return( &v1.PersistentVolume{ ObjectMeta: metav1.ObjectMeta{ Name: "vol", }, Spec: v1.PersistentVolumeSpec{ PersistentVolumeSource: v1.PersistentVolumeSource{ CSI: &v1.CSIPersistentVolumeSource{}, }, }, }, nil, ), f.validateOps.EXPECT().ValidateStorageClass(gomock.Any(), gomock.Any()).Return(nil, nil), f.versionOps.EXPECT().GetCSISnapshotGroupVersion().Return(nil, fmt.Errorf("driver version error")), ) }, errChecker: NotNil, }, { // sc error args: &types.PVCBrowseArgs{ PVCName: "pvc", VolumeSnapshotClass: "vsc", Namespace: "ns", }, prepare: func(f *fields) { gomock.InOrder( f.validateOps.EXPECT().ValidateNamespace(gomock.Any(), "ns").Return(nil), f.validateOps.EXPECT().ValidatePVC(gomock.Any(), gomock.Any(), gomock.Any()).Return( &v1.PersistentVolumeClaim{ ObjectMeta: metav1.ObjectMeta{ Name: "pvc", Namespace: "ns", }, Spec: v1.PersistentVolumeClaimSpec{ VolumeName: "vol", StorageClassName: &scName, }, }, nil, ), f.validateOps.EXPECT().FetchPV(gomock.Any(), "vol").Return( &v1.PersistentVolume{ ObjectMeta: metav1.ObjectMeta{ Name: "vol", }, Spec: v1.PersistentVolumeSpec{ PersistentVolumeSource: v1.PersistentVolumeSource{ CSI: &v1.CSIPersistentVolumeSource{}, }, }, }, nil, ), f.validateOps.EXPECT().ValidateStorageClass(gomock.Any(), gomock.Any()).Return(nil, fmt.Errorf("sc error")), ) }, errChecker: NotNil, }, { // non csi error args: &types.PVCBrowseArgs{ PVCName: "pvc", VolumeSnapshotClass: "vsc", Namespace: "ns", }, prepare: func(f *fields) { gomock.InOrder( f.validateOps.EXPECT().ValidateNamespace(gomock.Any(), "ns").Return(nil), f.validateOps.EXPECT().ValidatePVC(gomock.Any(), gomock.Any(), gomock.Any()).Return( &v1.PersistentVolumeClaim{ ObjectMeta: metav1.ObjectMeta{ Name: "pvc", Namespace: "ns", }, Spec: v1.PersistentVolumeClaimSpec{ VolumeName: "vol", StorageClassName: &scName, }, }, nil, ), f.validateOps.EXPECT().FetchPV(gomock.Any(), "vol").Return( &v1.PersistentVolume{ ObjectMeta: metav1.ObjectMeta{ Name: "vol", }, Spec: v1.PersistentVolumeSpec{ PersistentVolumeSource: v1.PersistentVolumeSource{ GCEPersistentDisk: &v1.GCEPersistentDiskVolumeSource{}, }, }, }, nil, ), ) }, errChecker: NotNil, }, { // fetch pv error args: &types.PVCBrowseArgs{ PVCName: "pvc", VolumeSnapshotClass: "vsc", Namespace: "ns", }, prepare: func(f *fields) { gomock.InOrder( f.validateOps.EXPECT().ValidateNamespace(gomock.Any(), "ns").Return(nil), f.validateOps.EXPECT().ValidatePVC(gomock.Any(), gomock.Any(), gomock.Any()).Return( &v1.PersistentVolumeClaim{ ObjectMeta: metav1.ObjectMeta{ Name: "pvc", Namespace: "ns", }, Spec: v1.PersistentVolumeClaimSpec{ VolumeName: "vol", StorageClassName: &scName, }, }, nil, ), f.validateOps.EXPECT().FetchPV(gomock.Any(), "vol").Return(nil, fmt.Errorf("pv fail")), ) }, errChecker: NotNil, }, { // validate pvc error args: &types.PVCBrowseArgs{ PVCName: "pvc", VolumeSnapshotClass: "vsc", Namespace: "ns", }, prepare: func(f *fields) { gomock.InOrder( f.validateOps.EXPECT().ValidateNamespace(gomock.Any(), "ns").Return(nil), f.validateOps.EXPECT().ValidatePVC(gomock.Any(), gomock.Any(), gomock.Any()).Return(nil, fmt.Errorf("validate pvc error")), ) }, errChecker: NotNil, }, { // validate ns error args: &types.PVCBrowseArgs{ PVCName: "pvc", VolumeSnapshotClass: "vsc", Namespace: "ns", }, prepare: func(f *fields) { gomock.InOrder( f.validateOps.EXPECT().ValidateNamespace(gomock.Any(), "ns").Return(fmt.Errorf("validate ns error")), ) }, errChecker: NotNil, }, { // validate pvc error args: &types.PVCBrowseArgs{ PVCName: "", VolumeSnapshotClass: "vsc", Namespace: "ns", }, errChecker: NotNil, }, { // validate vsc error args: &types.PVCBrowseArgs{ PVCName: "dfd", VolumeSnapshotClass: "", Namespace: "ns", }, errChecker: NotNil, }, { // validate ns error args: &types.PVCBrowseArgs{ PVCName: "dfd", VolumeSnapshotClass: "ddd", Namespace: "", }, errChecker: NotNil, }, } { ctrl := gomock.NewController(c) defer ctrl.Finish() f := fields{ validateOps: mocks.NewMockArgumentValidator(ctrl), versionOps: mocks.NewMockApiVersionFetcher(ctrl), } if tc.prepare != nil { tc.prepare(&f) } stepper := &pvcBrowserSteps{ validateOps: f.validateOps, versionFetchOps: f.versionOps, } _, err := stepper.ValidateArgs(ctx, tc.args) c.Check(err, tc.errChecker) } } func (s *CSITestSuite) TestPvcBrowseSnapshotPVC(c *C) { ctx := context.Background() snapshotter := &fakeSnapshotter{name: "snapshotter"} groupversion := &metav1.GroupVersionForDiscovery{ GroupVersion: "gv", Version: "v", } type fields struct { snapshotOps *mocks.MockSnapshotCreator } for _, tc := range []struct { args *types.PVCBrowseArgs snapshotName string prepare func(f *fields) errChecker Checker snapChecker Checker }{ { args: &types.PVCBrowseArgs{ Namespace: "ns", VolumeSnapshotClass: "vsc", PVCName: "pvc1", }, snapshotName: "snap1", prepare: func(f *fields) { gomock.InOrder( f.snapshotOps.EXPECT().NewSnapshotter().Return(snapshotter, nil), f.snapshotOps.EXPECT().CreateSnapshot(gomock.Any(), snapshotter, &types.CreateSnapshotArgs{ Namespace: "ns", PVCName: "pvc1", VolumeSnapshotClass: "vsc", SnapshotName: "snap1", }).Return(&snapv1.VolumeSnapshot{ ObjectMeta: metav1.ObjectMeta{ Name: "createdName", }, }, nil), ) }, errChecker: IsNil, snapChecker: NotNil, }, { args: &types.PVCBrowseArgs{ Namespace: "ns", VolumeSnapshotClass: "vsc", PVCName: "pvc1", }, snapshotName: "snap1", prepare: func(f *fields) { gomock.InOrder( f.snapshotOps.EXPECT().NewSnapshotter().Return(snapshotter, nil), f.snapshotOps.EXPECT().CreateSnapshot(gomock.Any(), snapshotter, &types.CreateSnapshotArgs{ Namespace: "ns", PVCName: "pvc1", VolumeSnapshotClass: "vsc", SnapshotName: "snap1", }).Return(nil, fmt.Errorf("error")), ) }, errChecker: NotNil, snapChecker: IsNil, }, { args: &types.PVCBrowseArgs{ Namespace: "ns", VolumeSnapshotClass: "vsc", PVCName: "pvc1", }, snapshotName: "snap1", prepare: func(f *fields) { gomock.InOrder( f.snapshotOps.EXPECT().NewSnapshotter().Return(nil, fmt.Errorf("error")), ) }, errChecker: NotNil, snapChecker: IsNil, }, } { ctrl := gomock.NewController(c) defer ctrl.Finish() f := fields{ snapshotOps: mocks.NewMockSnapshotCreator(ctrl), } if tc.prepare != nil { tc.prepare(&f) } stepper := &pvcBrowserSteps{ snapshotCreateOps: f.snapshotOps, SnapshotGroupVersion: groupversion, } snapshot, err := stepper.SnapshotPVC(ctx, tc.args, tc.snapshotName) c.Check(err, tc.errChecker) c.Check(snapshot, tc.snapChecker) } } func (s *CSITestSuite) TestCreateInspectorApplicationForPVC(c *C) { ctx := context.Background() resourceQuantity := resource.MustParse("1Gi") snapshotAPIGroup := "snapshot.storage.k8s.io" type fields struct { createAppOps *mocks.MockApplicationCreator } for _, tc := range []struct { args *types.PVCBrowseArgs snapshot *snapv1.VolumeSnapshot sc *sv1.StorageClass prepare func(f *fields) errChecker Checker podChecker Checker pvcChecker Checker }{ { args: &types.PVCBrowseArgs{ Namespace: "ns", RunAsUser: 100, }, sc: &sv1.StorageClass{ ObjectMeta: metav1.ObjectMeta{ Name: "sc", }, }, snapshot: &snapv1.VolumeSnapshot{ ObjectMeta: metav1.ObjectMeta{ Name: "snap1", }, Status: &snapv1.VolumeSnapshotStatus{ RestoreSize: &resourceQuantity, }, }, prepare: func(f *fields) { gomock.InOrder( f.createAppOps.EXPECT().CreatePVC(gomock.Any(), &types.CreatePVCArgs{ GenerateName: clonedPVCGenerateName, StorageClass: "sc", Namespace: "ns", DataSource: &v1.TypedLocalObjectReference{ APIGroup: &snapshotAPIGroup, Kind: "VolumeSnapshot", Name: "snap1", }, RestoreSize: &resourceQuantity, }).Return(&v1.PersistentVolumeClaim{ ObjectMeta: metav1.ObjectMeta{ Name: "pvc1", }, }, nil), f.createAppOps.EXPECT().CreatePod(gomock.Any(), &types.CreatePodArgs{ GenerateName: clonedPodGenerateName, Namespace: "ns", ContainerArgs: []string{"--noauth", "-r", "/pvc-data"}, RunAsUser: 100, ContainerImage: "filebrowser/filebrowser:v2", PVCMap: map[string]types.VolumePath{ "pvc1": { MountPath: "/pvc-data", }, }, }).Return(&v1.Pod{ ObjectMeta: metav1.ObjectMeta{ Name: "pod1", }, }, nil), f.createAppOps.EXPECT().WaitForPodReady(gomock.Any(), "ns", "pod1").Return(nil), ) }, errChecker: IsNil, podChecker: NotNil, pvcChecker: NotNil, }, { args: &types.PVCBrowseArgs{ Namespace: "ns", RunAsUser: 100, }, sc: &sv1.StorageClass{ ObjectMeta: metav1.ObjectMeta{ Name: "sc", }, }, snapshot: &snapv1.VolumeSnapshot{ ObjectMeta: metav1.ObjectMeta{ Name: "snap1", }, Status: &snapv1.VolumeSnapshotStatus{ RestoreSize: &resourceQuantity, }, }, prepare: func(f *fields) { gomock.InOrder( f.createAppOps.EXPECT().CreatePVC(gomock.Any(), &types.CreatePVCArgs{ GenerateName: clonedPVCGenerateName, StorageClass: "sc", Namespace: "ns", DataSource: &v1.TypedLocalObjectReference{ APIGroup: &snapshotAPIGroup, Kind: "VolumeSnapshot", Name: "snap1", }, RestoreSize: &resourceQuantity, }).Return(&v1.PersistentVolumeClaim{ ObjectMeta: metav1.ObjectMeta{ Name: "pvc1", }, }, nil), f.createAppOps.EXPECT().CreatePod(gomock.Any(), &types.CreatePodArgs{ GenerateName: clonedPodGenerateName, Namespace: "ns", ContainerArgs: []string{"--noauth", "-r", "/pvc-data"}, RunAsUser: 100, ContainerImage: "filebrowser/filebrowser:v2", PVCMap: map[string]types.VolumePath{ "pvc1": { MountPath: "/pvc-data", }, }, }).Return(&v1.Pod{ ObjectMeta: metav1.ObjectMeta{ Name: "pod1", }, }, nil), f.createAppOps.EXPECT().WaitForPodReady(gomock.Any(), "ns", "pod1").Return(fmt.Errorf("pod ready error")), ) }, errChecker: NotNil, podChecker: NotNil, pvcChecker: NotNil, }, { args: &types.PVCBrowseArgs{ Namespace: "ns", RunAsUser: 100, }, sc: &sv1.StorageClass{ ObjectMeta: metav1.ObjectMeta{ Name: "sc", }, }, snapshot: &snapv1.VolumeSnapshot{ ObjectMeta: metav1.ObjectMeta{ Name: "snap1", }, Status: &snapv1.VolumeSnapshotStatus{ RestoreSize: &resourceQuantity, }, }, prepare: func(f *fields) { gomock.InOrder( f.createAppOps.EXPECT().CreatePVC(gomock.Any(), gomock.Any()).Return(&v1.PersistentVolumeClaim{ ObjectMeta: metav1.ObjectMeta{ Name: "pvc1", }, }, nil), f.createAppOps.EXPECT().CreatePod(gomock.Any(), gomock.Any()).Return(nil, fmt.Errorf("pod error")), ) }, errChecker: NotNil, podChecker: IsNil, pvcChecker: NotNil, }, { args: &types.PVCBrowseArgs{ Namespace: "ns", RunAsUser: 100, }, sc: &sv1.StorageClass{ ObjectMeta: metav1.ObjectMeta{ Name: "sc", }, }, snapshot: &snapv1.VolumeSnapshot{ ObjectMeta: metav1.ObjectMeta{ Name: "snap1", }, Status: &snapv1.VolumeSnapshotStatus{ RestoreSize: &resourceQuantity, }, }, prepare: func(f *fields) { gomock.InOrder( f.createAppOps.EXPECT().CreatePVC(gomock.Any(), gomock.Any()).Return(nil, fmt.Errorf("error")), ) }, errChecker: NotNil, podChecker: IsNil, pvcChecker: IsNil, }, } { ctrl := gomock.NewController(c) defer ctrl.Finish() f := fields{ createAppOps: mocks.NewMockApplicationCreator(ctrl), } if tc.prepare != nil { tc.prepare(&f) } stepper := &pvcBrowserSteps{ createAppOps: f.createAppOps, } pod, pvc, err := stepper.CreateInspectorApplication(ctx, tc.args, tc.snapshot, tc.sc) c.Check(err, tc.errChecker) c.Check(pod, tc.podChecker) c.Check(pvc, tc.pvcChecker) } } func (s *CSITestSuite) TestPVCBrowseCleanup(c *C) { ctx := context.Background() groupversion := &metav1.GroupVersionForDiscovery{ GroupVersion: "gv", Version: "v", } type fields struct { cleanerOps *mocks.MockCleaner } for _, tc := range []struct { pvc *v1.PersistentVolumeClaim pod *v1.Pod snapshot *snapv1.VolumeSnapshot prepare func(f *fields) }{ { pvc: &v1.PersistentVolumeClaim{ ObjectMeta: metav1.ObjectMeta{ Name: "pvc", Namespace: "ns", }, }, pod: &v1.Pod{ ObjectMeta: metav1.ObjectMeta{ Name: "pod", Namespace: "ns", }, }, snapshot: &snapv1.VolumeSnapshot{ ObjectMeta: metav1.ObjectMeta{ Name: "snap1", Namespace: "ns", }, }, prepare: func(f *fields) { gomock.InOrder( f.cleanerOps.EXPECT().DeletePVC(ctx, "pvc", "ns").Return(nil), f.cleanerOps.EXPECT().DeletePod(ctx, "pod", "ns").Return(nil), f.cleanerOps.EXPECT().DeleteSnapshot(ctx, "snap1", "ns", groupversion).Return(nil), ) }, }, { pvc: &v1.PersistentVolumeClaim{ ObjectMeta: metav1.ObjectMeta{ Name: "pvc", Namespace: "ns", }, }, pod: &v1.Pod{ ObjectMeta: metav1.ObjectMeta{ Name: "pod", Namespace: "ns", }, }, snapshot: &snapv1.VolumeSnapshot{ ObjectMeta: metav1.ObjectMeta{ Name: "snap1", Namespace: "ns", }, }, prepare: func(f *fields) { gomock.InOrder( f.cleanerOps.EXPECT().DeletePVC(ctx, "pvc", "ns").Return(fmt.Errorf("err")), f.cleanerOps.EXPECT().DeletePod(ctx, "pod", "ns").Return(fmt.Errorf("err")), f.cleanerOps.EXPECT().DeleteSnapshot(ctx, "snap1", "ns", groupversion).Return(fmt.Errorf("err")), ) }, }, } { ctrl := gomock.NewController(c) defer ctrl.Finish() f := fields{ cleanerOps: mocks.NewMockCleaner(ctrl), } if tc.prepare != nil { tc.prepare(&f) } stepper := &pvcBrowserSteps{ cleanerOps: f.cleanerOps, SnapshotGroupVersion: groupversion, } stepper.Cleanup(ctx, tc.pvc, tc.pod, tc.snapshot) } } ================================================ FILE: pkg/csi/pvc_inspector_test.go ================================================ package csi import ( "context" "fmt" "github.com/golang/mock/gomock" "github.com/kastenhq/kubestr/pkg/csi/mocks" "github.com/kastenhq/kubestr/pkg/csi/types" snapv1 "github.com/kubernetes-csi/external-snapshotter/client/v4/apis/volumesnapshot/v1" . "gopkg.in/check.v1" v1 "k8s.io/api/core/v1" sv1 "k8s.io/api/storage/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" "k8s.io/client-go/dynamic" fakedynamic "k8s.io/client-go/dynamic/fake" "k8s.io/client-go/kubernetes" "k8s.io/client-go/kubernetes/fake" ) func (s *CSITestSuite) TestRunPVCBrowseHelper(c *C) { ctx := context.Background() type fields struct { stepperOps *mocks.MockPVCBrowserStepper } for _, tc := range []struct { kubeCli kubernetes.Interface dynCli dynamic.Interface args *types.PVCBrowseArgs prepare func(f *fields) errChecker Checker }{ { // success kubeCli: fake.NewSimpleClientset(), dynCli: fakedynamic.NewSimpleDynamicClient(runtime.NewScheme()), args: &types.PVCBrowseArgs{}, prepare: func(f *fields) { gomock.InOrder( f.stepperOps.EXPECT().ValidateArgs(gomock.Any(), gomock.Any()).Return( &sv1.StorageClass{}, nil, ), f.stepperOps.EXPECT().SnapshotPVC(gomock.Any(), gomock.Any(), gomock.Any()).Return( &snapv1.VolumeSnapshot{ ObjectMeta: metav1.ObjectMeta{ Name: "snap1", Namespace: "ns", }}, nil, ), f.stepperOps.EXPECT().CreateInspectorApplication(gomock.Any(), gomock.Any(), &snapv1.VolumeSnapshot{ ObjectMeta: metav1.ObjectMeta{ Name: "snap1", Namespace: "ns", }, }, &sv1.StorageClass{}, ).Return( &v1.Pod{ ObjectMeta: metav1.ObjectMeta{ Name: "pod1", Namespace: "ns", }, }, &v1.PersistentVolumeClaim{ ObjectMeta: metav1.ObjectMeta{ Name: "pvc1", Namespace: "ns", }, }, nil, ), f.stepperOps.EXPECT().PortForwardAPod(gomock.Any(), &v1.Pod{ ObjectMeta: metav1.ObjectMeta{ Name: "pod1", Namespace: "ns", }, }, gomock.Any(), ).Return(nil), f.stepperOps.EXPECT().Cleanup(gomock.Any(), &v1.PersistentVolumeClaim{ ObjectMeta: metav1.ObjectMeta{ Name: "pvc1", Namespace: "ns", }, }, &v1.Pod{ ObjectMeta: metav1.ObjectMeta{ Name: "pod1", Namespace: "ns", }, }, &snapv1.VolumeSnapshot{ ObjectMeta: metav1.ObjectMeta{ Name: "snap1", Namespace: "ns", }, }, ), ) }, errChecker: IsNil, }, { // portforward failure kubeCli: fake.NewSimpleClientset(), dynCli: fakedynamic.NewSimpleDynamicClient(runtime.NewScheme()), args: &types.PVCBrowseArgs{}, prepare: func(f *fields) { gomock.InOrder( f.stepperOps.EXPECT().ValidateArgs(gomock.Any(), gomock.Any()).Return(nil, nil), f.stepperOps.EXPECT().SnapshotPVC(gomock.Any(), gomock.Any(), gomock.Any()).Return(nil, nil), f.stepperOps.EXPECT().CreateInspectorApplication(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).Return(nil, nil, nil), f.stepperOps.EXPECT().PortForwardAPod(gomock.Any(), gomock.Any(), gomock.Any()).Return(fmt.Errorf("portforward error")), f.stepperOps.EXPECT().Cleanup(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()), ) }, errChecker: NotNil, }, { // createapp failure kubeCli: fake.NewSimpleClientset(), dynCli: fakedynamic.NewSimpleDynamicClient(runtime.NewScheme()), args: &types.PVCBrowseArgs{}, prepare: func(f *fields) { gomock.InOrder( f.stepperOps.EXPECT().ValidateArgs(gomock.Any(), gomock.Any()).Return(nil, nil), f.stepperOps.EXPECT().SnapshotPVC(gomock.Any(), gomock.Any(), gomock.Any()).Return(nil, nil), f.stepperOps.EXPECT().CreateInspectorApplication(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).Return(nil, nil, fmt.Errorf("createapp error")), f.stepperOps.EXPECT().Cleanup(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()), ) }, errChecker: NotNil, }, { // snapshot failure kubeCli: fake.NewSimpleClientset(), dynCli: fakedynamic.NewSimpleDynamicClient(runtime.NewScheme()), args: &types.PVCBrowseArgs{}, prepare: func(f *fields) { gomock.InOrder( f.stepperOps.EXPECT().ValidateArgs(gomock.Any(), gomock.Any()).Return(nil, nil), f.stepperOps.EXPECT().SnapshotPVC(gomock.Any(), gomock.Any(), gomock.Any()).Return(nil, fmt.Errorf("snapshot error")), f.stepperOps.EXPECT().Cleanup(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()), ) }, errChecker: NotNil, }, { // validate failure kubeCli: fake.NewSimpleClientset(), dynCli: fakedynamic.NewSimpleDynamicClient(runtime.NewScheme()), args: &types.PVCBrowseArgs{}, prepare: func(f *fields) { gomock.InOrder( f.stepperOps.EXPECT().ValidateArgs(gomock.Any(), gomock.Any()).Return(nil, fmt.Errorf("snapshot error")), f.stepperOps.EXPECT().Cleanup(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()), ) }, errChecker: NotNil, }, { // emptycli failure kubeCli: nil, dynCli: fakedynamic.NewSimpleDynamicClient(runtime.NewScheme()), args: &types.PVCBrowseArgs{}, prepare: func(f *fields) { gomock.InOrder( f.stepperOps.EXPECT().Cleanup(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()), ) }, errChecker: NotNil, }, { // emptydyncli failure kubeCli: fake.NewSimpleClientset(), dynCli: nil, args: &types.PVCBrowseArgs{}, prepare: func(f *fields) { gomock.InOrder( f.stepperOps.EXPECT().Cleanup(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()), ) }, errChecker: NotNil, }, } { ctrl := gomock.NewController(c) defer ctrl.Finish() f := fields{ stepperOps: mocks.NewMockPVCBrowserStepper(ctrl), } if tc.prepare != nil { tc.prepare(&f) } runner := &PVCBrowseRunner{ KubeCli: tc.kubeCli, DynCli: tc.dynCli, browserSteps: f.stepperOps, } err := runner.RunPVCBrowseHelper(ctx, tc.args) c.Check(err, tc.errChecker) } } func (s *CSITestSuite) TestPVCBrowseRunner(c *C) { ctx := context.Background() r := &PVCBrowseRunner{ browserSteps: &pvcBrowserSteps{}, } err := r.RunPVCBrowseHelper(ctx, nil) c.Check(err, NotNil) } ================================================ FILE: pkg/csi/snapshot_inspector.go ================================================ package csi import ( "bytes" "context" "fmt" "os" "os/signal" "sync" "syscall" "github.com/kastenhq/kubestr/pkg/csi/types" snapv1 "github.com/kubernetes-csi/external-snapshotter/client/v4/apis/volumesnapshot/v1" "github.com/pkg/errors" v1 "k8s.io/api/core/v1" sv1 "k8s.io/api/storage/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/client-go/dynamic" "k8s.io/client-go/kubernetes" ) type SnapshotBrowseRunner struct { KubeCli kubernetes.Interface DynCli dynamic.Interface browserSteps SnapshotBrowserStepper pvc *v1.PersistentVolumeClaim pod *v1.Pod snapshot *snapv1.VolumeSnapshot } func (r *SnapshotBrowseRunner) RunSnapshotBrowse(ctx context.Context, args *types.SnapshotBrowseArgs) error { r.browserSteps = &snapshotBrowserSteps{ validateOps: &validateOperations{ kubeCli: r.KubeCli, dynCli: r.DynCli, }, versionFetchOps: &apiVersionFetch{ kubeCli: r.KubeCli, }, createAppOps: &applicationCreate{ kubeCli: r.KubeCli, }, portForwardOps: &portforward{}, kubeExecutor: &kubeExec{ kubeCli: r.KubeCli, }, cleanerOps: &cleanse{ kubeCli: r.KubeCli, dynCli: r.DynCli, }, } return r.RunSnapshotBrowseHelper(ctx, args) } func (r *SnapshotBrowseRunner) RunSnapshotBrowseHelper(ctx context.Context, args *types.SnapshotBrowseArgs) error { defer func() { fmt.Println("Cleaning up resources.") r.browserSteps.Cleanup(ctx, r.pvc, r.pod) }() if r.KubeCli == nil || r.DynCli == nil { return fmt.Errorf("cli uninitialized") } fmt.Println("Fetching the snapshot.") vs, sc, err := r.browserSteps.ValidateArgs(ctx, args) if err != nil { return errors.Wrap(err, "failed to validate arguments.") } r.snapshot = vs fmt.Println("Creating the browser pod.") r.pod, r.pvc, err = r.browserSteps.CreateInspectorApplication(ctx, args, r.snapshot, sc) if err != nil { return errors.Wrap(err, "failed to create inspector application.") } if args.ShowTree { fmt.Println("Printing the tree structure from root directory.") stdout, err := r.browserSteps.ExecuteTreeCommand(ctx, args, r.pod) if err != nil { return errors.Wrap(err, "failed to execute tree command in pod.") } fmt.Printf("\n%s\n\n", stdout) return nil } fmt.Println("Forwarding the port.") err = r.browserSteps.PortForwardAPod(ctx, r.pod, args.LocalPort) if err != nil { return errors.Wrap(err, "failed to port forward Pod.") } return nil } //go:generate go run github.com/golang/mock/mockgen -destination=mocks/mock_snapshot_browser_stepper.go -package=mocks . SnapshotBrowserStepper type SnapshotBrowserStepper interface { ValidateArgs(ctx context.Context, args *types.SnapshotBrowseArgs) (*snapv1.VolumeSnapshot, *sv1.StorageClass, error) CreateInspectorApplication(ctx context.Context, args *types.SnapshotBrowseArgs, snapshot *snapv1.VolumeSnapshot, storageClass *sv1.StorageClass) (*v1.Pod, *v1.PersistentVolumeClaim, error) ExecuteTreeCommand(ctx context.Context, args *types.SnapshotBrowseArgs, pod *v1.Pod) (string, error) PortForwardAPod(ctx context.Context, pod *v1.Pod, localPort int) error Cleanup(ctx context.Context, pvc *v1.PersistentVolumeClaim, pod *v1.Pod) } type snapshotBrowserSteps struct { validateOps ArgumentValidator versionFetchOps ApiVersionFetcher createAppOps ApplicationCreator portForwardOps PortForwarder cleanerOps Cleaner kubeExecutor KubeExecutor SnapshotGroupVersion *metav1.GroupVersionForDiscovery } func (s *snapshotBrowserSteps) ValidateArgs(ctx context.Context, args *types.SnapshotBrowseArgs) (*snapv1.VolumeSnapshot, *sv1.StorageClass, error) { if err := args.Validate(); err != nil { return nil, nil, errors.Wrap(err, "failed to validate input arguments") } if err := s.validateOps.ValidateNamespace(ctx, args.Namespace); err != nil { return nil, nil, errors.Wrap(err, "failed to validate Namespace") } groupVersion, err := s.versionFetchOps.GetCSISnapshotGroupVersion() if err != nil { return nil, nil, errors.Wrap(err, "failed to fetch groupVersion") } s.SnapshotGroupVersion = groupVersion snapshot, err := s.validateOps.ValidateVolumeSnapshot(ctx, args.SnapshotName, args.Namespace, groupVersion) if err != nil { return nil, nil, errors.Wrap(err, "failed to validate VolumeSnapshot") } pvc, err := s.validateOps.ValidatePVC(ctx, *snapshot.Spec.Source.PersistentVolumeClaimName, args.Namespace) if err != nil { return nil, nil, errors.Wrap(err, "failed to validate source PVC") } sc, err := s.validateOps.ValidateStorageClass(ctx, *pvc.Spec.StorageClassName) if err != nil { return nil, nil, errors.Wrap(err, "failed to validate SC") } uVSC, err := s.validateOps.ValidateVolumeSnapshotClass(ctx, *snapshot.Spec.VolumeSnapshotClassName, groupVersion) if err != nil { return nil, nil, errors.Wrap(err, "failed to validate VolumeSnapshotClass") } vscDriver := getDriverNameFromUVSC(*uVSC, groupVersion.GroupVersion) if sc.Provisioner != vscDriver { return nil, nil, fmt.Errorf("provisioner for StorageClass (%s) and VolumeSnapshotClass driver (%s) are different", sc.Provisioner, vscDriver) } return snapshot, sc, nil } func (s *snapshotBrowserSteps) CreateInspectorApplication(ctx context.Context, args *types.SnapshotBrowseArgs, snapshot *snapv1.VolumeSnapshot, storageClass *sv1.StorageClass) (*v1.Pod, *v1.PersistentVolumeClaim, error) { snapshotAPIGroup := "snapshot.storage.k8s.io" snapshotKind := "VolumeSnapshot" dataSource := &v1.TypedLocalObjectReference{ APIGroup: &snapshotAPIGroup, Kind: snapshotKind, Name: snapshot.Name, } pvcArgs := &types.CreatePVCArgs{ GenerateName: clonedPVCGenerateName, StorageClass: storageClass.Name, Namespace: args.Namespace, DataSource: dataSource, RestoreSize: snapshot.Status.RestoreSize, } pvc, err := s.createAppOps.CreatePVC(ctx, pvcArgs) if err != nil { return nil, nil, errors.Wrap(err, "failed to restore PVC") } podArgs := &types.CreatePodArgs{ GenerateName: clonedPodGenerateName, Namespace: args.Namespace, RunAsUser: args.RunAsUser, ContainerImage: "filebrowser/filebrowser:v2", ContainerArgs: []string{"--noauth", "-r", "/snapshot-data"}, PVCMap: map[string]types.VolumePath{ pvc.Name: { MountPath: "/snapshot-data", }, }, } if args.ShowTree { podArgs = &types.CreatePodArgs{ GenerateName: clonedPodGenerateName, Namespace: args.Namespace, RunAsUser: args.RunAsUser, ContainerImage: "alpine:3.19", Command: []string{"/bin/sh"}, ContainerArgs: []string{"-c", "while true; do sleep 3600; done"}, PVCMap: map[string]types.VolumePath{ pvc.Name: { MountPath: "/snapshot-data", }, }, } } pod, err := s.createAppOps.CreatePod(ctx, podArgs) if err != nil { return nil, pvc, errors.Wrap(err, "failed to create browse Pod") } if err = s.createAppOps.WaitForPodReady(ctx, args.Namespace, pod.Name); err != nil { return pod, pvc, errors.Wrap(err, "pod failed to become ready") } return pod, pvc, nil } func (s *snapshotBrowserSteps) ExecuteTreeCommand(ctx context.Context, args *types.SnapshotBrowseArgs, pod *v1.Pod) (string, error) { command := []string{"tree", "/snapshot-data"} stdout, err := s.kubeExecutor.Exec(ctx, args.Namespace, pod.Name, pod.Spec.Containers[0].Name, command) if err != nil { return "", errors.Wrapf(err, "error running command:(%v)", command) } return stdout, nil } func (s *snapshotBrowserSteps) PortForwardAPod(ctx context.Context, pod *v1.Pod, localPort int) error { var wg sync.WaitGroup wg.Add(1) stopChan, readyChan, errChan := make(chan struct{}, 1), make(chan struct{}, 1), make(chan string) out, errOut := new(bytes.Buffer), new(bytes.Buffer) cfg, err := s.portForwardOps.FetchRestConfig() if err != nil { return errors.New("failed to fetch rest config") } sigs := make(chan os.Signal, 1) signal.Notify(sigs, syscall.SIGINT, syscall.SIGTERM) go func() { <-sigs fmt.Println("Stopping port forward") close(stopChan) wg.Done() }() go func() { pfArgs := &types.PortForwardAPodRequest{ RestConfig: cfg, Pod: pod, LocalPort: localPort, PodPort: 80, OutStream: bytes.Buffer(*out), ErrOutStream: bytes.Buffer(*errOut), StopCh: stopChan, ReadyCh: readyChan, } err = s.portForwardOps.PortForwardAPod(pfArgs) if err != nil { errChan <- fmt.Sprintf("Failed to port forward (%s)", err.Error()) } }() select { case <-readyChan: url := fmt.Sprintf("http://localhost:%d/", localPort) fmt.Printf("Port forwarding is ready to get traffic. visit %s\n", url) openbrowser(url) wg.Wait() case msg := <-errChan: return errors.New(msg) } return nil } func (s *snapshotBrowserSteps) Cleanup(ctx context.Context, pvc *v1.PersistentVolumeClaim, pod *v1.Pod) { if pvc != nil { err := s.cleanerOps.DeletePVC(ctx, pvc.Name, pvc.Namespace) if err != nil { fmt.Println("Failed to delete PVC", pvc) } } if pod != nil { err := s.cleanerOps.DeletePod(ctx, pod.Name, pod.Namespace) if err != nil { fmt.Println("Failed to delete Pod", pod) } } } ================================================ FILE: pkg/csi/snapshot_inspector_steps_test.go ================================================ package csi import ( "context" "fmt" "github.com/golang/mock/gomock" "github.com/kastenhq/kubestr/pkg/common" "github.com/kastenhq/kubestr/pkg/csi/mocks" "github.com/kastenhq/kubestr/pkg/csi/types" snapv1 "github.com/kubernetes-csi/external-snapshotter/client/v4/apis/volumesnapshot/v1" . "gopkg.in/check.v1" v1 "k8s.io/api/core/v1" sv1 "k8s.io/api/storage/v1" "k8s.io/apimachinery/pkg/api/resource" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" ) func (s *CSITestSuite) TestSnapshotBrowseValidateArgs(c *C) { ctx := context.Background() scName := "sc" vscName := "vsc" pvcName := "pvc" type fields struct { validateOps *mocks.MockArgumentValidator versionOps *mocks.MockApiVersionFetcher } for _, tc := range []struct { args *types.SnapshotBrowseArgs prepare func(f *fields) errChecker Checker }{ { // valid args args: &types.SnapshotBrowseArgs{ SnapshotName: "vs", Namespace: "ns", }, prepare: func(f *fields) { gomock.InOrder( f.validateOps.EXPECT().ValidateNamespace(gomock.Any(), "ns").Return(nil), f.versionOps.EXPECT().GetCSISnapshotGroupVersion().Return( &metav1.GroupVersionForDiscovery{ GroupVersion: common.SnapshotVersion, }, nil), f.validateOps.EXPECT().ValidateVolumeSnapshot(gomock.Any(), "vs", "ns", gomock.Any()).Return( &snapv1.VolumeSnapshot{ ObjectMeta: metav1.ObjectMeta{ Name: "vs", Namespace: "ns", }, Spec: snapv1.VolumeSnapshotSpec{ Source: snapv1.VolumeSnapshotSource{ PersistentVolumeClaimName: &pvcName, }, VolumeSnapshotClassName: &vscName, }, }, nil, ), f.validateOps.EXPECT().ValidatePVC(gomock.Any(), "pvc", "ns").Return( &v1.PersistentVolumeClaim{ ObjectMeta: metav1.ObjectMeta{ Name: "pvc", Namespace: "ns", }, Spec: v1.PersistentVolumeClaimSpec{ VolumeName: "vol", StorageClassName: &scName, }, }, nil, ), f.validateOps.EXPECT().ValidateStorageClass(gomock.Any(), scName).Return( &sv1.StorageClass{ Provisioner: "p1", }, nil), f.validateOps.EXPECT().ValidateVolumeSnapshotClass(gomock.Any(), "vsc", &metav1.GroupVersionForDiscovery{ GroupVersion: common.SnapshotVersion, }).Return(&unstructured.Unstructured{ Object: map[string]interface{}{ common.VolSnapClassDriverKey: "p1", }, }, nil), ) }, errChecker: IsNil, }, { // driver mismatch args: &types.SnapshotBrowseArgs{ SnapshotName: "vs", Namespace: "ns", }, prepare: func(f *fields) { gomock.InOrder( f.validateOps.EXPECT().ValidateNamespace(gomock.Any(), "ns").Return(nil), f.versionOps.EXPECT().GetCSISnapshotGroupVersion().Return( &metav1.GroupVersionForDiscovery{ GroupVersion: common.SnapshotVersion, }, nil), f.validateOps.EXPECT().ValidateVolumeSnapshot(gomock.Any(), "vs", "ns", gomock.Any()).Return( &snapv1.VolumeSnapshot{ ObjectMeta: metav1.ObjectMeta{ Name: "vs", Namespace: "ns", }, Spec: snapv1.VolumeSnapshotSpec{ Source: snapv1.VolumeSnapshotSource{ PersistentVolumeClaimName: &pvcName, }, VolumeSnapshotClassName: &vscName, }, }, nil, ), f.validateOps.EXPECT().ValidatePVC(gomock.Any(), "pvc", "ns").Return( &v1.PersistentVolumeClaim{ ObjectMeta: metav1.ObjectMeta{ Name: "pvc", Namespace: "ns", }, Spec: v1.PersistentVolumeClaimSpec{ VolumeName: "vol", StorageClassName: &scName, }, }, nil, ), f.validateOps.EXPECT().ValidateStorageClass(gomock.Any(), gomock.Any()).Return( &sv1.StorageClass{ Provisioner: "p1", }, nil), f.validateOps.EXPECT().ValidateVolumeSnapshotClass(gomock.Any(), "vsc", &metav1.GroupVersionForDiscovery{ GroupVersion: common.SnapshotVersion, }).Return(&unstructured.Unstructured{ Object: map[string]interface{}{ common.VolSnapClassDriverKey: "p2", }, }, nil), ) }, errChecker: NotNil, }, { // vsc error args: &types.SnapshotBrowseArgs{ SnapshotName: "vs", Namespace: "ns", }, prepare: func(f *fields) { gomock.InOrder( f.validateOps.EXPECT().ValidateNamespace(gomock.Any(), "ns").Return(nil), f.versionOps.EXPECT().GetCSISnapshotGroupVersion().Return(nil, nil), f.validateOps.EXPECT().ValidateVolumeSnapshot(gomock.Any(), "vs", "ns", gomock.Any()).Return( &snapv1.VolumeSnapshot{ ObjectMeta: metav1.ObjectMeta{ Name: "vs", Namespace: "ns", }, Spec: snapv1.VolumeSnapshotSpec{ Source: snapv1.VolumeSnapshotSource{ PersistentVolumeClaimName: &pvcName, }, VolumeSnapshotClassName: &vscName, }, }, nil, ), f.validateOps.EXPECT().ValidatePVC(gomock.Any(), "pvc", "ns").Return( &v1.PersistentVolumeClaim{ ObjectMeta: metav1.ObjectMeta{ Name: "pvc", Namespace: "ns", }, Spec: v1.PersistentVolumeClaimSpec{ VolumeName: "vol", StorageClassName: &scName, }, }, nil, ), f.validateOps.EXPECT().ValidateStorageClass(gomock.Any(), gomock.Any()).Return(nil, nil), f.validateOps.EXPECT().ValidateVolumeSnapshotClass(gomock.Any(), gomock.Any(), gomock.Any()).Return(nil, fmt.Errorf("vsc error")), ) }, errChecker: NotNil, }, { // get driver versionn error args: &types.SnapshotBrowseArgs{ SnapshotName: "vs", Namespace: "ns", }, prepare: func(f *fields) { gomock.InOrder( f.validateOps.EXPECT().ValidateNamespace(gomock.Any(), "ns").Return(nil), f.versionOps.EXPECT().GetCSISnapshotGroupVersion().Return(nil, fmt.Errorf("driver version error")), ) }, errChecker: NotNil, }, { // sc error args: &types.SnapshotBrowseArgs{ SnapshotName: "vs", Namespace: "ns", }, prepare: func(f *fields) { gomock.InOrder( f.validateOps.EXPECT().ValidateNamespace(gomock.Any(), "ns").Return(nil), f.versionOps.EXPECT().GetCSISnapshotGroupVersion().Return(nil, nil), f.validateOps.EXPECT().ValidateVolumeSnapshot(gomock.Any(), "vs", "ns", gomock.Any()).Return( &snapv1.VolumeSnapshot{ ObjectMeta: metav1.ObjectMeta{ Name: "vs", Namespace: "ns", }, Spec: snapv1.VolumeSnapshotSpec{ Source: snapv1.VolumeSnapshotSource{ PersistentVolumeClaimName: &pvcName, }, VolumeSnapshotClassName: &vscName, }, }, nil, ), f.validateOps.EXPECT().ValidatePVC(gomock.Any(), "pvc", "ns").Return( &v1.PersistentVolumeClaim{ ObjectMeta: metav1.ObjectMeta{ Name: "pvc", Namespace: "ns", }, Spec: v1.PersistentVolumeClaimSpec{ VolumeName: "vol", StorageClassName: &scName, }, }, nil, ), f.validateOps.EXPECT().ValidateStorageClass(gomock.Any(), gomock.Any()).Return(nil, fmt.Errorf("sc error")), ) }, errChecker: NotNil, }, { // validate vs error args: &types.SnapshotBrowseArgs{ SnapshotName: "vs", Namespace: "ns", }, prepare: func(f *fields) { gomock.InOrder( f.validateOps.EXPECT().ValidateNamespace(gomock.Any(), "ns").Return(nil), f.versionOps.EXPECT().GetCSISnapshotGroupVersion().Return(nil, nil), f.validateOps.EXPECT().ValidateVolumeSnapshot(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).Return(nil, fmt.Errorf("validate vs error")), ) }, errChecker: NotNil, }, { // validate ns error args: &types.SnapshotBrowseArgs{ SnapshotName: "vs", Namespace: "ns", }, prepare: func(f *fields) { gomock.InOrder( f.validateOps.EXPECT().ValidateNamespace(gomock.Any(), "ns").Return(fmt.Errorf("validate ns error")), ) }, errChecker: NotNil, }, { // validate vs error args: &types.SnapshotBrowseArgs{ SnapshotName: "", Namespace: "ns", }, errChecker: NotNil, }, { // validate ns error args: &types.SnapshotBrowseArgs{ SnapshotName: "dfd", Namespace: "", }, errChecker: NotNil, }, } { ctrl := gomock.NewController(c) defer ctrl.Finish() f := fields{ validateOps: mocks.NewMockArgumentValidator(ctrl), versionOps: mocks.NewMockApiVersionFetcher(ctrl), } if tc.prepare != nil { tc.prepare(&f) } stepper := &snapshotBrowserSteps{ validateOps: f.validateOps, versionFetchOps: f.versionOps, } _, _, err := stepper.ValidateArgs(ctx, tc.args) c.Check(err, tc.errChecker) } } func (s *CSITestSuite) TestCreateInspectorApplicationForSnapshot(c *C) { ctx := context.Background() resourceQuantity := resource.MustParse("1Gi") snapshotAPIGroup := "snapshot.storage.k8s.io" type fields struct { createAppOps *mocks.MockApplicationCreator } for _, tc := range []struct { args *types.SnapshotBrowseArgs snapshot *snapv1.VolumeSnapshot sc *sv1.StorageClass prepare func(f *fields) errChecker Checker podChecker Checker pvcChecker Checker }{ { args: &types.SnapshotBrowseArgs{ Namespace: "ns", RunAsUser: 100, }, sc: &sv1.StorageClass{ ObjectMeta: metav1.ObjectMeta{ Name: "sc", }, }, snapshot: &snapv1.VolumeSnapshot{ ObjectMeta: metav1.ObjectMeta{ Name: "vs", }, Status: &snapv1.VolumeSnapshotStatus{ RestoreSize: &resourceQuantity, }, }, prepare: func(f *fields) { gomock.InOrder( f.createAppOps.EXPECT().CreatePVC(gomock.Any(), &types.CreatePVCArgs{ GenerateName: clonedPVCGenerateName, StorageClass: "sc", Namespace: "ns", DataSource: &v1.TypedLocalObjectReference{ APIGroup: &snapshotAPIGroup, Kind: "VolumeSnapshot", Name: "vs", }, RestoreSize: &resourceQuantity, }).Return(&v1.PersistentVolumeClaim{ ObjectMeta: metav1.ObjectMeta{ Name: "pvc", }, }, nil), f.createAppOps.EXPECT().CreatePod(gomock.Any(), &types.CreatePodArgs{ GenerateName: clonedPodGenerateName, Namespace: "ns", ContainerArgs: []string{"--noauth", "-r", "/snapshot-data"}, RunAsUser: 100, ContainerImage: "filebrowser/filebrowser:v2", PVCMap: map[string]types.VolumePath{ "pvc": { MountPath: "/snapshot-data", }, }, }).Return(&v1.Pod{ ObjectMeta: metav1.ObjectMeta{ Name: "pod", }, }, nil), f.createAppOps.EXPECT().WaitForPodReady(gomock.Any(), "ns", "pod").Return(nil), ) }, errChecker: IsNil, podChecker: NotNil, pvcChecker: NotNil, }, { args: &types.SnapshotBrowseArgs{ Namespace: "ns", RunAsUser: 100, }, sc: &sv1.StorageClass{ ObjectMeta: metav1.ObjectMeta{ Name: "sc", }, }, snapshot: &snapv1.VolumeSnapshot{ ObjectMeta: metav1.ObjectMeta{ Name: "vs", }, Status: &snapv1.VolumeSnapshotStatus{ RestoreSize: &resourceQuantity, }, }, prepare: func(f *fields) { gomock.InOrder( f.createAppOps.EXPECT().CreatePVC(gomock.Any(), &types.CreatePVCArgs{ GenerateName: clonedPVCGenerateName, StorageClass: "sc", Namespace: "ns", DataSource: &v1.TypedLocalObjectReference{ APIGroup: &snapshotAPIGroup, Kind: "VolumeSnapshot", Name: "vs", }, RestoreSize: &resourceQuantity, }).Return(&v1.PersistentVolumeClaim{ ObjectMeta: metav1.ObjectMeta{ Name: "pvc", }, }, nil), f.createAppOps.EXPECT().CreatePod(gomock.Any(), &types.CreatePodArgs{ GenerateName: clonedPodGenerateName, Namespace: "ns", ContainerArgs: []string{"--noauth", "-r", "/snapshot-data"}, RunAsUser: 100, ContainerImage: "filebrowser/filebrowser:v2", PVCMap: map[string]types.VolumePath{ "pvc": { MountPath: "/snapshot-data", }, }, }).Return(&v1.Pod{ ObjectMeta: metav1.ObjectMeta{ Name: "pod", }, }, nil), f.createAppOps.EXPECT().WaitForPodReady(gomock.Any(), "ns", "pod").Return(fmt.Errorf("pod ready error")), ) }, errChecker: NotNil, podChecker: NotNil, pvcChecker: NotNil, }, { args: &types.SnapshotBrowseArgs{ Namespace: "ns", RunAsUser: 100, }, sc: &sv1.StorageClass{ ObjectMeta: metav1.ObjectMeta{ Name: "sc", }, }, snapshot: &snapv1.VolumeSnapshot{ ObjectMeta: metav1.ObjectMeta{ Name: "vs", }, Status: &snapv1.VolumeSnapshotStatus{ RestoreSize: &resourceQuantity, }, }, prepare: func(f *fields) { gomock.InOrder( f.createAppOps.EXPECT().CreatePVC(gomock.Any(), gomock.Any()).Return(&v1.PersistentVolumeClaim{ ObjectMeta: metav1.ObjectMeta{ Name: "pvc", }, }, nil), f.createAppOps.EXPECT().CreatePod(gomock.Any(), gomock.Any()).Return(nil, fmt.Errorf("pod error")), ) }, errChecker: NotNil, podChecker: IsNil, pvcChecker: NotNil, }, { args: &types.SnapshotBrowseArgs{ Namespace: "ns", RunAsUser: 100, }, sc: &sv1.StorageClass{ ObjectMeta: metav1.ObjectMeta{ Name: "sc", }, }, snapshot: &snapv1.VolumeSnapshot{ ObjectMeta: metav1.ObjectMeta{ Name: "vs", }, Status: &snapv1.VolumeSnapshotStatus{ RestoreSize: &resourceQuantity, }, }, prepare: func(f *fields) { gomock.InOrder( f.createAppOps.EXPECT().CreatePVC(gomock.Any(), gomock.Any()).Return(nil, fmt.Errorf("error")), ) }, errChecker: NotNil, podChecker: IsNil, pvcChecker: IsNil, }, } { ctrl := gomock.NewController(c) defer ctrl.Finish() f := fields{ createAppOps: mocks.NewMockApplicationCreator(ctrl), } if tc.prepare != nil { tc.prepare(&f) } stepper := &snapshotBrowserSteps{ createAppOps: f.createAppOps, } pod, pvc, err := stepper.CreateInspectorApplication(ctx, tc.args, tc.snapshot, tc.sc) c.Check(err, tc.errChecker) c.Check(pod, tc.podChecker) c.Check(pvc, tc.pvcChecker) } } func (s *CSITestSuite) TestSnapshotBrowseCleanup(c *C) { ctx := context.Background() groupversion := &metav1.GroupVersionForDiscovery{ GroupVersion: "gv", Version: "v", } type fields struct { cleanerOps *mocks.MockCleaner } for _, tc := range []struct { pvc *v1.PersistentVolumeClaim pod *v1.Pod prepare func(f *fields) }{ { pvc: &v1.PersistentVolumeClaim{ ObjectMeta: metav1.ObjectMeta{ Name: "pvc", Namespace: "ns", }, }, pod: &v1.Pod{ ObjectMeta: metav1.ObjectMeta{ Name: "pod", Namespace: "ns", }, }, prepare: func(f *fields) { gomock.InOrder( f.cleanerOps.EXPECT().DeletePVC(ctx, "pvc", "ns").Return(nil), f.cleanerOps.EXPECT().DeletePod(ctx, "pod", "ns").Return(nil), ) }, }, { pvc: &v1.PersistentVolumeClaim{ ObjectMeta: metav1.ObjectMeta{ Name: "pvc", Namespace: "ns", }, }, pod: &v1.Pod{ ObjectMeta: metav1.ObjectMeta{ Name: "pod", Namespace: "ns", }, }, prepare: func(f *fields) { gomock.InOrder( f.cleanerOps.EXPECT().DeletePVC(ctx, "pvc", "ns").Return(fmt.Errorf("err")), f.cleanerOps.EXPECT().DeletePod(ctx, "pod", "ns").Return(fmt.Errorf("err")), ) }, }, } { ctrl := gomock.NewController(c) defer ctrl.Finish() f := fields{ cleanerOps: mocks.NewMockCleaner(ctrl), } if tc.prepare != nil { tc.prepare(&f) } stepper := &snapshotBrowserSteps{ cleanerOps: f.cleanerOps, SnapshotGroupVersion: groupversion, } stepper.Cleanup(ctx, tc.pvc, tc.pod) } } ================================================ FILE: pkg/csi/snapshot_inspector_test.go ================================================ package csi import ( "context" "fmt" "github.com/golang/mock/gomock" "github.com/kastenhq/kubestr/pkg/csi/mocks" "github.com/kastenhq/kubestr/pkg/csi/types" snapv1 "github.com/kubernetes-csi/external-snapshotter/client/v4/apis/volumesnapshot/v1" . "gopkg.in/check.v1" v1 "k8s.io/api/core/v1" sv1 "k8s.io/api/storage/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" "k8s.io/client-go/dynamic" fakedynamic "k8s.io/client-go/dynamic/fake" "k8s.io/client-go/kubernetes" "k8s.io/client-go/kubernetes/fake" ) func (s *CSITestSuite) TestRunSnapshotBrowseHelper(c *C) { ctx := context.Background() type fields struct { stepperOps *mocks.MockSnapshotBrowserStepper } for _, tc := range []struct { kubeCli kubernetes.Interface dynCli dynamic.Interface args *types.SnapshotBrowseArgs prepare func(f *fields) errChecker Checker }{ { // success kubeCli: fake.NewSimpleClientset(), dynCli: fakedynamic.NewSimpleDynamicClient(runtime.NewScheme()), args: &types.SnapshotBrowseArgs{}, prepare: func(f *fields) { gomock.InOrder( f.stepperOps.EXPECT().ValidateArgs(gomock.Any(), gomock.Any()).Return( &snapv1.VolumeSnapshot{}, &sv1.StorageClass{}, nil, ), f.stepperOps.EXPECT().CreateInspectorApplication(gomock.Any(), gomock.Any(), &snapv1.VolumeSnapshot{}, &sv1.StorageClass{}, ).Return( &v1.Pod{ ObjectMeta: metav1.ObjectMeta{ Name: "pod1", Namespace: "ns", }, }, &v1.PersistentVolumeClaim{ ObjectMeta: metav1.ObjectMeta{ Name: "pvc1", Namespace: "ns", }, }, nil, ), f.stepperOps.EXPECT().PortForwardAPod(gomock.Any(), &v1.Pod{ ObjectMeta: metav1.ObjectMeta{ Name: "pod1", Namespace: "ns", }, }, gomock.Any(), ).Return(nil), f.stepperOps.EXPECT().Cleanup(gomock.Any(), &v1.PersistentVolumeClaim{ ObjectMeta: metav1.ObjectMeta{ Name: "pvc1", Namespace: "ns", }, }, &v1.Pod{ ObjectMeta: metav1.ObjectMeta{ Name: "pod1", Namespace: "ns", }, }, ), ) }, errChecker: IsNil, }, { // portforward failure kubeCli: fake.NewSimpleClientset(), dynCli: fakedynamic.NewSimpleDynamicClient(runtime.NewScheme()), args: &types.SnapshotBrowseArgs{}, prepare: func(f *fields) { gomock.InOrder( f.stepperOps.EXPECT().ValidateArgs(gomock.Any(), gomock.Any()).Return(nil, nil, nil), f.stepperOps.EXPECT().CreateInspectorApplication(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).Return(nil, nil, nil), f.stepperOps.EXPECT().PortForwardAPod(gomock.Any(), gomock.Any(), gomock.Any()).Return(fmt.Errorf("portforward error")), f.stepperOps.EXPECT().Cleanup(gomock.Any(), gomock.Any(), gomock.Any()), ) }, errChecker: NotNil, }, { // createapp failure kubeCli: fake.NewSimpleClientset(), dynCli: fakedynamic.NewSimpleDynamicClient(runtime.NewScheme()), args: &types.SnapshotBrowseArgs{}, prepare: func(f *fields) { gomock.InOrder( f.stepperOps.EXPECT().ValidateArgs(gomock.Any(), gomock.Any()).Return(nil, nil, nil), f.stepperOps.EXPECT().CreateInspectorApplication(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).Return(nil, nil, fmt.Errorf("createapp error")), f.stepperOps.EXPECT().Cleanup(gomock.Any(), gomock.Any(), gomock.Any()), ) }, errChecker: NotNil, }, { // fetch snapshot failure kubeCli: fake.NewSimpleClientset(), dynCli: fakedynamic.NewSimpleDynamicClient(runtime.NewScheme()), args: &types.SnapshotBrowseArgs{}, prepare: func(f *fields) { gomock.InOrder( f.stepperOps.EXPECT().ValidateArgs(gomock.Any(), gomock.Any()).Return(nil, nil, fmt.Errorf("snapshot error")), f.stepperOps.EXPECT().Cleanup(gomock.Any(), gomock.Any(), gomock.Any()), ) }, errChecker: NotNil, }, { // validate failure kubeCli: fake.NewSimpleClientset(), dynCli: fakedynamic.NewSimpleDynamicClient(runtime.NewScheme()), args: &types.SnapshotBrowseArgs{}, prepare: func(f *fields) { gomock.InOrder( f.stepperOps.EXPECT().ValidateArgs(gomock.Any(), gomock.Any()).Return(nil, nil, fmt.Errorf("validate error")), f.stepperOps.EXPECT().Cleanup(gomock.Any(), gomock.Any(), gomock.Any()), ) }, errChecker: NotNil, }, { // emptycli failure kubeCli: nil, dynCli: fakedynamic.NewSimpleDynamicClient(runtime.NewScheme()), args: &types.SnapshotBrowseArgs{}, prepare: func(f *fields) { gomock.InOrder( f.stepperOps.EXPECT().Cleanup(gomock.Any(), gomock.Any(), gomock.Any()), ) }, errChecker: NotNil, }, { // emptydyncli failure kubeCli: fake.NewSimpleClientset(), dynCli: nil, args: &types.SnapshotBrowseArgs{}, prepare: func(f *fields) { gomock.InOrder( f.stepperOps.EXPECT().Cleanup(gomock.Any(), gomock.Any(), gomock.Any()), ) }, errChecker: NotNil, }, } { ctrl := gomock.NewController(c) defer ctrl.Finish() f := fields{ stepperOps: mocks.NewMockSnapshotBrowserStepper(ctrl), } if tc.prepare != nil { tc.prepare(&f) } runner := &SnapshotBrowseRunner{ KubeCli: tc.kubeCli, DynCli: tc.dynCli, browserSteps: f.stepperOps, } err := runner.RunSnapshotBrowseHelper(ctx, tc.args) c.Check(err, tc.errChecker) } } func (s *CSITestSuite) TestSnapshotBrowseRunner(c *C) { ctx := context.Background() r := &SnapshotBrowseRunner{ browserSteps: &snapshotBrowserSteps{}, } err := r.RunSnapshotBrowseHelper(ctx, nil) c.Check(err, NotNil) } ================================================ FILE: pkg/csi/snapshot_restore.go ================================================ package csi import ( "context" "fmt" "time" "github.com/kastenhq/kubestr/pkg/common" "github.com/kastenhq/kubestr/pkg/csi/types" snapv1 "github.com/kubernetes-csi/external-snapshotter/client/v4/apis/volumesnapshot/v1" "github.com/pkg/errors" v1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" "k8s.io/client-go/dynamic" "k8s.io/client-go/kubernetes" ) const ( originalPVCGenerateName = "kubestr-csi-original-pvc" originalPodGenerateName = "kubestr-csi-original-pod" clonedPVCGenerateName = "kubestr-csi-cloned-pvc" clonedPodGenerateName = "kubestr-csi-cloned-pod" createdByLabel = "created-by-kubestr-csi" clonePrefix = "kubestr-clone-" snapshotPrefix = "kubestr-snapshot-" ) type SnapshotRestoreRunner struct { KubeCli kubernetes.Interface DynCli dynamic.Interface srSteps SnapshotRestoreStepper } func (r *SnapshotRestoreRunner) RunSnapshotRestore(ctx context.Context, args *types.CSISnapshotRestoreArgs) (*types.CSISnapshotRestoreResults, error) { if r.KubeCli == nil || r.DynCli == nil { return &types.CSISnapshotRestoreResults{}, fmt.Errorf("cli uninitialized") } if args == nil { return &types.CSISnapshotRestoreResults{}, fmt.Errorf("snapshot args not specified") } r.srSteps = &snapshotRestoreSteps{ validateOps: &validateOperations{ kubeCli: r.KubeCli, dynCli: r.DynCli, }, versionFetchOps: &apiVersionFetch{ kubeCli: r.KubeCli, }, createAppOps: &applicationCreate{ kubeCli: r.KubeCli, k8sObjectReadyTimeout: args.K8sObjectReadyTimeout, }, dataValidatorOps: &validateData{ kubeCli: r.KubeCli, }, snapshotCreateOps: &snapshotCreate{ kubeCli: r.KubeCli, dynCli: r.DynCli, }, cleanerOps: &cleanse{ kubeCli: r.KubeCli, dynCli: r.DynCli, }, } return r.RunSnapshotRestoreHelper(ctx, args) } func (r *SnapshotRestoreRunner) RunSnapshotRestoreHelper(ctx context.Context, args *types.CSISnapshotRestoreArgs) (*types.CSISnapshotRestoreResults, error) { results := &types.CSISnapshotRestoreResults{} var err error if r.KubeCli == nil || r.DynCli == nil { return results, fmt.Errorf("cli uninitialized") } if err := r.srSteps.ValidateArgs(ctx, args); err != nil { return results, errors.Wrap(err, "failed to validate arguments") } data := time.Now().Format("20060102150405") fmt.Println("Creating application") results.OriginalPod, results.OriginalPVC, err = r.srSteps.CreateApplication(ctx, args, data) if err == nil { if results.OriginalPod != nil && results.OriginalPVC != nil { fmt.Printf(" -> Created pod (%s) and pvc (%s)\n", results.OriginalPod.Name, results.OriginalPVC.Name) } err = r.srSteps.ValidateData(ctx, results.OriginalPod, data) } snapName := snapshotPrefix + data if err == nil { fmt.Println("Taking a snapshot") results.Snapshot, err = r.srSteps.SnapshotApplication(ctx, args, results.OriginalPVC, snapName) } if err == nil { if results.Snapshot != nil { fmt.Printf(" -> Created snapshot (%s)\n", results.Snapshot.Name) } fmt.Println("Restoring application") results.ClonedPod, results.ClonedPVC, err = r.srSteps.RestoreApplication(ctx, args, results.Snapshot) } if err == nil { if results.ClonedPod != nil && results.ClonedPVC != nil { fmt.Printf(" -> Restored pod (%s) and pvc (%s)\n", results.ClonedPod.Name, results.ClonedPVC.Name) } err = r.srSteps.ValidateData(ctx, results.ClonedPod, data) } if args.Cleanup { fmt.Println("Cleaning up resources") // don't let Cancelled/DeadlineExceeded context affect cleanup r.srSteps.Cleanup(context.Background(), results) } return results, err } //go:generate go run github.com/golang/mock/mockgen -destination=mocks/mock_snapshot_restore_stepper.go -package=mocks . SnapshotRestoreStepper type SnapshotRestoreStepper interface { ValidateArgs(ctx context.Context, args *types.CSISnapshotRestoreArgs) error CreateApplication(ctx context.Context, args *types.CSISnapshotRestoreArgs, data string) (*v1.Pod, *v1.PersistentVolumeClaim, error) ValidateData(ctx context.Context, pod *v1.Pod, data string) error SnapshotApplication(ctx context.Context, args *types.CSISnapshotRestoreArgs, pvc *v1.PersistentVolumeClaim, snapshotName string) (*snapv1.VolumeSnapshot, error) RestoreApplication(ctx context.Context, args *types.CSISnapshotRestoreArgs, snapshot *snapv1.VolumeSnapshot) (*v1.Pod, *v1.PersistentVolumeClaim, error) Cleanup(ctx context.Context, results *types.CSISnapshotRestoreResults) } type snapshotRestoreSteps struct { validateOps ArgumentValidator versionFetchOps ApiVersionFetcher createAppOps ApplicationCreator dataValidatorOps DataValidator snapshotCreateOps SnapshotCreator cleanerOps Cleaner SnapshotGroupVersion *metav1.GroupVersionForDiscovery } func (s *snapshotRestoreSteps) ValidateArgs(ctx context.Context, args *types.CSISnapshotRestoreArgs) error { if err := args.Validate(); err != nil { return errors.Wrap(err, "failed to validate input arguments") } if err := s.validateOps.ValidateNamespace(ctx, args.Namespace); err != nil { return errors.Wrap(err, "failed to validate Namespace") } sc, err := s.validateOps.ValidateStorageClass(ctx, args.StorageClass) if err != nil { return errors.Wrap(err, "failed to validate Storageclass") } groupVersion, err := s.versionFetchOps.GetCSISnapshotGroupVersion() if err != nil { return errors.Wrap(err, "failed to fetch groupVersion") } s.SnapshotGroupVersion = groupVersion uVSC, err := s.validateOps.ValidateVolumeSnapshotClass(ctx, args.VolumeSnapshotClass, groupVersion) if err != nil { return errors.Wrap(err, "failed to validate VolumeSnapshotClass") } vscDriver := getDriverNameFromUVSC(*uVSC, groupVersion.GroupVersion) if sc.Provisioner != vscDriver { return fmt.Errorf("provisioner for StorageClass (%s) and VolumeSnapshotClass driver (%s) are different", sc.Provisioner, vscDriver) } return nil } func (s *snapshotRestoreSteps) CreateApplication(ctx context.Context, args *types.CSISnapshotRestoreArgs, genString string) (*v1.Pod, *v1.PersistentVolumeClaim, error) { pvcArgs := &types.CreatePVCArgs{ GenerateName: originalPVCGenerateName, StorageClass: args.StorageClass, Namespace: args.Namespace, } pvc, err := s.createAppOps.CreatePVC(ctx, pvcArgs) if err != nil { return nil, nil, errors.Wrap(err, "failed to create PVC") } podArgs := &types.CreatePodArgs{ GenerateName: originalPodGenerateName, Namespace: args.Namespace, RunAsUser: args.RunAsUser, ContainerImage: args.ContainerImage, Command: []string{"/bin/sh"}, ContainerArgs: []string{"-c", fmt.Sprintf("echo '%s' >> /data/out.txt; sync; tail -f /dev/null", genString)}, PVCMap: map[string]types.VolumePath{ pvc.Name: { MountPath: "/data", }, }, } pod, err := s.createAppOps.CreatePod(ctx, podArgs) if err != nil { return nil, pvc, errors.Wrap(err, "failed to create pod") } if err = s.createAppOps.WaitForPVCReady(ctx, args.Namespace, pvc.Name); err != nil { return pod, pvc, errors.Wrap(err, "PVC failed to become ready") } if err = s.createAppOps.WaitForPodReady(ctx, args.Namespace, pod.Name); err != nil { return pod, pvc, errors.Wrap(err, "pod failed to become ready") } return pod, pvc, nil } func (s *snapshotRestoreSteps) ValidateData(ctx context.Context, pod *v1.Pod, data string) error { podData, err := s.dataValidatorOps.FetchPodData(ctx, pod.Name, pod.Namespace) if err != nil { return errors.Wrap(err, "failed to fetch data from pod. Failure may be due to permissions issues, try again with runAsUser=1000 option") } if podData != data { return fmt.Errorf("string didn't match (%s , %s)", podData, data) } return nil } func (s *snapshotRestoreSteps) SnapshotApplication(ctx context.Context, args *types.CSISnapshotRestoreArgs, pvc *v1.PersistentVolumeClaim, snapshotName string) (*snapv1.VolumeSnapshot, error) { snapshotter, err := s.snapshotCreateOps.NewSnapshotter() if err != nil { return nil, errors.Wrap(err, "failed to load snapshotter") } createSnapshotArgs := &types.CreateSnapshotArgs{ Namespace: args.Namespace, PVCName: pvc.Name, VolumeSnapshotClass: args.VolumeSnapshotClass, SnapshotName: snapshotName, } snapshot, err := s.snapshotCreateOps.CreateSnapshot(ctx, snapshotter, createSnapshotArgs) if err != nil { return nil, errors.Wrap(err, "failed to create snapshot") } if !args.SkipCFSCheck { cfsArgs := &types.CreateFromSourceCheckArgs{ VolumeSnapshotClass: args.VolumeSnapshotClass, SnapshotName: snapshot.Name, Namespace: args.Namespace, } if err = s.snapshotCreateOps.CreateFromSourceCheck(ctx, snapshotter, cfsArgs, s.SnapshotGroupVersion); err != nil { return snapshot, errors.Wrap(err, "failed to create duplicate snapshot from source. To skip check use '--skipcfs=true' option") } } return snapshot, nil } func (s *snapshotRestoreSteps) RestoreApplication(ctx context.Context, args *types.CSISnapshotRestoreArgs, snapshot *snapv1.VolumeSnapshot) (*v1.Pod, *v1.PersistentVolumeClaim, error) { snapshotAPIGroup := "snapshot.storage.k8s.io" snapshotKind := "VolumeSnapshot" dataSource := &v1.TypedLocalObjectReference{ APIGroup: &snapshotAPIGroup, Kind: snapshotKind, Name: snapshot.Name, } pvcArgs := &types.CreatePVCArgs{ GenerateName: clonedPVCGenerateName, StorageClass: args.StorageClass, Namespace: args.Namespace, DataSource: dataSource, RestoreSize: snapshot.Status.RestoreSize, } pvc, err := s.createAppOps.CreatePVC(ctx, pvcArgs) if err != nil { return nil, nil, errors.Wrap(err, "failed to restore PVC") } podArgs := &types.CreatePodArgs{ GenerateName: clonedPodGenerateName, Namespace: args.Namespace, RunAsUser: args.RunAsUser, ContainerImage: args.ContainerImage, Command: []string{"/bin/sh"}, ContainerArgs: []string{"-c", "tail -f /dev/null"}, PVCMap: map[string]types.VolumePath{ pvc.Name: { MountPath: "/data", }, }, } pod, err := s.createAppOps.CreatePod(ctx, podArgs) if err != nil { return nil, pvc, errors.Wrap(err, "failed to create restored pod") } if err = s.createAppOps.WaitForPVCReady(ctx, args.Namespace, pvc.Name); err != nil { return pod, pvc, errors.Wrap(err, "PVC failed to become ready") } if err = s.createAppOps.WaitForPodReady(ctx, args.Namespace, pod.Name); err != nil { return pod, pvc, errors.Wrap(err, "pod failed to become ready") } return pod, pvc, nil } func (s *snapshotRestoreSteps) Cleanup(ctx context.Context, results *types.CSISnapshotRestoreResults) { if results == nil { return } if results.OriginalPVC != nil { err := s.cleanerOps.DeletePVC(ctx, results.OriginalPVC.Name, results.OriginalPVC.Namespace) if err != nil { fmt.Printf("Error deleting original PVC (%s) - (%v)\n", results.OriginalPVC.Name, err) } } if results.OriginalPod != nil { err := s.cleanerOps.DeletePod(ctx, results.OriginalPod.Name, results.OriginalPod.Namespace) if err != nil { fmt.Printf("Error deleting original Pod (%s) - (%v)\n", results.OriginalPod.Name, err) } } if results.ClonedPVC != nil { err := s.cleanerOps.DeletePVC(ctx, results.ClonedPVC.Name, results.ClonedPVC.Namespace) if err != nil { fmt.Printf("Error deleting cloned PVC (%s) - (%v)\n", results.ClonedPVC.Name, err) } } if results.ClonedPod != nil { err := s.cleanerOps.DeletePod(ctx, results.ClonedPod.Name, results.ClonedPod.Namespace) if err != nil { fmt.Printf("Error deleting cloned Pod (%s) - (%v)\n", results.ClonedPod.Name, err) } } if results.Snapshot != nil { err := s.cleanerOps.DeleteSnapshot(ctx, results.Snapshot.Name, results.Snapshot.Namespace, s.SnapshotGroupVersion) if err != nil { fmt.Printf("Error deleting Snapshot (%s) - (%v)\n", results.Snapshot.Name, err) } } } func getDriverNameFromUVSC(vsc unstructured.Unstructured, version string) string { var driverName interface{} var ok bool if version != common.SnapshotVersion { return "" } driverName, ok = vsc.Object[common.VolSnapClassDriverKey] if !ok { return "" } driver, ok := driverName.(string) if !ok { return "" } return driver } ================================================ FILE: pkg/csi/snapshot_restore_steps_test.go ================================================ package csi import ( "context" "fmt" "github.com/golang/mock/gomock" "github.com/kastenhq/kubestr/pkg/common" "github.com/kastenhq/kubestr/pkg/csi/mocks" "github.com/kastenhq/kubestr/pkg/csi/types" snapv1 "github.com/kubernetes-csi/external-snapshotter/client/v4/apis/volumesnapshot/v1" . "gopkg.in/check.v1" v1 "k8s.io/api/core/v1" sv1 "k8s.io/api/storage/v1" "k8s.io/apimachinery/pkg/api/resource" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" ) func (s *CSITestSuite) TestValidateArgs(c *C) { ctx := context.Background() type fields struct { validateOps *mocks.MockArgumentValidator versionOps *mocks.MockApiVersionFetcher } for _, tc := range []struct { args *types.CSISnapshotRestoreArgs prepare func(f *fields) errChecker Checker }{ { // valid args args: &types.CSISnapshotRestoreArgs{ StorageClass: "sc", VolumeSnapshotClass: "vsc", Namespace: "ns", }, prepare: func(f *fields) { gomock.InOrder( f.validateOps.EXPECT().ValidateNamespace(gomock.Any(), "ns").Return(nil), f.validateOps.EXPECT().ValidateStorageClass(gomock.Any(), "sc").Return( &sv1.StorageClass{ Provisioner: "p1", }, nil), f.versionOps.EXPECT().GetCSISnapshotGroupVersion().Return( &metav1.GroupVersionForDiscovery{ GroupVersion: common.SnapshotVersion, }, nil), f.validateOps.EXPECT().ValidateVolumeSnapshotClass(gomock.Any(), "vsc", &metav1.GroupVersionForDiscovery{ GroupVersion: common.SnapshotVersion, }).Return(&unstructured.Unstructured{ Object: map[string]interface{}{ common.VolSnapClassDriverKey: "p1", }, }, nil), ) }, errChecker: IsNil, }, { // driver mismatch args: &types.CSISnapshotRestoreArgs{ StorageClass: "sc", VolumeSnapshotClass: "vsc", Namespace: "ns", }, prepare: func(f *fields) { gomock.InOrder( f.validateOps.EXPECT().ValidateNamespace(gomock.Any(), "ns").Return(nil), f.validateOps.EXPECT().ValidateStorageClass(gomock.Any(), "sc").Return( &sv1.StorageClass{ Provisioner: "p1", }, nil), f.versionOps.EXPECT().GetCSISnapshotGroupVersion().Return( &metav1.GroupVersionForDiscovery{ GroupVersion: common.SnapshotVersion, }, nil), f.validateOps.EXPECT().ValidateVolumeSnapshotClass(gomock.Any(), "vsc", &metav1.GroupVersionForDiscovery{ GroupVersion: common.SnapshotVersion, }).Return(&unstructured.Unstructured{ Object: map[string]interface{}{ common.VolSnapClassDriverKey: "p2", }, }, nil), ) }, errChecker: NotNil, }, { // vsc error args: &types.CSISnapshotRestoreArgs{ StorageClass: "sc", VolumeSnapshotClass: "vsc", Namespace: "ns", }, prepare: func(f *fields) { gomock.InOrder( f.validateOps.EXPECT().ValidateNamespace(gomock.Any(), "ns").Return(nil), f.validateOps.EXPECT().ValidateStorageClass(gomock.Any(), "sc").Return( &sv1.StorageClass{ Provisioner: "p1", }, nil), f.versionOps.EXPECT().GetCSISnapshotGroupVersion().Return( &metav1.GroupVersionForDiscovery{ GroupVersion: common.SnapshotVersion, }, nil), f.validateOps.EXPECT().ValidateVolumeSnapshotClass(gomock.Any(), "vsc", &metav1.GroupVersionForDiscovery{ GroupVersion: common.SnapshotVersion, }).Return(nil, fmt.Errorf("vsc error")), ) }, errChecker: NotNil, }, { // groupversion error args: &types.CSISnapshotRestoreArgs{ StorageClass: "sc", VolumeSnapshotClass: "vsc", Namespace: "ns", }, prepare: func(f *fields) { gomock.InOrder( f.validateOps.EXPECT().ValidateNamespace(gomock.Any(), "ns").Return(nil), f.validateOps.EXPECT().ValidateStorageClass(gomock.Any(), "sc").Return( &sv1.StorageClass{ Provisioner: "p1", }, nil), f.versionOps.EXPECT().GetCSISnapshotGroupVersion().Return( nil, fmt.Errorf("groupversion error")), ) }, errChecker: NotNil, }, { args: &types.CSISnapshotRestoreArgs{ StorageClass: "sc", VolumeSnapshotClass: "vsc", Namespace: "ns", }, prepare: func(f *fields) { gomock.InOrder( f.validateOps.EXPECT().ValidateNamespace(gomock.Any(), "ns").Return(nil), f.validateOps.EXPECT().ValidateStorageClass(gomock.Any(), "sc").Return( nil, fmt.Errorf("sc error")), ) }, errChecker: NotNil, }, { args: &types.CSISnapshotRestoreArgs{ StorageClass: "sc", VolumeSnapshotClass: "vsc", Namespace: "ns", }, prepare: func(f *fields) { gomock.InOrder( f.validateOps.EXPECT().ValidateNamespace(gomock.Any(), "ns").Return(fmt.Errorf("ns error")), ) }, errChecker: NotNil, }, { args: &types.CSISnapshotRestoreArgs{ StorageClass: "", VolumeSnapshotClass: "vsc", Namespace: "ns", }, errChecker: NotNil, }, { args: &types.CSISnapshotRestoreArgs{ StorageClass: "sc", VolumeSnapshotClass: "", Namespace: "ns", }, errChecker: NotNil, }, { args: &types.CSISnapshotRestoreArgs{ StorageClass: "sc", VolumeSnapshotClass: "vsc", Namespace: "", }, errChecker: NotNil, }, } { ctrl := gomock.NewController(c) defer ctrl.Finish() f := fields{ validateOps: mocks.NewMockArgumentValidator(ctrl), versionOps: mocks.NewMockApiVersionFetcher(ctrl), } if tc.prepare != nil { tc.prepare(&f) } stepper := &snapshotRestoreSteps{ validateOps: f.validateOps, versionFetchOps: f.versionOps, } err := stepper.ValidateArgs(ctx, tc.args) c.Check(err, tc.errChecker) } } func (s *CSITestSuite) TestCreateApplication(c *C) { ctx := context.Background() type fields struct { createAppOps *mocks.MockApplicationCreator } for _, tc := range []struct { args *types.CSISnapshotRestoreArgs genString string prepare func(f *fields) errChecker Checker podChecker Checker pvcChecker Checker }{ { args: &types.CSISnapshotRestoreArgs{ StorageClass: "sc", Namespace: "ns", RunAsUser: 100, ContainerImage: "image", }, genString: "some string", prepare: func(f *fields) { gomock.InOrder( f.createAppOps.EXPECT().CreatePVC(gomock.Any(), &types.CreatePVCArgs{ GenerateName: originalPVCGenerateName, StorageClass: "sc", Namespace: "ns", }).Return(&v1.PersistentVolumeClaim{ ObjectMeta: metav1.ObjectMeta{ Name: "pvc1", }, }, nil), f.createAppOps.EXPECT().CreatePod(gomock.Any(), &types.CreatePodArgs{ GenerateName: originalPodGenerateName, Namespace: "ns", Command: []string{"/bin/sh"}, ContainerArgs: []string{"-c", "echo 'some string' >> /data/out.txt; sync; tail -f /dev/null"}, RunAsUser: 100, ContainerImage: "image", PVCMap: map[string]types.VolumePath{ "pvc1": { MountPath: "/data", }, }, }).Return(&v1.Pod{ ObjectMeta: metav1.ObjectMeta{ Name: "pod1", }, }, nil), f.createAppOps.EXPECT().WaitForPVCReady(gomock.Any(), "ns", "pvc1").Return(nil), f.createAppOps.EXPECT().WaitForPodReady(gomock.Any(), "ns", "pod1").Return(nil), ) }, errChecker: IsNil, podChecker: NotNil, pvcChecker: NotNil, }, { args: &types.CSISnapshotRestoreArgs{ StorageClass: "sc", Namespace: "ns", RunAsUser: 100, ContainerImage: "image", }, genString: "some string", prepare: func(f *fields) { gomock.InOrder( f.createAppOps.EXPECT().CreatePVC(gomock.Any(), &types.CreatePVCArgs{ GenerateName: originalPVCGenerateName, StorageClass: "sc", Namespace: "ns", }).Return(&v1.PersistentVolumeClaim{ ObjectMeta: metav1.ObjectMeta{ Name: "pvc1", }, }, nil), f.createAppOps.EXPECT().CreatePod(gomock.Any(), &types.CreatePodArgs{ GenerateName: originalPodGenerateName, Namespace: "ns", Command: []string{"/bin/sh"}, ContainerArgs: []string{"-c", "echo 'some string' >> /data/out.txt; sync; tail -f /dev/null"}, RunAsUser: 100, ContainerImage: "image", PVCMap: map[string]types.VolumePath{ "pvc1": { MountPath: "/data", }, }, }).Return(&v1.Pod{ ObjectMeta: metav1.ObjectMeta{ Name: "pod1", }, }, nil), f.createAppOps.EXPECT().WaitForPVCReady(gomock.Any(), "ns", "pvc1").Return(nil), f.createAppOps.EXPECT().WaitForPodReady(gomock.Any(), "ns", "pod1").Return(fmt.Errorf("pod ready error")), ) }, errChecker: NotNil, podChecker: NotNil, pvcChecker: NotNil, }, { args: &types.CSISnapshotRestoreArgs{ StorageClass: "sc", Namespace: "ns", RunAsUser: 100, ContainerImage: "image", }, genString: "some string", prepare: func(f *fields) { gomock.InOrder( f.createAppOps.EXPECT().CreatePVC(gomock.Any(), gomock.Any()).Return(&v1.PersistentVolumeClaim{ ObjectMeta: metav1.ObjectMeta{ Name: "pvc1", }, }, nil), f.createAppOps.EXPECT().CreatePod(gomock.Any(), gomock.Any()).Return(nil, fmt.Errorf("create pod error")), ) }, errChecker: NotNil, podChecker: IsNil, pvcChecker: NotNil, }, { args: &types.CSISnapshotRestoreArgs{ StorageClass: "sc", Namespace: "ns", RunAsUser: 100, ContainerImage: "image", }, genString: "some string", prepare: func(f *fields) { gomock.InOrder( f.createAppOps.EXPECT().CreatePVC(gomock.Any(), gomock.Any()).Return(nil, fmt.Errorf("create pvc error")), ) }, errChecker: NotNil, podChecker: IsNil, pvcChecker: IsNil, }, { // PVC times out provisioning args: &types.CSISnapshotRestoreArgs{ StorageClass: "sc", Namespace: "ns", RunAsUser: 100, ContainerImage: "image", }, genString: "some string", prepare: func(f *fields) { gomock.InOrder( f.createAppOps.EXPECT().CreatePVC(gomock.Any(), &types.CreatePVCArgs{ GenerateName: originalPVCGenerateName, StorageClass: "sc", Namespace: "ns", }).Return(&v1.PersistentVolumeClaim{ ObjectMeta: metav1.ObjectMeta{ Name: "pvc1", }, }, nil), f.createAppOps.EXPECT().CreatePod(gomock.Any(), &types.CreatePodArgs{ GenerateName: originalPodGenerateName, Namespace: "ns", Command: []string{"/bin/sh"}, ContainerArgs: []string{"-c", "echo 'some string' >> /data/out.txt; sync; tail -f /dev/null"}, RunAsUser: 100, ContainerImage: "image", PVCMap: map[string]types.VolumePath{ "pvc1": { MountPath: "/data", }, }, }).Return(&v1.Pod{ ObjectMeta: metav1.ObjectMeta{ Name: "pod1", }, }, nil), f.createAppOps.EXPECT().WaitForPVCReady(gomock.Any(), "ns", "pvc1").Return(fmt.Errorf("rate: Wait(n=1) would exceed context deadline")), f.createAppOps.EXPECT().WaitForPodReady(gomock.Any(), "ns", "pvc1").Times(0), ) }, errChecker: NotNil, podChecker: NotNil, pvcChecker: NotNil, }, } { ctrl := gomock.NewController(c) defer ctrl.Finish() f := fields{ createAppOps: mocks.NewMockApplicationCreator(ctrl), } if tc.prepare != nil { tc.prepare(&f) } stepper := &snapshotRestoreSteps{ createAppOps: f.createAppOps, } pod, pvc, err := stepper.CreateApplication(ctx, tc.args, tc.genString) c.Check(err, tc.errChecker) c.Check(pod, tc.podChecker) c.Check(pvc, tc.pvcChecker) } } func (s *CSITestSuite) TestSnapshotApplication(c *C) { ctx := context.Background() snapshotter := &fakeSnapshotter{name: "snapshotter"} groupversion := &metav1.GroupVersionForDiscovery{ GroupVersion: "gv", Version: "v", } type fields struct { snapshotOps *mocks.MockSnapshotCreator } for _, tc := range []struct { args *types.CSISnapshotRestoreArgs pvc *v1.PersistentVolumeClaim snapshotName string prepare func(f *fields) errChecker Checker snapChecker Checker }{ { args: &types.CSISnapshotRestoreArgs{ Namespace: "ns", VolumeSnapshotClass: "vsc", }, pvc: &v1.PersistentVolumeClaim{ ObjectMeta: metav1.ObjectMeta{ Name: "pvc1", }, }, snapshotName: "snap1", prepare: func(f *fields) { gomock.InOrder( f.snapshotOps.EXPECT().NewSnapshotter().Return(snapshotter, nil), f.snapshotOps.EXPECT().CreateSnapshot(gomock.Any(), snapshotter, &types.CreateSnapshotArgs{ Namespace: "ns", PVCName: "pvc1", VolumeSnapshotClass: "vsc", SnapshotName: "snap1", }).Return(&snapv1.VolumeSnapshot{ ObjectMeta: metav1.ObjectMeta{ Name: "createdName", }, }, nil), f.snapshotOps.EXPECT().CreateFromSourceCheck(gomock.Any(), snapshotter, &types.CreateFromSourceCheckArgs{ VolumeSnapshotClass: "vsc", SnapshotName: "createdName", Namespace: "ns", }, groupversion).Return(nil), ) }, errChecker: IsNil, snapChecker: NotNil, }, { args: &types.CSISnapshotRestoreArgs{ Namespace: "ns", VolumeSnapshotClass: "vsc", SkipCFSCheck: true, }, pvc: &v1.PersistentVolumeClaim{ ObjectMeta: metav1.ObjectMeta{ Name: "pvc1", }, }, snapshotName: "snap1", prepare: func(f *fields) { gomock.InOrder( f.snapshotOps.EXPECT().NewSnapshotter().Return(snapshotter, nil), f.snapshotOps.EXPECT().CreateSnapshot(gomock.Any(), snapshotter, &types.CreateSnapshotArgs{ Namespace: "ns", PVCName: "pvc1", VolumeSnapshotClass: "vsc", SnapshotName: "snap1", }).Return(&snapv1.VolumeSnapshot{ ObjectMeta: metav1.ObjectMeta{ Name: "createdName", }, }, nil), ) }, errChecker: IsNil, snapChecker: NotNil, }, { args: &types.CSISnapshotRestoreArgs{ Namespace: "ns", VolumeSnapshotClass: "vsc", }, pvc: &v1.PersistentVolumeClaim{ ObjectMeta: metav1.ObjectMeta{ Name: "pvc1", }, }, snapshotName: "snap1", prepare: func(f *fields) { gomock.InOrder( f.snapshotOps.EXPECT().NewSnapshotter().Return(snapshotter, nil), f.snapshotOps.EXPECT().CreateSnapshot(gomock.Any(), snapshotter, &types.CreateSnapshotArgs{ Namespace: "ns", PVCName: "pvc1", VolumeSnapshotClass: "vsc", SnapshotName: "snap1", }).Return(&snapv1.VolumeSnapshot{ ObjectMeta: metav1.ObjectMeta{ Name: "createdName", }, }, nil), f.snapshotOps.EXPECT().CreateFromSourceCheck(gomock.Any(), snapshotter, &types.CreateFromSourceCheckArgs{ VolumeSnapshotClass: "vsc", SnapshotName: "createdName", Namespace: "ns", }, groupversion).Return(fmt.Errorf("cfs error")), ) }, errChecker: NotNil, snapChecker: NotNil, }, { args: &types.CSISnapshotRestoreArgs{ Namespace: "ns", VolumeSnapshotClass: "vsc", }, pvc: &v1.PersistentVolumeClaim{ ObjectMeta: metav1.ObjectMeta{ Name: "pvc1", }, }, snapshotName: "snap1", prepare: func(f *fields) { gomock.InOrder( f.snapshotOps.EXPECT().NewSnapshotter().Return(snapshotter, nil), f.snapshotOps.EXPECT().CreateSnapshot(gomock.Any(), snapshotter, &types.CreateSnapshotArgs{ Namespace: "ns", PVCName: "pvc1", VolumeSnapshotClass: "vsc", SnapshotName: "snap1", }).Return(nil, fmt.Errorf("create snapshot error")), ) }, errChecker: NotNil, snapChecker: IsNil, }, { args: &types.CSISnapshotRestoreArgs{ Namespace: "ns", VolumeSnapshotClass: "vsc", }, pvc: &v1.PersistentVolumeClaim{ ObjectMeta: metav1.ObjectMeta{ Name: "pvc1", }, }, snapshotName: "snap1", prepare: func(f *fields) { gomock.InOrder( f.snapshotOps.EXPECT().NewSnapshotter().Return(nil, fmt.Errorf("snapshotter error")), ) }, errChecker: NotNil, snapChecker: IsNil, }, } { ctrl := gomock.NewController(c) defer ctrl.Finish() f := fields{ snapshotOps: mocks.NewMockSnapshotCreator(ctrl), } if tc.prepare != nil { tc.prepare(&f) } stepper := &snapshotRestoreSteps{ snapshotCreateOps: f.snapshotOps, SnapshotGroupVersion: groupversion, } snapshot, err := stepper.SnapshotApplication(ctx, tc.args, tc.pvc, tc.snapshotName) c.Check(err, tc.errChecker) c.Check(snapshot, tc.snapChecker) } } func (s *CSITestSuite) TestRestoreApplication(c *C) { ctx := context.Background() resourceQuantity := resource.MustParse("1Gi") snapshotAPIGroup := "snapshot.storage.k8s.io" type fields struct { createAppOps *mocks.MockApplicationCreator } for _, tc := range []struct { args *types.CSISnapshotRestoreArgs snapshot *snapv1.VolumeSnapshot prepare func(f *fields) errChecker Checker podChecker Checker pvcChecker Checker }{ { args: &types.CSISnapshotRestoreArgs{ StorageClass: "sc", Namespace: "ns", RunAsUser: 100, ContainerImage: "image", }, snapshot: &snapv1.VolumeSnapshot{ ObjectMeta: metav1.ObjectMeta{ Name: "snap1", }, Status: &snapv1.VolumeSnapshotStatus{ RestoreSize: &resourceQuantity, }, }, prepare: func(f *fields) { gomock.InOrder( f.createAppOps.EXPECT().CreatePVC(gomock.Any(), &types.CreatePVCArgs{ GenerateName: clonedPVCGenerateName, StorageClass: "sc", Namespace: "ns", DataSource: &v1.TypedLocalObjectReference{ APIGroup: &snapshotAPIGroup, Kind: "VolumeSnapshot", Name: "snap1", }, RestoreSize: &resourceQuantity, }).Return(&v1.PersistentVolumeClaim{ ObjectMeta: metav1.ObjectMeta{ Name: "pvc1", }, }, nil), f.createAppOps.EXPECT().CreatePod(gomock.Any(), &types.CreatePodArgs{ GenerateName: clonedPodGenerateName, Namespace: "ns", Command: []string{"/bin/sh"}, ContainerArgs: []string{"-c", "tail -f /dev/null"}, RunAsUser: 100, ContainerImage: "image", PVCMap: map[string]types.VolumePath{ "pvc1": { MountPath: "/data", }, }, }).Return(&v1.Pod{ ObjectMeta: metav1.ObjectMeta{ Name: "pod1", }, }, nil), f.createAppOps.EXPECT().WaitForPVCReady(gomock.Any(), "ns", "pvc1").Return(nil), f.createAppOps.EXPECT().WaitForPodReady(gomock.Any(), "ns", "pod1").Return(nil), ) }, errChecker: IsNil, podChecker: NotNil, pvcChecker: NotNil, }, { args: &types.CSISnapshotRestoreArgs{ StorageClass: "sc", Namespace: "ns", RunAsUser: 100, ContainerImage: "image", }, snapshot: &snapv1.VolumeSnapshot{ ObjectMeta: metav1.ObjectMeta{ Name: "snap1", }, Status: &snapv1.VolumeSnapshotStatus{ RestoreSize: &resourceQuantity, }, }, prepare: func(f *fields) { gomock.InOrder( f.createAppOps.EXPECT().CreatePVC(gomock.Any(), &types.CreatePVCArgs{ GenerateName: clonedPVCGenerateName, StorageClass: "sc", Namespace: "ns", DataSource: &v1.TypedLocalObjectReference{ APIGroup: &snapshotAPIGroup, Kind: "VolumeSnapshot", Name: "snap1", }, RestoreSize: &resourceQuantity, }).Return(&v1.PersistentVolumeClaim{ ObjectMeta: metav1.ObjectMeta{ Name: "pvc1", }, }, nil), f.createAppOps.EXPECT().CreatePod(gomock.Any(), &types.CreatePodArgs{ GenerateName: clonedPodGenerateName, Namespace: "ns", Command: []string{"/bin/sh"}, ContainerArgs: []string{"-c", "tail -f /dev/null"}, RunAsUser: 100, ContainerImage: "image", PVCMap: map[string]types.VolumePath{ "pvc1": { MountPath: "/data", }, }, }).Return(&v1.Pod{ ObjectMeta: metav1.ObjectMeta{ Name: "pod1", }, }, nil), f.createAppOps.EXPECT().WaitForPVCReady(gomock.Any(), "ns", "pvc1").Return(nil), f.createAppOps.EXPECT().WaitForPodReady(gomock.Any(), "ns", "pod1").Return(fmt.Errorf("pod ready error")), ) }, errChecker: NotNil, podChecker: NotNil, pvcChecker: NotNil, }, { args: &types.CSISnapshotRestoreArgs{ StorageClass: "sc", Namespace: "ns", RunAsUser: 100, ContainerImage: "image", }, snapshot: &snapv1.VolumeSnapshot{ ObjectMeta: metav1.ObjectMeta{ Name: "snap1", }, Status: &snapv1.VolumeSnapshotStatus{ RestoreSize: &resourceQuantity, }, }, prepare: func(f *fields) { gomock.InOrder( f.createAppOps.EXPECT().CreatePVC(gomock.Any(), gomock.Any()).Return(&v1.PersistentVolumeClaim{ ObjectMeta: metav1.ObjectMeta{ Name: "pvc1", }, }, nil), f.createAppOps.EXPECT().CreatePod(gomock.Any(), gomock.Any()).Return(nil, fmt.Errorf("create pod error")), ) }, errChecker: NotNil, podChecker: IsNil, pvcChecker: NotNil, }, { args: &types.CSISnapshotRestoreArgs{ StorageClass: "sc", Namespace: "ns", RunAsUser: 100, ContainerImage: "image", }, snapshot: &snapv1.VolumeSnapshot{ ObjectMeta: metav1.ObjectMeta{ Name: "snap1", }, Status: &snapv1.VolumeSnapshotStatus{ RestoreSize: &resourceQuantity, }, }, prepare: func(f *fields) { gomock.InOrder( f.createAppOps.EXPECT().CreatePVC(gomock.Any(), gomock.Any()).Return(nil, fmt.Errorf("create pvc error")), ) }, errChecker: NotNil, podChecker: IsNil, pvcChecker: IsNil, }, } { ctrl := gomock.NewController(c) defer ctrl.Finish() f := fields{ createAppOps: mocks.NewMockApplicationCreator(ctrl), } if tc.prepare != nil { tc.prepare(&f) } stepper := &snapshotRestoreSteps{ createAppOps: f.createAppOps, } pod, pvc, err := stepper.RestoreApplication(ctx, tc.args, tc.snapshot) c.Check(err, tc.errChecker) c.Check(pod, tc.podChecker) c.Check(pvc, tc.pvcChecker) } } func (s *CSITestSuite) TestCleanup(c *C) { ctx := context.Background() groupversion := &metav1.GroupVersionForDiscovery{ GroupVersion: "gv", Version: "v", } type fields struct { cleanerOps *mocks.MockCleaner } for _, tc := range []struct { results *types.CSISnapshotRestoreResults prepare func(f *fields) }{ { results: nil, }, { results: &types.CSISnapshotRestoreResults{ OriginalPVC: &v1.PersistentVolumeClaim{ ObjectMeta: metav1.ObjectMeta{ Name: "pvc1", Namespace: "ns", }, }, OriginalPod: &v1.Pod{ ObjectMeta: metav1.ObjectMeta{ Name: "pod1", Namespace: "ns", }, }, ClonedPVC: &v1.PersistentVolumeClaim{ ObjectMeta: metav1.ObjectMeta{ Name: "pvc2", Namespace: "ns", }, }, ClonedPod: &v1.Pod{ ObjectMeta: metav1.ObjectMeta{ Name: "pod2", Namespace: "ns", }, }, Snapshot: &snapv1.VolumeSnapshot{ ObjectMeta: metav1.ObjectMeta{ Name: "snapshot", Namespace: "ns", }, }, }, prepare: func(f *fields) { gomock.InOrder( f.cleanerOps.EXPECT().DeletePVC(ctx, "pvc1", "ns").Return(nil), f.cleanerOps.EXPECT().DeletePod(ctx, "pod1", "ns").Return(nil), f.cleanerOps.EXPECT().DeletePVC(ctx, "pvc2", "ns").Return(nil), f.cleanerOps.EXPECT().DeletePod(ctx, "pod2", "ns").Return(nil), f.cleanerOps.EXPECT().DeleteSnapshot(ctx, "snapshot", "ns", groupversion).Return(nil), ) }, }, { results: &types.CSISnapshotRestoreResults{ OriginalPVC: &v1.PersistentVolumeClaim{ ObjectMeta: metav1.ObjectMeta{ Name: "pvc1", Namespace: "ns", }, }, OriginalPod: &v1.Pod{ ObjectMeta: metav1.ObjectMeta{ Name: "pod1", Namespace: "ns", }, }, ClonedPVC: &v1.PersistentVolumeClaim{ ObjectMeta: metav1.ObjectMeta{ Name: "pvc2", Namespace: "ns", }, }, ClonedPod: &v1.Pod{ ObjectMeta: metav1.ObjectMeta{ Name: "pod2", Namespace: "ns", }, }, Snapshot: &snapv1.VolumeSnapshot{ ObjectMeta: metav1.ObjectMeta{ Name: "snapshot", Namespace: "ns", }, }, }, prepare: func(f *fields) { gomock.InOrder( f.cleanerOps.EXPECT().DeletePVC(ctx, "pvc1", "ns").Return(fmt.Errorf("err")), f.cleanerOps.EXPECT().DeletePod(ctx, "pod1", "ns").Return(fmt.Errorf("err")), f.cleanerOps.EXPECT().DeletePVC(ctx, "pvc2", "ns").Return(fmt.Errorf("err")), f.cleanerOps.EXPECT().DeletePod(ctx, "pod2", "ns").Return(fmt.Errorf("err")), f.cleanerOps.EXPECT().DeleteSnapshot(ctx, "snapshot", "ns", groupversion).Return(fmt.Errorf("err")), ) }, }, } { ctrl := gomock.NewController(c) defer ctrl.Finish() f := fields{ cleanerOps: mocks.NewMockCleaner(ctrl), } if tc.prepare != nil { tc.prepare(&f) } stepper := &snapshotRestoreSteps{ cleanerOps: f.cleanerOps, SnapshotGroupVersion: groupversion, } stepper.Cleanup(ctx, tc.results) } } func (s *CSITestSuite) TestValidateData(c *C) { ctx := context.Background() type fields struct { validatorOps *mocks.MockDataValidator } for _, tc := range []struct { prepare func(f *fields) pod *v1.Pod data string errChecker Checker }{ { pod: &v1.Pod{ ObjectMeta: metav1.ObjectMeta{ Name: "pod", Namespace: "ns", }, }, data: "somedata", prepare: func(f *fields) { gomock.InOrder( f.validatorOps.EXPECT().FetchPodData(context.Background(), "pod", "ns").Return("somedata", nil), ) }, errChecker: IsNil, }, { pod: &v1.Pod{ ObjectMeta: metav1.ObjectMeta{ Name: "pod", Namespace: "ns", }, }, data: "somedata", prepare: func(f *fields) { gomock.InOrder( f.validatorOps.EXPECT().FetchPodData(context.Background(), "pod", "ns").Return("someotherdata", nil), ) }, errChecker: NotNil, }, { pod: &v1.Pod{ ObjectMeta: metav1.ObjectMeta{ Name: "pod", Namespace: "ns", }, }, data: "somedata", prepare: func(f *fields) { gomock.InOrder( f.validatorOps.EXPECT().FetchPodData(context.Background(), "pod", "ns").Return("", fmt.Errorf("error")), ) }, errChecker: NotNil, }, } { ctrl := gomock.NewController(c) defer ctrl.Finish() f := fields{ validatorOps: mocks.NewMockDataValidator(ctrl), } if tc.prepare != nil { tc.prepare(&f) } stepper := &snapshotRestoreSteps{ dataValidatorOps: f.validatorOps, } err := stepper.ValidateData(ctx, tc.pod, tc.data) c.Check(err, tc.errChecker) } } ================================================ FILE: pkg/csi/snapshot_restore_test.go ================================================ package csi import ( "context" "fmt" "testing" "github.com/golang/mock/gomock" "github.com/kastenhq/kubestr/pkg/csi/mocks" "github.com/kastenhq/kubestr/pkg/csi/types" snapv1 "github.com/kubernetes-csi/external-snapshotter/client/v4/apis/volumesnapshot/v1" . "gopkg.in/check.v1" v1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" "k8s.io/client-go/dynamic" fakedynamic "k8s.io/client-go/dynamic/fake" "k8s.io/client-go/kubernetes" "k8s.io/client-go/kubernetes/fake" ) func Test(t *testing.T) { TestingT(t) } type CSITestSuite struct{} var _ = Suite(&CSITestSuite{}) func (s *CSITestSuite) TestRunSnapshotRestoreHelper(c *C) { ctx := context.Background() type fields struct { stepperOps *mocks.MockSnapshotRestoreStepper } for _, tc := range []struct { kubeCli kubernetes.Interface dynCli dynamic.Interface args *types.CSISnapshotRestoreArgs prepare func(f *fields) result *types.CSISnapshotRestoreResults errChecker Checker }{ { // success kubeCli: fake.NewSimpleClientset(), dynCli: fakedynamic.NewSimpleDynamicClient(runtime.NewScheme()), args: &types.CSISnapshotRestoreArgs{ Cleanup: true, }, prepare: func(f *fields) { gomock.InOrder( f.stepperOps.EXPECT().ValidateArgs(gomock.Any(), gomock.Any()).Return(nil), f.stepperOps.EXPECT().CreateApplication(gomock.Any(), gomock.Any(), gomock.Any()).Return( &v1.Pod{ ObjectMeta: metav1.ObjectMeta{ Name: "pod1", Namespace: "ns", }, }, &v1.PersistentVolumeClaim{ ObjectMeta: metav1.ObjectMeta{ Name: "pvc1", Namespace: "ns", }, }, nil, ), f.stepperOps.EXPECT().ValidateData(gomock.Any(), &v1.Pod{ ObjectMeta: metav1.ObjectMeta{ Name: "pod1", Namespace: "ns", }, }, gomock.Any()).Return(nil), f.stepperOps.EXPECT().SnapshotApplication(gomock.Any(), gomock.Any(), &v1.PersistentVolumeClaim{ ObjectMeta: metav1.ObjectMeta{ Name: "pvc1", Namespace: "ns", }, }, gomock.Any(), ).Return( &snapv1.VolumeSnapshot{ ObjectMeta: metav1.ObjectMeta{ Name: "snapshot", }, }, nil, ), f.stepperOps.EXPECT().RestoreApplication(gomock.Any(), gomock.Any(), &snapv1.VolumeSnapshot{ ObjectMeta: metav1.ObjectMeta{ Name: "snapshot", }, }, ).Return( &v1.Pod{ ObjectMeta: metav1.ObjectMeta{ Name: "pod2", Namespace: "ns", }, }, &v1.PersistentVolumeClaim{ ObjectMeta: metav1.ObjectMeta{ Name: "pvc2", Namespace: "ns", }, }, nil, ), f.stepperOps.EXPECT().ValidateData(gomock.Any(), &v1.Pod{ ObjectMeta: metav1.ObjectMeta{ Name: "pod2", Namespace: "ns", }, }, gomock.Any()).Return(nil), f.stepperOps.EXPECT().Cleanup(gomock.Any(), gomock.Any()).Return(), ) }, result: &types.CSISnapshotRestoreResults{ OriginalPVC: &v1.PersistentVolumeClaim{ ObjectMeta: metav1.ObjectMeta{ Name: "pvc1", Namespace: "ns", }, }, OriginalPod: &v1.Pod{ ObjectMeta: metav1.ObjectMeta{ Name: "pod1", Namespace: "ns", }, }, ClonedPVC: &v1.PersistentVolumeClaim{ ObjectMeta: metav1.ObjectMeta{ Name: "pvc2", Namespace: "ns", }, }, ClonedPod: &v1.Pod{ ObjectMeta: metav1.ObjectMeta{ Name: "pod2", Namespace: "ns", }, }, Snapshot: &snapv1.VolumeSnapshot{ ObjectMeta: metav1.ObjectMeta{ Name: "snapshot", }, }, }, errChecker: IsNil, }, { // no cleanup kubeCli: fake.NewSimpleClientset(), dynCli: fakedynamic.NewSimpleDynamicClient(runtime.NewScheme()), args: &types.CSISnapshotRestoreArgs{ Cleanup: false, }, prepare: func(f *fields) { gomock.InOrder( f.stepperOps.EXPECT().ValidateArgs(gomock.Any(), gomock.Any()).Return(nil), f.stepperOps.EXPECT().CreateApplication(gomock.Any(), gomock.Any(), gomock.Any()).Return(nil, nil, nil), f.stepperOps.EXPECT().ValidateData(gomock.Any(), gomock.Any(), gomock.Any()).Return(nil), f.stepperOps.EXPECT().SnapshotApplication(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).Return(nil, nil), f.stepperOps.EXPECT().RestoreApplication(gomock.Any(), gomock.Any(), gomock.Any()).Return(nil, nil, nil), f.stepperOps.EXPECT().ValidateData(gomock.Any(), gomock.Any(), gomock.Any()).Return(nil), ) }, result: &types.CSISnapshotRestoreResults{}, errChecker: IsNil, }, { // restored data validation fails kubeCli: fake.NewSimpleClientset(), dynCli: fakedynamic.NewSimpleDynamicClient(runtime.NewScheme()), args: &types.CSISnapshotRestoreArgs{ Cleanup: false, }, prepare: func(f *fields) { gomock.InOrder( f.stepperOps.EXPECT().ValidateArgs(gomock.Any(), gomock.Any()).Return(nil), f.stepperOps.EXPECT().CreateApplication(gomock.Any(), gomock.Any(), gomock.Any()).Return(nil, nil, nil), f.stepperOps.EXPECT().ValidateData(gomock.Any(), gomock.Any(), gomock.Any()).Return(nil), f.stepperOps.EXPECT().SnapshotApplication(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).Return(nil, nil), f.stepperOps.EXPECT().RestoreApplication(gomock.Any(), gomock.Any(), gomock.Any()).Return(nil, nil, nil), f.stepperOps.EXPECT().ValidateData(gomock.Any(), gomock.Any(), gomock.Any()).Return(fmt.Errorf("validation error")), ) }, result: &types.CSISnapshotRestoreResults{}, errChecker: NotNil, }, { // restore error, objects still returned kubeCli: fake.NewSimpleClientset(), dynCli: fakedynamic.NewSimpleDynamicClient(runtime.NewScheme()), args: &types.CSISnapshotRestoreArgs{ Cleanup: false, }, prepare: func(f *fields) { gomock.InOrder( f.stepperOps.EXPECT().ValidateArgs(gomock.Any(), gomock.Any()).Return(nil), f.stepperOps.EXPECT().CreateApplication(gomock.Any(), gomock.Any(), gomock.Any()).Return(nil, nil, nil), f.stepperOps.EXPECT().ValidateData(gomock.Any(), gomock.Any(), gomock.Any()).Return(nil), f.stepperOps.EXPECT().SnapshotApplication(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).Return(nil, nil), f.stepperOps.EXPECT().RestoreApplication(gomock.Any(), gomock.Any(), gomock.Any()).Return( &v1.Pod{ ObjectMeta: metav1.ObjectMeta{ Name: "pod2", Namespace: "ns", }, }, &v1.PersistentVolumeClaim{ ObjectMeta: metav1.ObjectMeta{ Name: "pvc2", Namespace: "ns", }, }, fmt.Errorf("restore error"), ), ) }, result: &types.CSISnapshotRestoreResults{ ClonedPVC: &v1.PersistentVolumeClaim{ ObjectMeta: metav1.ObjectMeta{ Name: "pvc2", Namespace: "ns", }, }, ClonedPod: &v1.Pod{ ObjectMeta: metav1.ObjectMeta{ Name: "pod2", Namespace: "ns", }, }, }, errChecker: NotNil, }, { // restore error, no objects returned kubeCli: fake.NewSimpleClientset(), dynCli: fakedynamic.NewSimpleDynamicClient(runtime.NewScheme()), args: &types.CSISnapshotRestoreArgs{ Cleanup: false, }, prepare: func(f *fields) { gomock.InOrder( f.stepperOps.EXPECT().ValidateArgs(gomock.Any(), gomock.Any()).Return(nil), f.stepperOps.EXPECT().CreateApplication(gomock.Any(), gomock.Any(), gomock.Any()).Return(nil, nil, nil), f.stepperOps.EXPECT().ValidateData(gomock.Any(), gomock.Any(), gomock.Any()).Return(nil), f.stepperOps.EXPECT().SnapshotApplication(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).Return(nil, nil), f.stepperOps.EXPECT().RestoreApplication(gomock.Any(), gomock.Any(), gomock.Any()).Return(nil, nil, fmt.Errorf("restore error")), ) }, result: &types.CSISnapshotRestoreResults{}, errChecker: NotNil, }, { // snapshot error, object still returned kubeCli: fake.NewSimpleClientset(), dynCli: fakedynamic.NewSimpleDynamicClient(runtime.NewScheme()), args: &types.CSISnapshotRestoreArgs{ Cleanup: false, }, prepare: func(f *fields) { gomock.InOrder( f.stepperOps.EXPECT().ValidateArgs(gomock.Any(), gomock.Any()).Return(nil), f.stepperOps.EXPECT().CreateApplication(gomock.Any(), gomock.Any(), gomock.Any()).Return(nil, nil, nil), f.stepperOps.EXPECT().ValidateData(gomock.Any(), gomock.Any(), gomock.Any()).Return(nil), f.stepperOps.EXPECT().SnapshotApplication(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).Return( &snapv1.VolumeSnapshot{ ObjectMeta: metav1.ObjectMeta{ Name: "snapshot", }, }, fmt.Errorf("snapshot error"), ), ) }, result: &types.CSISnapshotRestoreResults{ Snapshot: &snapv1.VolumeSnapshot{ ObjectMeta: metav1.ObjectMeta{ Name: "snapshot", }, }, }, errChecker: NotNil, }, { // snapshot error, object not returned kubeCli: fake.NewSimpleClientset(), dynCli: fakedynamic.NewSimpleDynamicClient(runtime.NewScheme()), args: &types.CSISnapshotRestoreArgs{ Cleanup: false, }, prepare: func(f *fields) { gomock.InOrder( f.stepperOps.EXPECT().ValidateArgs(gomock.Any(), gomock.Any()).Return(nil), f.stepperOps.EXPECT().CreateApplication(gomock.Any(), gomock.Any(), gomock.Any()).Return(nil, nil, nil), f.stepperOps.EXPECT().ValidateData(gomock.Any(), gomock.Any(), gomock.Any()).Return(nil), f.stepperOps.EXPECT().SnapshotApplication(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).Return(nil, fmt.Errorf("snapshot error")), ) }, result: &types.CSISnapshotRestoreResults{}, errChecker: NotNil, }, { // created data validation error kubeCli: fake.NewSimpleClientset(), dynCli: fakedynamic.NewSimpleDynamicClient(runtime.NewScheme()), args: &types.CSISnapshotRestoreArgs{ Cleanup: false, }, prepare: func(f *fields) { gomock.InOrder( f.stepperOps.EXPECT().ValidateArgs(gomock.Any(), gomock.Any()).Return(nil), f.stepperOps.EXPECT().CreateApplication(gomock.Any(), gomock.Any(), gomock.Any()).Return(nil, nil, nil), f.stepperOps.EXPECT().ValidateData(gomock.Any(), gomock.Any(), gomock.Any()).Return(fmt.Errorf("validation error")), ) }, result: &types.CSISnapshotRestoreResults{}, errChecker: NotNil, }, { // create error, objects still returned kubeCli: fake.NewSimpleClientset(), dynCli: fakedynamic.NewSimpleDynamicClient(runtime.NewScheme()), args: &types.CSISnapshotRestoreArgs{ Cleanup: false, }, prepare: func(f *fields) { gomock.InOrder( f.stepperOps.EXPECT().ValidateArgs(gomock.Any(), gomock.Any()).Return(nil), f.stepperOps.EXPECT().CreateApplication(gomock.Any(), gomock.Any(), gomock.Any()).Return( &v1.Pod{ ObjectMeta: metav1.ObjectMeta{ Name: "pod1", Namespace: "ns", }, }, &v1.PersistentVolumeClaim{ ObjectMeta: metav1.ObjectMeta{ Name: "pvc1", Namespace: "ns", }, }, fmt.Errorf("create error"), ), ) }, result: &types.CSISnapshotRestoreResults{ OriginalPVC: &v1.PersistentVolumeClaim{ ObjectMeta: metav1.ObjectMeta{ Name: "pvc1", Namespace: "ns", }, }, OriginalPod: &v1.Pod{ ObjectMeta: metav1.ObjectMeta{ Name: "pod1", Namespace: "ns", }, }, }, errChecker: NotNil, }, { // create error, objects not returned kubeCli: fake.NewSimpleClientset(), dynCli: fakedynamic.NewSimpleDynamicClient(runtime.NewScheme()), args: &types.CSISnapshotRestoreArgs{ Cleanup: false, }, prepare: func(f *fields) { gomock.InOrder( f.stepperOps.EXPECT().ValidateArgs(gomock.Any(), gomock.Any()).Return(nil), f.stepperOps.EXPECT().CreateApplication(gomock.Any(), gomock.Any(), gomock.Any()).Return(nil, nil, fmt.Errorf("create error")), ) }, result: &types.CSISnapshotRestoreResults{}, errChecker: NotNil, }, { // args validate error kubeCli: fake.NewSimpleClientset(), dynCli: fakedynamic.NewSimpleDynamicClient(runtime.NewScheme()), args: &types.CSISnapshotRestoreArgs{ Cleanup: false, }, prepare: func(f *fields) { gomock.InOrder( f.stepperOps.EXPECT().ValidateArgs(gomock.Any(), gomock.Any()).Return(fmt.Errorf("create error")), ) }, result: &types.CSISnapshotRestoreResults{}, errChecker: NotNil, }, { // empty cli kubeCli: nil, dynCli: fakedynamic.NewSimpleDynamicClient(runtime.NewScheme()), result: &types.CSISnapshotRestoreResults{}, errChecker: NotNil, }, { // empty dyncli kubeCli: fake.NewSimpleClientset(), dynCli: nil, result: &types.CSISnapshotRestoreResults{}, errChecker: NotNil, }, } { ctrl := gomock.NewController(c) defer ctrl.Finish() f := fields{ stepperOps: mocks.NewMockSnapshotRestoreStepper(ctrl), } if tc.prepare != nil { tc.prepare(&f) } runner := &SnapshotRestoreRunner{ KubeCli: tc.kubeCli, DynCli: tc.dynCli, srSteps: f.stepperOps, } result, err := runner.RunSnapshotRestoreHelper(ctx, tc.args) c.Check(err, tc.errChecker) c.Assert(result, DeepEquals, tc.result) } } func (s *CSITestSuite) TestRunSnapshotRestoreRunner(c *C) { ctx := context.Background() r := &SnapshotRestoreRunner{} _, err := r.RunSnapshotRestore(ctx, nil) c.Check(err, NotNil) } ================================================ FILE: pkg/csi/types/csi_types.go ================================================ package types import ( "bytes" "fmt" "time" snapv1 "github.com/kubernetes-csi/external-snapshotter/client/v4/apis/volumesnapshot/v1" v1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/resource" "k8s.io/client-go/rest" ) type CSISnapshotRestoreArgs struct { StorageClass string VolumeSnapshotClass string Namespace string RunAsUser int64 ContainerImage string Cleanup bool SkipCFSCheck bool K8sObjectReadyTimeout time.Duration } func (a *CSISnapshotRestoreArgs) Validate() error { if a.StorageClass == "" || a.VolumeSnapshotClass == "" || a.Namespace == "" { return fmt.Errorf("required fields are missing: (StorageClass, VolumeSnapshotClass, Namespace)") } return nil } type CSISnapshotRestoreResults struct { OriginalPVC *v1.PersistentVolumeClaim OriginalPod *v1.Pod Snapshot *snapv1.VolumeSnapshot ClonedPVC *v1.PersistentVolumeClaim ClonedPod *v1.Pod } type CreatePVCArgs struct { Name string // Only one of Name or GenerateName string // GenerateName should be specified. StorageClass string Namespace string DataSource *v1.TypedLocalObjectReference RestoreSize *resource.Quantity VolumeMode *v1.PersistentVolumeMode // missing implies v1.PersistentVolumeFilesystem } func (c *CreatePVCArgs) Validate() error { if (c.GenerateName == "" && c.Name == "") || (c.GenerateName != "" && c.Name != "") || c.StorageClass == "" || c.Namespace == "" { return fmt.Errorf("invalid CreatePVCArgs (%#v)", c) } return nil } type VolumePath struct { MountPath string // Only one of MountPath or DevicePath string // DevicePath should be specified. } type CreatePodArgs struct { Name string // Only one of Name or GenerateName string // GenerateName should be specified. PVCMap map[string]VolumePath Namespace string RunAsUser int64 ContainerImage string Command []string ContainerArgs []string } func (c *CreatePodArgs) Validate() error { if (c.GenerateName == "" && c.Name == "") || (c.GenerateName != "" && c.Name != "") || (c.Namespace == "") || (c.PVCMap == nil) { return fmt.Errorf("invalid CreatePodArgs (%#v)", c) } for pvcName, path := range c.PVCMap { if pvcName == "" { return fmt.Errorf("name for PVC is not set") } if path.DevicePath == "" && path.MountPath == "" { return fmt.Errorf("neither DevicePath nor MountPath are set, one is required") } if path.DevicePath != "" && path.MountPath != "" { return fmt.Errorf("both MountPath and DevicePath are set, only one must be set") } } return nil } type CreateSnapshotArgs struct { Namespace string PVCName string VolumeSnapshotClass string SnapshotName string } func (c *CreateSnapshotArgs) Validate() error { if c.Namespace == "" || c.PVCName == "" || c.VolumeSnapshotClass == "" || c.SnapshotName == "" { return fmt.Errorf("invalid CreateSnapshotArgs (%v)", c) } return nil } type FetchSnapshotArgs struct { Namespace string SnapshotName string } func (c *FetchSnapshotArgs) Validate() error { if c.Namespace == "" || c.SnapshotName == "" { return fmt.Errorf("invalid FetchSnapshotArgs (%v)", c) } return nil } type CreateFromSourceCheckArgs struct { VolumeSnapshotClass string SnapshotName string Namespace string } func (c *CreateFromSourceCheckArgs) Validate() error { if c.VolumeSnapshotClass == "" || c.SnapshotName == "" || c.Namespace == "" { return fmt.Errorf("invalid CreateFromSourceCheckArgs (%v)", c) } return nil } type PVCBrowseArgs struct { PVCName string Namespace string VolumeSnapshotClass string RunAsUser int64 LocalPort int ShowTree bool } func (p *PVCBrowseArgs) Validate() error { if p.PVCName == "" || p.Namespace == "" || p.VolumeSnapshotClass == "" { return fmt.Errorf("invalid PVCBrowseArgs (%v)", p) } return nil } type SnapshotBrowseArgs struct { SnapshotName string Namespace string RunAsUser int64 LocalPort int ShowTree bool } func (p *SnapshotBrowseArgs) Validate() error { if p.SnapshotName == "" || p.Namespace == "" { return fmt.Errorf("invalid SnapshotBrowseArgs (%v)", p) } return nil } type FileRestoreArgs struct { FromSnapshotName string FromPVCName string ToPVCName string Namespace string RunAsUser int64 LocalPort int Path string } func (f *FileRestoreArgs) Validate() error { if (f.FromSnapshotName == "" && f.FromPVCName == "") || (f.FromSnapshotName != "" && f.FromPVCName != "") { return fmt.Errorf("either --fromSnapshot or --fromPVC argument must be specified. Both cannot be specified together") } if f.FromPVCName != "" && f.ToPVCName == "" { return fmt.Errorf("--toPVC argument must be specified if using --fromPVC") } if f.Namespace == "" { return fmt.Errorf("invalid FileRestoreArgs (%v)", f) } return nil } type PortForwardAPodRequest struct { // RestConfig is the kubernetes config RestConfig *rest.Config // Pod is the selected pod for this port forwarding Pod *v1.Pod // LocalPort is the local port that will be selected to expose the PodPort LocalPort int // PodPort is the target port for the pod PodPort int // Streams configures where to write or read input from OutStream bytes.Buffer ErrOutStream bytes.Buffer // StopCh is the channel used to manage the port forward lifecycle StopCh <-chan struct{} // ReadyCh communicates when the tunnel is ready to receive traffic ReadyCh chan struct{} } ================================================ FILE: pkg/fio/_config.yml ================================================ baseurl: "/fio" ================================================ FILE: pkg/fio/dbench_license ================================================ MIT License Copyright (c) 2018 LogDNA Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. ================================================ FILE: pkg/fio/fio.go ================================================ package fio import ( "context" "encoding/json" "fmt" "os" "path/filepath" "time" "github.com/briandowns/spinner" kankube "github.com/kanisterio/kanister/pkg/kube" "github.com/kastenhq/kubestr/pkg/common" "github.com/pkg/errors" v1 "k8s.io/api/core/v1" sv1 "k8s.io/api/storage/v1" "k8s.io/apimachinery/pkg/api/resource" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/labels" "k8s.io/client-go/kubernetes" ) const ( // DefaultNS describes the default namespace DefaultNS = "default" // PodNamespaceEnvKey describes the pod namespace env variable PodNamespaceEnvKey = "POD_NAMESPACE" // DefaultFIOJob describes the default FIO job DefaultFIOJob = "default-fio" // KubestrFIOJobGenName describes the generate name KubestrFIOJobGenName = "kubestr-fio" // ConfigMapJobKey is the default fio job key ConfigMapJobKey = "fiojob" // DefaultPVCSize is the default PVC size DefaultPVCSize = "100Gi" // PVCGenerateName is the name to generate for the PVC PVCGenerateName = "kubestr-fio-pvc-" // PodGenerateName is the name to generate for the POD PodGenerateName = "kubestr-fio-pod-" // ContainerName is the name of the container that runs the job ContainerName = "kubestr-fio" // PodNameEnvKey is the name of the variable used to get the current pod name PodNameEnvKey = "HOSTNAME" // ConfigMapMountPath is the path where we mount the configmap ConfigMapMountPath = "/etc/fio-config" // VolumeMountPath is the path where we mount the volume VolumeMountPath = "/dataset" // CreatedByFIOLabel is the key that desrcibes the label used to mark configmaps CreatedByFIOLabel = "createdbyfio" ) // FIO is an interface that represents FIO related commands type FIO interface { RunFio(ctx context.Context, args *RunFIOArgs) (*RunFIOResult, error) // , test config } // FIOrunner implments FIO type FIOrunner struct { Cli kubernetes.Interface fioSteps fioSteps } type RunFIOArgs struct { StorageClass string Size string Namespace string NodeSelector map[string]string FIOJobFilepath string FIOJobName string Image string } func (a *RunFIOArgs) Validate() error { if a.StorageClass == "" || a.Size == "" || a.Namespace == "" { return fmt.Errorf("required fields are missing: (StorageClass, Size, Namespace)") } return nil } type RunFIOResult struct { Size string `json:"size,omitempty"` StorageClass *sv1.StorageClass `json:"storageClass,omitempty"` FioConfig string `json:"fioConfig,omitempty"` Result FioResult `json:"result,omitempty"` } func (f *FIOrunner) RunFio(ctx context.Context, args *RunFIOArgs) (*RunFIOResult, error) { f.fioSteps = &fioStepper{ cli: f.Cli, podReady: &podReadyChecker{cli: f.Cli}, kubeExecutor: &kubeExecutor{cli: f.Cli}, } return f.RunFioHelper(ctx, args) } func (f *FIOrunner) RunFioHelper(ctx context.Context, args *RunFIOArgs) (*RunFIOResult, error) { // create a configmap with test parameters if f.Cli == nil { // for UT purposes return nil, fmt.Errorf("cli uninitialized") } if err := args.Validate(); err != nil { return nil, err } if err := f.fioSteps.validateNamespace(ctx, args.Namespace); err != nil { return nil, errors.Wrapf(err, "unable to find namespace (%s)", args.Namespace) } if err := f.fioSteps.validateNodeSelector(ctx, args.NodeSelector); err != nil { return nil, errors.Wrapf(err, "unable to find nodes satisfying node selector (%v)", args.NodeSelector) } sc, err := f.fioSteps.storageClassExists(ctx, args.StorageClass) if err != nil { return nil, errors.Wrap(err, "cannot find StorageClass") } configMap, err := f.fioSteps.loadConfigMap(ctx, args) if err != nil { return nil, errors.Wrap(err, "unable to create a ConfigMap") } defer func() { _ = f.fioSteps.deleteConfigMap(context.TODO(), configMap, args.Namespace) }() testFileName, err := fioTestFilename(configMap.Data) if err != nil { return nil, errors.Wrap(err, "failed to get test file name") } pvc, err := f.fioSteps.createPVC(ctx, args.StorageClass, args.Size, args.Namespace) if err != nil { return nil, errors.Wrap(err, "failed to create PVC") } defer func() { _ = f.fioSteps.deletePVC(context.TODO(), pvc.Name, args.Namespace) }() fmt.Println("PVC created", pvc.Name) pod, err := f.fioSteps.createPod(ctx, pvc.Name, configMap.Name, testFileName, args.Namespace, args.NodeSelector, args.Image) if err != nil { return nil, errors.Wrap(err, "failed to create POD") } defer func() { _ = f.fioSteps.deletePod(context.TODO(), pod.Name, args.Namespace) }() fmt.Println("Pod created", pod.Name) fmt.Printf("Running FIO test (%s) on StorageClass (%s) with a PVC of Size (%s)\n", testFileName, args.StorageClass, args.Size) fioOutput, err := f.fioSteps.runFIOCommand(ctx, pod.Name, ContainerName, testFileName, args.Namespace) if err != nil { return nil, errors.Wrap(err, "failed while running FIO test") } return &RunFIOResult{ Size: args.Size, StorageClass: sc, FioConfig: configMap.Data[testFileName], Result: fioOutput, }, nil } type fioSteps interface { validateNamespace(ctx context.Context, namespace string) error validateNodeSelector(ctx context.Context, selector map[string]string) error storageClassExists(ctx context.Context, storageClass string) (*sv1.StorageClass, error) loadConfigMap(ctx context.Context, args *RunFIOArgs) (*v1.ConfigMap, error) createPVC(ctx context.Context, storageclass, size, namespace string) (*v1.PersistentVolumeClaim, error) deletePVC(ctx context.Context, pvcName, namespace string) error createPod(ctx context.Context, pvcName, configMapName, testFileName, namespace string, nodeSelector map[string]string, image string) (*v1.Pod, error) deletePod(ctx context.Context, podName, namespace string) error runFIOCommand(ctx context.Context, podName, containerName, testFileName, namespace string) (FioResult, error) deleteConfigMap(ctx context.Context, configMap *v1.ConfigMap, namespace string) error } type fioStepper struct { cli kubernetes.Interface podReady waitForPodReadyInterface kubeExecutor kubeExecInterface } func (s *fioStepper) validateNamespace(ctx context.Context, namespace string) error { if _, err := s.cli.CoreV1().Namespaces().Get(ctx, namespace, metav1.GetOptions{}); err != nil { return err } return nil } func (s *fioStepper) validateNodeSelector(ctx context.Context, selector map[string]string) error { nodes, err := s.cli.CoreV1().Nodes().List(ctx, metav1.ListOptions{ LabelSelector: labels.SelectorFromSet(selector).String(), }) if err != nil { return err } if len(nodes.Items) == 0 { return fmt.Errorf("no nodes match selector") } return nil } func (s *fioStepper) storageClassExists(ctx context.Context, storageClass string) (*sv1.StorageClass, error) { return s.cli.StorageV1().StorageClasses().Get(ctx, storageClass, metav1.GetOptions{}) } func (s *fioStepper) loadConfigMap(ctx context.Context, args *RunFIOArgs) (*v1.ConfigMap, error) { configMap := &v1.ConfigMap{ Data: make(map[string]string), } switch { case args.FIOJobFilepath != "": data, err := os.ReadFile(args.FIOJobFilepath) if err != nil { return nil, errors.Wrap(err, "file reading error") } configMap.Data[filepath.Base(args.FIOJobFilepath)] = string(data) case args.FIOJobName != "": if _, ok := fioJobs[args.FIOJobName]; !ok { return nil, fmt.Errorf("did not find FIO job (%s)", args.FIOJobName) } configMap.Data[args.FIOJobName] = fioJobs[args.FIOJobName] default: configMap.Data[DefaultFIOJob] = fioJobs[DefaultFIOJob] } // create configMap.GenerateName = KubestrFIOJobGenName configMap.Labels = map[string]string{CreatedByFIOLabel: "true"} cm, err := s.cli.CoreV1().ConfigMaps(args.Namespace).Create(ctx, configMap, metav1.CreateOptions{}) if err != nil { return nil, err } return cm, nil } func (s *fioStepper) createPVC(ctx context.Context, storageclass, size, namespace string) (*v1.PersistentVolumeClaim, error) { sizeResource, err := resource.ParseQuantity(size) if err != nil { return nil, errors.Wrapf(err, "unable to parse PVC size (%s)", size) } pvc := &v1.PersistentVolumeClaim{ ObjectMeta: metav1.ObjectMeta{ GenerateName: PVCGenerateName, }, Spec: v1.PersistentVolumeClaimSpec{ StorageClassName: &storageclass, AccessModes: []v1.PersistentVolumeAccessMode{v1.ReadWriteOnce}, Resources: v1.VolumeResourceRequirements{ Requests: v1.ResourceList{ v1.ResourceName(v1.ResourceStorage): sizeResource, }, }, }, } cm, err := s.cli.CoreV1().PersistentVolumeClaims(namespace).Create(ctx, pvc, metav1.CreateOptions{}) if err != nil { return nil, err } return cm, nil } func (s *fioStepper) deletePVC(ctx context.Context, pvcName, namespace string) error { return s.cli.CoreV1().PersistentVolumeClaims(namespace).Delete(ctx, pvcName, metav1.DeleteOptions{}) } func (s *fioStepper) createPod(ctx context.Context, pvcName, configMapName, testFileName, namespace string, nodeSelector map[string]string, image string) (*v1.Pod, error) { if pvcName == "" || configMapName == "" || testFileName == "" { return nil, fmt.Errorf("create pod missing required arguments") } if image == "" { image = common.DefaultPodImage } pod := &v1.Pod{ ObjectMeta: metav1.ObjectMeta{ GenerateName: PodGenerateName, Namespace: namespace, }, Spec: v1.PodSpec{ Containers: []v1.Container{{ Name: ContainerName, Command: []string{"/bin/sh"}, Args: []string{"-c", "tail -f /dev/null"}, VolumeMounts: []v1.VolumeMount{ {Name: "persistent-storage", MountPath: VolumeMountPath}, {Name: "config-map", MountPath: ConfigMapMountPath}, }, Image: image, }}, Volumes: []v1.Volume{ { Name: "persistent-storage", VolumeSource: v1.VolumeSource{ PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{ClaimName: pvcName}, }, }, { Name: "config-map", VolumeSource: v1.VolumeSource{ ConfigMap: &v1.ConfigMapVolumeSource{ LocalObjectReference: v1.LocalObjectReference{ Name: configMapName, }, }, }, }, }, NodeSelector: nodeSelector, }, } podRes, err := s.cli.CoreV1().Pods(namespace).Create(ctx, pod, metav1.CreateOptions{}) if err != nil { return podRes, err } err = s.podReady.waitForPodReady(ctx, namespace, podRes.Name) if err != nil { return nil, err } podRes, err = s.cli.CoreV1().Pods(namespace).Get(ctx, podRes.Name, metav1.GetOptions{}) if err != nil { return podRes, err } return podRes, nil } func (s *fioStepper) deletePod(ctx context.Context, podName, namespace string) error { return s.cli.CoreV1().Pods(namespace).Delete(ctx, podName, metav1.DeleteOptions{}) } func (s *fioStepper) runFIOCommand(ctx context.Context, podName, containerName, testFileName, namespace string) (FioResult, error) { jobFilePath := fmt.Sprintf("%s/%s", ConfigMapMountPath, testFileName) command := []string{"fio", "--directory", VolumeMountPath, jobFilePath, "--output-format=json"} done := make(chan bool, 1) var fioOut FioResult var stdout string var stderr string var err error timestart := time.Now() go func() { stdout, stderr, err = s.kubeExecutor.exec(ctx, namespace, podName, containerName, command) if err != nil || stderr != "" { if err == nil { err = fmt.Errorf("stderr when running FIO") } err = errors.Wrapf(err, "error running command:(%v), stderr:(%s)", command, stderr) } done <- true }() spin := spinner.New(spinner.CharSets[9], 100*time.Millisecond) spin.Start() <-done spin.Stop() elapsed := time.Since(timestart) fmt.Println("Elapsed time-", elapsed) if err != nil { return fioOut, err } err = json.Unmarshal([]byte(stdout), &fioOut) if err != nil { return fioOut, errors.Wrapf(err, "unable to parse fio output into JSON") } return fioOut, nil } // deleteConfigMap only deletes a config map if it has the label func (s *fioStepper) deleteConfigMap(ctx context.Context, configMap *v1.ConfigMap, namespace string) error { if val, ok := configMap.Labels[CreatedByFIOLabel]; ok && val == "true" { return s.cli.CoreV1().ConfigMaps(namespace).Delete(ctx, configMap.Name, metav1.DeleteOptions{}) } return nil } func fioTestFilename(configMap map[string]string) (string, error) { if len(configMap) != 1 { return "", fmt.Errorf("unable to find fio file in configmap/more than one found %v", configMap) } var fileName string for key := range configMap { fileName = key } return fileName, nil } type waitForPodReadyInterface interface { waitForPodReady(ctx context.Context, namespace string, name string) error } type podReadyChecker struct { cli kubernetes.Interface } func (p *podReadyChecker) waitForPodReady(ctx context.Context, namespace, name string) error { return kankube.WaitForPodReady(ctx, p.cli, namespace, name) } type kubeExecInterface interface { exec(ctx context.Context, namespace, podName, containerName string, command []string) (string, string, error) } type kubeExecutor struct { cli kubernetes.Interface } func (k *kubeExecutor) exec(ctx context.Context, namespace, podName, containerName string, command []string) (string, string, error) { return kankube.Exec(ctx, k.cli, namespace, podName, containerName, command, nil) } ================================================ FILE: pkg/fio/fio_jobs.go ================================================ package fio var fioJobs = map[string]string{ DefaultFIOJob: testJob1, "randrw": randReadWrite, } var testJob1 = `[global] randrepeat=0 verify=0 ioengine=libaio direct=1 gtod_reduce=1 [job1] name=read_iops bs=4K iodepth=64 size=2G readwrite=randread time_based ramp_time=2s runtime=15s [job2] name=write_iops bs=4K iodepth=64 size=2G readwrite=randwrite time_based ramp_time=2s runtime=15s [job3] name=read_bw bs=128K iodepth=64 size=2G readwrite=randread time_based ramp_time=2s runtime=15s [job4] name=write_bw bs=128k iodepth=64 size=2G readwrite=randwrite time_based ramp_time=2s runtime=15s ` var randReadWrite = `[global] randrepeat=0 verify=0 ioengine=libaio direct=1 gtod_reduce=1 [job1] name=rand_readwrite bs=4K iodepth=64 size=4G readwrite=randrw rwmixread=75 time_based ramp_time=2s runtime=15s ` ================================================ FILE: pkg/fio/fio_test.go ================================================ package fio import ( "context" "encoding/json" "fmt" "os" "testing" "github.com/kastenhq/kubestr/pkg/common" "github.com/pkg/errors" . "gopkg.in/check.v1" v1 "k8s.io/api/core/v1" storagev1 "k8s.io/api/storage/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" "k8s.io/client-go/kubernetes" "k8s.io/client-go/kubernetes/fake" k8stesting "k8s.io/client-go/testing" ) func Test(t *testing.T) { TestingT(t) } type FIOTestSuite struct{} var _ = Suite(&FIOTestSuite{}) func (s *FIOTestSuite) TestRunner(c *C) { ctx := context.Background() runner := &FIOrunner{ Cli: nil, } _, err := runner.RunFio(ctx, nil) c.Check(err, NotNil) } func (s *FIOTestSuite) TestRunFioHelper(c *C) { ctx := context.Background() for i, tc := range []struct { cli kubernetes.Interface stepper *fakeFioStepper args *RunFIOArgs expectedSteps []string checker Checker expectedCM string expectedSC string expectedSize string expectedTFN string expectedPVC string }{ { // invalid args (storageclass) cli: fake.NewSimpleClientset(), stepper: &fakeFioStepper{}, args: &RunFIOArgs{}, checker: NotNil, }, { // invalid args (size) cli: fake.NewSimpleClientset(), stepper: &fakeFioStepper{}, args: &RunFIOArgs{ StorageClass: "sc", }, checker: NotNil, }, { // invalid args (namespace) cli: fake.NewSimpleClientset(), stepper: &fakeFioStepper{}, args: &RunFIOArgs{ StorageClass: "sc", Size: "100Gi", }, checker: NotNil, }, { // namespace doesn't exist cli: fake.NewSimpleClientset(), stepper: &fakeFioStepper{ vnErr: fmt.Errorf("namespace Err"), }, args: &RunFIOArgs{ StorageClass: "sc", Size: "100Gi", Namespace: "foo", }, checker: NotNil, expectedSteps: []string{"VN"}, }, { // no node satisfies selector cli: fake.NewSimpleClientset(), stepper: &fakeFioStepper{ vnsErr: fmt.Errorf("node selector Err"), }, args: &RunFIOArgs{ StorageClass: "sc", Size: "100Gi", Namespace: "foo", }, checker: NotNil, expectedSteps: []string{"VN", "VNS"}, }, { // storageclass not found cli: fake.NewSimpleClientset(), stepper: &fakeFioStepper{ sceErr: fmt.Errorf("storageclass Err"), }, args: &RunFIOArgs{ StorageClass: "sc", Size: "100Gi", Namespace: "foo", }, checker: NotNil, expectedSteps: []string{"VN", "VNS", "SCE"}, }, { // success cli: fake.NewSimpleClientset(), stepper: &fakeFioStepper{ lcmConfigMap: &v1.ConfigMap{ ObjectMeta: metav1.ObjectMeta{ Name: "CM1", }, Data: map[string]string{ "testfile.fio": "testfiledata", }, }, cPVC: &v1.PersistentVolumeClaim{ ObjectMeta: metav1.ObjectMeta{ Name: "PVC", }, }, cPod: &v1.Pod{ ObjectMeta: metav1.ObjectMeta{ Name: "Pod", }, }, }, args: &RunFIOArgs{ StorageClass: "sc", Size: "100Gi", Namespace: "foo", }, checker: IsNil, expectedSteps: []string{"VN", "VNS", "SCE", "LCM", "CPVC", "CPOD", "RFIOC", "DPOD", "DPVC", "DCM"}, expectedSC: "sc", expectedSize: DefaultPVCSize, expectedTFN: "testfile.fio", expectedCM: "CM1", expectedPVC: "PVC", }, { // fio test error cli: fake.NewSimpleClientset(), stepper: &fakeFioStepper{ lcmConfigMap: &v1.ConfigMap{ ObjectMeta: metav1.ObjectMeta{ Name: "CM1", }, Data: map[string]string{ "testfile.fio": "testfiledata", }, }, cPVC: &v1.PersistentVolumeClaim{ ObjectMeta: metav1.ObjectMeta{ Name: "PVC", }, }, cPod: &v1.Pod{ ObjectMeta: metav1.ObjectMeta{ Name: "Pod", }, }, rFIOErr: fmt.Errorf("run fio error"), }, args: &RunFIOArgs{ StorageClass: "sc", Size: "100Gi", Namespace: "foo", }, checker: NotNil, expectedSteps: []string{"VN", "VNS", "SCE", "LCM", "CPVC", "CPOD", "RFIOC", "DPOD", "DPVC", "DCM"}, }, { // create pod error cli: fake.NewSimpleClientset(), stepper: &fakeFioStepper{ lcmConfigMap: &v1.ConfigMap{ ObjectMeta: metav1.ObjectMeta{ Name: "CM1", }, Data: map[string]string{ "testfile.fio": "testfiledata", }, }, cPVC: &v1.PersistentVolumeClaim{ ObjectMeta: metav1.ObjectMeta{ Name: "PVC", }, }, cPod: &v1.Pod{ ObjectMeta: metav1.ObjectMeta{ Name: "Pod", }, }, cPodErr: fmt.Errorf("pod create error"), }, args: &RunFIOArgs{ StorageClass: "sc", Size: "100Gi", Namespace: "foo", }, checker: NotNil, expectedSteps: []string{"VN", "VNS", "SCE", "LCM", "CPVC", "CPOD", "DPVC", "DCM"}, }, { // create PVC error cli: fake.NewSimpleClientset(), stepper: &fakeFioStepper{ lcmConfigMap: &v1.ConfigMap{ ObjectMeta: metav1.ObjectMeta{ Name: "CM1", }, Data: map[string]string{ "testfile.fio": "testfiledata", }, }, cPVCErr: fmt.Errorf("pvc create error"), }, args: &RunFIOArgs{ StorageClass: "sc", Size: "100Gi", Namespace: "foo", }, checker: NotNil, expectedSteps: []string{"VN", "VNS", "SCE", "LCM", "CPVC", "DCM"}, }, { // testfilename retrieval error, more than one provided cli: fake.NewSimpleClientset(), stepper: &fakeFioStepper{ lcmConfigMap: &v1.ConfigMap{ ObjectMeta: metav1.ObjectMeta{ Name: "CM1", }, Data: map[string]string{ "testfile.fio": "testfiledata", "testfile.fio2": "testfiledata", }, }, }, args: &RunFIOArgs{ StorageClass: "sc", Size: "100Gi", Namespace: "foo", }, checker: NotNil, expectedSteps: []string{"VN", "VNS", "SCE", "LCM", "DCM"}, }, { // load configmap error cli: fake.NewSimpleClientset(), stepper: &fakeFioStepper{ lcmErr: fmt.Errorf("failed to load configmap"), }, args: &RunFIOArgs{ StorageClass: "sc", Size: "100Gi", Namespace: "foo", }, checker: NotNil, expectedSteps: []string{"VN", "VNS", "SCE", "LCM"}, }, } { c.Log(i) fio := &FIOrunner{ Cli: tc.cli, fioSteps: tc.stepper, } _, err := fio.RunFioHelper(ctx, tc.args) c.Check(err, tc.checker) c.Assert(tc.stepper.steps, DeepEquals, tc.expectedSteps) if err == nil { c.Assert(tc.expectedSC, Equals, tc.stepper.cPVCExpSC) c.Assert(tc.expectedSize, Equals, tc.stepper.cPVCExpSize) c.Assert(tc.expectedTFN, Equals, tc.stepper.cPodExpFN) c.Assert(tc.expectedCM, Equals, tc.stepper.cPodExpCM) c.Assert(tc.expectedPVC, Equals, tc.stepper.cPodExpPVC) } } } type fakeFioStepper struct { steps []string vnErr error vnsErr error sceSC *storagev1.StorageClass sceErr error lcmConfigMap *v1.ConfigMap lcmErr error cPVCExpSC string cPVCExpSize string cPVC *v1.PersistentVolumeClaim cPVCErr error dPVCErr error cPodExpFN string cPodExpCM string cPodExpPVC string cPod *v1.Pod cPodErr error dPodErr error rFIOout FioResult rFIOErr error } func (f *fakeFioStepper) validateNamespace(ctx context.Context, namespace string) error { f.steps = append(f.steps, "VN") return f.vnErr } func (f *fakeFioStepper) validateNodeSelector(ctx context.Context, selector map[string]string) error { f.steps = append(f.steps, "VNS") return f.vnsErr } func (f *fakeFioStepper) storageClassExists(ctx context.Context, storageClass string) (*storagev1.StorageClass, error) { f.steps = append(f.steps, "SCE") return f.sceSC, f.sceErr } func (f *fakeFioStepper) loadConfigMap(ctx context.Context, args *RunFIOArgs) (*v1.ConfigMap, error) { f.steps = append(f.steps, "LCM") return f.lcmConfigMap, f.lcmErr } func (f *fakeFioStepper) createPVC(ctx context.Context, storageclass, size, namespace string) (*v1.PersistentVolumeClaim, error) { f.steps = append(f.steps, "CPVC") f.cPVCExpSC = storageclass f.cPVCExpSize = size return f.cPVC, f.cPVCErr } func (f *fakeFioStepper) deletePVC(ctx context.Context, pvcName, namespace string) error { f.steps = append(f.steps, "DPVC") return f.dPVCErr } func (f *fakeFioStepper) createPod(ctx context.Context, pvcName, configMapName, testFileName, namespace string, nodeSelector map[string]string, image string) (*v1.Pod, error) { f.steps = append(f.steps, "CPOD") f.cPodExpCM = configMapName f.cPodExpFN = testFileName f.cPodExpPVC = pvcName return f.cPod, f.cPodErr } func (f *fakeFioStepper) deletePod(ctx context.Context, podName, namespace string) error { f.steps = append(f.steps, "DPOD") return f.dPodErr } func (f *fakeFioStepper) runFIOCommand(ctx context.Context, podName, containerName, testFileName, namespace string) (FioResult, error) { f.steps = append(f.steps, "RFIOC") return f.rFIOout, f.rFIOErr } func (f *fakeFioStepper) deleteConfigMap(ctx context.Context, configMap *v1.ConfigMap, namespace string) error { f.steps = append(f.steps, "DCM") return nil } func (s *FIOTestSuite) TestStorageClassExists(c *C) { ctx := context.Background() for _, tc := range []struct { cli kubernetes.Interface storageClass string checker Checker }{ { cli: fake.NewSimpleClientset(), storageClass: "sc", checker: NotNil, }, { cli: fake.NewSimpleClientset(&storagev1.StorageClass{ObjectMeta: metav1.ObjectMeta{Name: "sc"}}), storageClass: "sc", checker: IsNil, }, } { stepper := &fioStepper{cli: tc.cli} _, err := stepper.storageClassExists(ctx, tc.storageClass) c.Check(err, tc.checker) } } func (s *FIOTestSuite) TestValidateNamespace(c *C) { ctx := context.Background() stepper := &fioStepper{cli: fake.NewSimpleClientset()} err := stepper.validateNamespace(ctx, "ns") c.Assert(err, NotNil) stepper = &fioStepper{cli: fake.NewSimpleClientset(&v1.Namespace{ ObjectMeta: metav1.ObjectMeta{ Name: "ns", }, })} err = stepper.validateNamespace(ctx, "ns") c.Assert(err, IsNil) } func (s *FIOTestSuite) TestValidateNodeSelector(c *C) { ctx := context.Background() stepper := &fioStepper{cli: fake.NewSimpleClientset( &v1.Node{ ObjectMeta: metav1.ObjectMeta{ Name: "a", Labels: map[string]string{ "key": "value", }, }, }, &v1.Node{ ObjectMeta: metav1.ObjectMeta{ Name: "b", Labels: map[string]string{ "key": "value", "foo": "bar", }, }, }, )} for _, tc := range []struct { nodeSelector map[string]string checker Checker }{ { // 0 nodes satisfy nodeSelector: map[string]string{ "not": "present", }, checker: NotNil, }, { // 1 node satisfies nodeSelector: map[string]string{ "key": "value", "foo": "bar", }, checker: IsNil, }, { // 2 nodes satisfy nodeSelector: map[string]string{ "key": "value", }, checker: IsNil, }, } { err := stepper.validateNodeSelector(ctx, tc.nodeSelector) c.Check(err, tc.checker) } } func (s *FIOTestSuite) TestLoadConfigMap(c *C) { ctx := context.Background() file, err := os.CreateTemp("", "tempTLCfile") c.Check(err, IsNil) defer func() { c.Check(os.Remove(file.Name()), IsNil) }() for i, tc := range []struct { cli kubernetes.Interface configMapName string jobName string args *RunFIOArgs cmChecker Checker errChecker Checker failCreates bool hasLabel bool }{ { // provided file name not found cli: fake.NewSimpleClientset(), args: &RunFIOArgs{ FIOJobFilepath: "nonexistantfile", }, cmChecker: IsNil, errChecker: NotNil, }, { // specified config map found cli: fake.NewSimpleClientset(), args: &RunFIOArgs{ FIOJobFilepath: file.Name(), FIOJobName: "random", // won't use this case }, cmChecker: NotNil, errChecker: IsNil, }, { // specified job name, not found cli: fake.NewSimpleClientset(), args: &RunFIOArgs{ FIOJobName: "random", }, cmChecker: IsNil, errChecker: NotNil, }, { // specified job name, found cli: fake.NewSimpleClientset(), args: &RunFIOArgs{ FIOJobName: DefaultFIOJob, }, cmChecker: NotNil, errChecker: IsNil, }, { // use default job cli: fake.NewSimpleClientset(), args: &RunFIOArgs{}, cmChecker: NotNil, errChecker: IsNil, }, { // Fails to create configMap cli: fake.NewSimpleClientset(), cmChecker: IsNil, errChecker: NotNil, args: &RunFIOArgs{}, failCreates: true, }, } { c.Log(i) stepper := &fioStepper{cli: tc.cli} if tc.failCreates { stepper.cli.(*fake.Clientset).PrependReactor("create", "configmaps", func(action k8stesting.Action) (handled bool, ret runtime.Object, err error) { return true, nil, errors.New("Error creating object") }) } cm, err := stepper.loadConfigMap(ctx, tc.args) c.Check(err, tc.errChecker) c.Check(cm, tc.cmChecker) if cm != nil { _, ok := cm.Labels[CreatedByFIOLabel] c.Assert(ok, Equals, true) } } } func (s *FIOTestSuite) TestCreatePVC(c *C) { ctx := context.Background() for _, tc := range []struct { cli kubernetes.Interface storageclass string size string errChecker Checker pvcChecker Checker failCreates bool }{ { cli: fake.NewSimpleClientset(), storageclass: "fakesc", size: "20Gi", errChecker: IsNil, pvcChecker: NotNil, }, { // Fails to create pvc cli: fake.NewSimpleClientset(), storageclass: "fakesc", size: "10Gi", pvcChecker: IsNil, errChecker: NotNil, failCreates: true, }, { // parse error cli: fake.NewSimpleClientset(), storageclass: "fakesc", size: "Not a quantity", pvcChecker: IsNil, errChecker: NotNil, }, } { stepper := &fioStepper{cli: tc.cli} if tc.failCreates { stepper.cli.(*fake.Clientset).PrependReactor("create", "*", func(action k8stesting.Action) (handled bool, ret runtime.Object, err error) { return true, nil, errors.New("Error creating object") }) } pvc, err := stepper.createPVC(ctx, tc.storageclass, tc.size, DefaultNS) c.Check(err, tc.errChecker) c.Check(pvc, tc.pvcChecker) if pvc != nil { c.Assert(pvc.GenerateName, Equals, PVCGenerateName) c.Assert(*pvc.Spec.StorageClassName, Equals, tc.storageclass) value, ok := pvc.Spec.Resources.Requests.Storage().AsInt64() c.Assert(ok, Equals, true) c.Assert(value, Equals, int64(21474836480)) } } } func (s *FIOTestSuite) TestDeletePVC(c *C) { ctx := context.Background() stepper := &fioStepper{cli: fake.NewSimpleClientset(&v1.PersistentVolumeClaim{ ObjectMeta: metav1.ObjectMeta{ Name: "pvc", Namespace: DefaultNS, }})} err := stepper.deletePVC(ctx, "pvc", DefaultNS) c.Assert(err, IsNil) err = stepper.deletePVC(ctx, "pvc", DefaultNS) c.Assert(err, NotNil) } func (s *FIOTestSuite) TestCreatPod(c *C) { ctx := context.Background() for _, tc := range []struct { pvcName string configMapName string testFileName string nodeSelector map[string]string image string reactor []k8stesting.Reactor podReadyErr error errChecker Checker }{ { pvcName: "pvc", configMapName: "cm", testFileName: "testfile", nodeSelector: map[string]string{ "key": "", "foo": "bar", }, errChecker: IsNil, }, { pvcName: "pvc", configMapName: "cm", testFileName: "testfile", errChecker: NotNil, reactor: []k8stesting.Reactor{ &k8stesting.SimpleReactor{ Verb: "create", Resource: "*", Reaction: func(action k8stesting.Action) (handled bool, ret runtime.Object, err error) { return true, &v1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "pod"}}, nil }, }, &k8stesting.SimpleReactor{ Verb: "get", Resource: "*", Reaction: func(action k8stesting.Action) (handled bool, ret runtime.Object, err error) { return true, nil, errors.New("Error getting object") }, }, }, }, { pvcName: "pvc", configMapName: "cm", testFileName: "testfile", errChecker: NotNil, reactor: []k8stesting.Reactor{ &k8stesting.SimpleReactor{ Verb: "create", Resource: "*", Reaction: func(action k8stesting.Action) (handled bool, ret runtime.Object, err error) { return true, &v1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "pod"}}, nil }, }, }, podReadyErr: fmt.Errorf("pod ready error"), }, { pvcName: "pvc", configMapName: "cm", testFileName: "testfile", errChecker: NotNil, reactor: []k8stesting.Reactor{ &k8stesting.SimpleReactor{ Verb: "create", Resource: "*", Reaction: func(action k8stesting.Action) (handled bool, ret runtime.Object, err error) { return true, nil, fmt.Errorf("pod create error") }, }, }, }, { pvcName: "pvc", configMapName: "cm", testFileName: "", image: "someotherimage", errChecker: NotNil, }, { pvcName: "", configMapName: "cm", testFileName: "asdf", errChecker: NotNil, }, { pvcName: "pvc", configMapName: "", testFileName: "asd", errChecker: NotNil, }, } { stepper := &fioStepper{ cli: fake.NewSimpleClientset(), podReady: &fakePodReadyChecker{prcErr: tc.podReadyErr}, } if tc.reactor != nil { stepper.cli.(*fake.Clientset).ReactionChain = tc.reactor } pod, err := stepper.createPod(ctx, tc.pvcName, tc.configMapName, tc.testFileName, DefaultNS, tc.nodeSelector, tc.image) c.Check(err, tc.errChecker) if err == nil { c.Assert(pod.GenerateName, Equals, PodGenerateName) c.Assert(len(pod.Spec.Volumes), Equals, 2) for _, vol := range pod.Spec.Volumes { switch vol.Name { case "persistent-storage": c.Assert(vol.PersistentVolumeClaim.ClaimName, Equals, tc.pvcName) case "config-map": c.Assert(vol.ConfigMap.Name, Equals, tc.configMapName) } } c.Assert(len(pod.Spec.Containers), Equals, 1) c.Assert(pod.Spec.Containers[0].Name, Equals, ContainerName) c.Assert(pod.Spec.Containers[0].Command, DeepEquals, []string{"/bin/sh"}) c.Assert(pod.Spec.Containers[0].Args, DeepEquals, []string{"-c", "tail -f /dev/null"}) c.Assert(pod.Spec.Containers[0].VolumeMounts, DeepEquals, []v1.VolumeMount{ {Name: "persistent-storage", MountPath: VolumeMountPath}, {Name: "config-map", MountPath: ConfigMapMountPath}, }) if tc.image == "" { c.Assert(pod.Spec.Containers[0].Image, Equals, common.DefaultPodImage) } else { c.Assert(pod.Spec.Containers[0].Image, Equals, tc.image) } c.Assert(pod.Spec.NodeSelector, DeepEquals, tc.nodeSelector) } } } func (s *FIOTestSuite) TestDeletePod(c *C) { ctx := context.Background() stepper := &fioStepper{cli: fake.NewSimpleClientset(&v1.Pod{ ObjectMeta: metav1.ObjectMeta{ Name: "pod", Namespace: DefaultNS, }})} err := stepper.deletePod(ctx, "pod", DefaultNS) c.Assert(err, IsNil) err = stepper.deletePod(ctx, "pod", DefaultNS) c.Assert(err, NotNil) } func (s *FIOTestSuite) TestFioTestFileName(c *C) { for _, tc := range []struct { configMap map[string]string retVal string errChecker Checker }{ { configMap: map[string]string{ "testfile.fio": "some test data", }, retVal: "testfile.fio", errChecker: IsNil, }, { configMap: map[string]string{ "ConfigMapSCKey": "storageclass", "ConfigMapSizeKey": "10Gi", "testfile.fio": "some test data", }, retVal: "", errChecker: NotNil, }, } { ret, err := fioTestFilename(tc.configMap) c.Check(err, tc.errChecker) c.Assert(ret, Equals, tc.retVal) } } func (s *FIOTestSuite) TestRunFioCommand(c *C) { var parsedout FioResult err := json.Unmarshal([]byte(parsableFioOutput), &parsedout) c.Assert(err, IsNil) ctx := context.Background() for _, tc := range []struct { executor *fakeKubeExecutor errChecker Checker podName string containerName string testFileName string out FioResult }{ { executor: &fakeKubeExecutor{ keErr: nil, keStrErr: "", keStdOut: parsableFioOutput, }, errChecker: IsNil, podName: "pod", containerName: "container", testFileName: "tfName", out: parsedout, }, { executor: &fakeKubeExecutor{ keErr: nil, keStrErr: "", keStdOut: "unparsable string", }, errChecker: NotNil, podName: "pod", containerName: "container", testFileName: "tfName", out: FioResult{}, }, { executor: &fakeKubeExecutor{ keErr: fmt.Errorf("kubeexec err"), keStrErr: "", keStdOut: "unparsable string", }, errChecker: NotNil, podName: "pod", containerName: "container", testFileName: "tfName", out: FioResult{}, }, { executor: &fakeKubeExecutor{ keErr: nil, keStrErr: "execution error", keStdOut: "unparsable string", }, errChecker: NotNil, podName: "pod", containerName: "container", testFileName: "tfName", out: FioResult{}, }, } { stepper := &fioStepper{ kubeExecutor: tc.executor, } out, err := stepper.runFIOCommand(ctx, tc.podName, tc.containerName, tc.testFileName, DefaultNS) c.Check(err, tc.errChecker) c.Assert(out, DeepEquals, tc.out) c.Assert(tc.executor.keInPodName, Equals, tc.podName) c.Assert(tc.executor.keInContainerName, Equals, tc.containerName) c.Assert(len(tc.executor.keInCommand), Equals, 5) c.Assert(tc.executor.keInCommand[0], Equals, "fio") c.Assert(tc.executor.keInCommand[1], Equals, "--directory") c.Assert(tc.executor.keInCommand[2], Equals, VolumeMountPath) jobFilePath := fmt.Sprintf("%s/%s", ConfigMapMountPath, tc.testFileName) c.Assert(tc.executor.keInCommand[3], Equals, jobFilePath) } } func (s *FIOTestSuite) TestDeleteConfigMap(c *C) { ctx := context.Background() defaultNS := "default" c.Check(os.Setenv(PodNamespaceEnvKey, defaultNS), IsNil) for _, tc := range []struct { cli kubernetes.Interface cm *v1.ConfigMap errChecker Checker lenCMList int }{ { // Don't delete it unless it has the label cli: fake.NewSimpleClientset(&v1.ConfigMap{ ObjectMeta: metav1.ObjectMeta{ Name: "cm", Namespace: defaultNS, }, }), cm: &v1.ConfigMap{ ObjectMeta: metav1.ObjectMeta{ Name: "cm", Namespace: defaultNS, }, }, errChecker: IsNil, lenCMList: 1, }, { // Has label delete cli: fake.NewSimpleClientset(&v1.ConfigMap{ ObjectMeta: metav1.ObjectMeta{ Name: "cm", Namespace: defaultNS, }, }), cm: &v1.ConfigMap{ ObjectMeta: metav1.ObjectMeta{ Name: "cm", Namespace: defaultNS, Labels: map[string]string{ CreatedByFIOLabel: "true", }, }, }, errChecker: IsNil, lenCMList: 0, }, { // No cm exists cli: fake.NewSimpleClientset(), cm: &v1.ConfigMap{ ObjectMeta: metav1.ObjectMeta{ Name: "cm", Namespace: defaultNS, Labels: map[string]string{ CreatedByFIOLabel: "true", }, }, }, errChecker: NotNil, }, } { stepper := &fioStepper{cli: tc.cli} err := stepper.deleteConfigMap(ctx, tc.cm, DefaultNS) c.Check(err, tc.errChecker) if err == nil { list, err := stepper.cli.CoreV1().ConfigMaps(defaultNS).List(ctx, metav1.ListOptions{}) c.Check(err, IsNil) c.Assert(len(list.Items), Equals, tc.lenCMList) } } c.Check(os.Unsetenv(PodNamespaceEnvKey), IsNil) } func (s *FIOTestSuite) TestWaitForPodReady(c *C) { ctx := context.Background() prChecker := &podReadyChecker{ cli: fake.NewSimpleClientset(), } err := prChecker.waitForPodReady(ctx, "somens", "somePod") c.Check(err, NotNil) prChecker.cli = fake.NewSimpleClientset(&v1.Pod{ ObjectMeta: metav1.ObjectMeta{ Name: "somePod", Namespace: "somens", }, Status: v1.PodStatus{ Phase: v1.PodRunning, }, }) } type fakePodReadyChecker struct { prcErr error } func (f *fakePodReadyChecker) waitForPodReady(ctx context.Context, namespace, name string) error { return f.prcErr } type fakeKubeExecutor struct { keErr error keStdOut string keStrErr string keInNS string keInPodName string keInContainerName string keInCommand []string } func (fk *fakeKubeExecutor) exec(_ context.Context, namespace, podName, containerName string, command []string) (string, string, error) { fk.keInNS = namespace fk.keInPodName = podName fk.keInContainerName = containerName fk.keInCommand = command return fk.keStdOut, fk.keStrErr, fk.keErr } ================================================ FILE: pkg/fio/fio_types.go ================================================ package fio import "fmt" type FioResult struct { FioVersion string `json:"fio version,omitempty"` Timestamp int64 `json:"timestamp,omitempty"` TimestampMS int64 `json:"timestamp_ms,omitempty"` Time string `json:"time,omitempty"` GlobalOptions FioGlobalOptions `json:"global options,omitempty"` Jobs []FioJobs `json:"jobs,omitempty"` DiskUtil []FioDiskUtil `json:"disk_util,omitempty"` } func (f FioResult) Print() string { var res string res += fmt.Sprintf("FIO version - %s\n", f.FioVersion) res += fmt.Sprintf("Global options - %s\n\n", f.GlobalOptions.Print()) for _, job := range f.Jobs { res += fmt.Sprintf("%s\n", job.Print()) } res += "Disk stats (read/write):\n" for _, du := range f.DiskUtil { res += fmt.Sprintf("%s\n", du.Print()) } return res } type FioGlobalOptions struct { Directory string `json:"directory,omitempty"` RandRepeat string `json:"randrepeat,omitempty"` Verify string `json:"verify,omitempty"` IOEngine string `json:"ioengine,omitempty"` Direct string `json:"direct,omitempty"` GtodReduce string `json:"gtod_reduce,omitempty"` } func (g FioGlobalOptions) Print() string { return fmt.Sprintf("ioengine=%s verify=%s direct=%s gtod_reduce=%s", g.IOEngine, g.Verify, g.Direct, g.GtodReduce) } type FioJobs struct { JobName string `json:"jobname,omitempty"` GroupID int `json:"groupid,omitempty"` Error int `json:"error,omitempty"` Eta int `json:"eta,omitempty"` Elapsed int `json:"elapsed,omitempty"` JobOptions FioJobOptions `json:"job options,omitempty"` Read FioStats `json:"read,omitempty"` Write FioStats `json:"write,omitempty"` Trim FioStats `json:"trim,omitempty"` Sync FioStats `json:"sync,omitempty"` JobRuntime int32 `json:"job_runtime,omitempty"` UsrCpu float32 `json:"usr_cpu,omitempty"` SysCpu float32 `json:"sys_cpu,omitempty"` Ctx int32 `json:"ctx,omitempty"` MajF int32 `json:"majf,omitempty"` MinF int32 `json:"minf,omitempty"` IoDepthLevel FioDepth `json:"iodepth_level,omitempty"` IoDepthSubmit FioDepth `json:"iodepth_submit,omitempty"` IoDepthComplete FioDepth `json:"iodepth_complete,omitempty"` LatencyNs FioLatency `json:"latency_ns,omitempty"` LatencyUs FioLatency `json:"latency_us,omitempty"` LatencyMs FioLatency `json:"latency_ms,omitempty"` LatencyDepth int32 `json:"latency_depth,omitempty"` LatencyTarget int32 `json:"latency_target,omitempty"` LatencyPercentile float32 `json:"latency_percentile,omitempty"` LatencyWindow int32 `json:"latency_window,omitempty"` } func (j FioJobs) Print() string { var job string job += fmt.Sprintf("%s\n", j.JobOptions.Print()) if j.Read.Iops != 0 || j.Read.BW != 0 { job += fmt.Sprintf("read:\n%s\n", j.Read.Print()) } if j.Write.Iops != 0 || j.Write.BW != 0 { job += fmt.Sprintf("write:\n%s\n", j.Write.Print()) } return job } type FioJobOptions struct { Name string `json:"name,omitempty"` BS string `json:"bs,omitempty"` IoDepth string `json:"iodepth,omitempty"` Size string `json:"size,omitempty"` RW string `json:"rw,omitempty"` RampTime string `json:"ramp_time,omitempty"` RunTime string `json:"runtime,omitempty"` } func (o FioJobOptions) Print() string { return fmt.Sprintf("JobName: %s\n blocksize=%s filesize=%s iodepth=%s rw=%s", o.Name, o.BS, o.Size, o.IoDepth, o.RW) } type FioStats struct { IOBytes int64 `json:"io_bytes,omitempty"` IOKBytes int64 `json:"io_kbytes,omitempty"` BWBytes int64 `json:"bw_bytes,omitempty"` BW int64 `json:"bw,omitempty"` Iops float32 `json:"iops,omitempty"` Runtime int64 `json:"runtime,omitempty"` TotalIos int64 `json:"total_ios,omitempty"` ShortIos int64 `json:"short_ios,omitempty"` DropIos int64 `json:"drop_ios,omitempty"` SlatNs FioNS `json:"slat_ns,omitempty"` ClatNs FioNS `json:"clat_ns,omitempty"` LatNs FioNS `json:"lat_ns,omitempty"` BwMin int64 `json:"bw_min,omitempty"` BwMax int64 `json:"bw_max,omitempty"` BwAgg float32 `json:"bw_agg,omitempty"` BwMean float32 `json:"bw_mean,omitempty"` BwDev float32 `json:"bw_dev,omitempty"` BwSamples int32 `json:"bw_samples,omitempty"` IopsMin int32 `json:"iops_min,omitempty"` IopsMax int32 `json:"iops_max,omitempty"` IopsMean float32 `json:"iops_mean,omitempty"` IopsStdDev float32 `json:"iops_stddev,omitempty"` IopsSamples int32 `json:"iops_samples,omitempty"` } func (s FioStats) Print() string { var stats string stats += fmt.Sprintf(" IOPS=%f BW(KiB/s)=%d\n", s.Iops, s.BW) stats += fmt.Sprintf(" iops: min=%d max=%d avg=%f\n", s.IopsMin, s.IopsMax, s.IopsMean) stats += fmt.Sprintf(" bw(KiB/s): min=%d max=%d avg=%f", s.BwMin, s.BwMax, s.BwMean) return stats } type FioNS struct { Min int64 `json:"min,omitempty"` Max int64 `json:"max,omitempty"` Mean float32 `json:"mean,omitempty"` StdDev float32 `json:"stddev,omitempty"` N int64 `json:"N,omitempty"` } type FioDepth struct { FioDepth0 float32 `json:"0,omitempty"` FioDepth1 float32 `json:"1,omitempty"` FioDepth2 float32 `json:"2,omitempty"` FioDepth4 float32 `json:"4,omitempty"` FioDepth8 float32 `json:"8,omitempty"` FioDepth16 float32 `json:"16,omitempty"` FioDepth32 float32 `json:"32,omitempty"` FioDepth64 float32 `json:"64,omitempty"` FioDepthGE64 float32 `json:">=64,omitempty"` } type FioLatency struct { FioLat2 float32 `json:"2,omitempty"` FioLat4 float32 `json:"4,omitempty"` FioLat10 float32 `json:"10,omitempty"` FioLat20 float32 `json:"20,omitempty"` FioLat50 float32 `json:"50,omitempty"` FioLat100 float32 `json:"100,omitempty"` FioLat250 float32 `json:"250,omitempty"` FioLat500 float32 `json:"500,omitempty"` FioLat750 float32 `json:"750,omitempty"` FioLat1000 float32 `json:"1000,omitempty"` FioLat2000 float32 `json:"2000,omitempty"` FioLatGE2000 float32 `json:">=2000,omitempty"` } type FioDiskUtil struct { Name string `json:"name,omitempty"` ReadIos int64 `json:"read_ios,omitempty"` WriteIos int64 `json:"write_ios,omitempty"` ReadMerges int64 `json:"read_merges,omitempty"` WriteMerges int64 `json:"write_merges,omitempty"` ReadTicks int64 `json:"read_ticks,omitempty"` WriteTicks int64 `json:"write_ticks,omitempty"` InQueue int64 `json:"in_queue,omitempty"` Util float32 `json:"util,omitempty"` } func (d FioDiskUtil) Print() string { //Disk stats (read/write): //rbd4: ios=30022/11982, merge=0/313, ticks=1028675/1022768, in_queue=2063740, util=99.67% var du string du += fmt.Sprintf(" %s: ios=%d/%d merge=%d/%d ticks=%d/%d in_queue=%d, util=%f%%", d.Name, d.ReadIos, d.WriteIos, d.ReadMerges, d.WriteMerges, d.ReadTicks, d.WriteTicks, d.InQueue, d.Util) return du } ================================================ FILE: pkg/fio/parsable_fio_output.go ================================================ package fio const parsableFioOutput = `{ "fio version" : "fio-3.20", "timestamp" : 1611952282, "timestamp_ms" : 1611952282240, "time" : "Fri Jan 29 20:31:22 2021", "global options" : { "directory" : "/dataset", "randrepeat" : "0", "verify" : "0", "ioengine" : "libaio", "direct" : "1", "gtod_reduce" : "1" }, "jobs" : [ { "jobname" : "read_iops", "groupid" : 0, "error" : 0, "eta" : 0, "elapsed" : 18, "job options" : { "name" : "read_iops", "bs" : "4K", "iodepth" : "64", "size" : "2G", "rw" : "randread", "ramp_time" : "2s", "runtime" : "15s" }, "read" : { "io_bytes" : 61886464, "io_kbytes" : 60436, "bw_bytes" : 4039322, "bw" : 3944, "iops" : 982.050780, "runtime" : 15321, "total_ios" : 15046, "short_ios" : 0, "drop_ios" : 0, "slat_ns" : { "min" : 0, "max" : 0, "mean" : 0.000000, "stddev" : 0.000000, "N" : 0 }, "clat_ns" : { "min" : 0, "max" : 0, "mean" : 0.000000, "stddev" : 0.000000, "N" : 0 }, "lat_ns" : { "min" : 0, "max" : 0, "mean" : 0.000000, "stddev" : 0.000000, "N" : 0 }, "bw_min" : 1919, "bw_max" : 7664, "bw_agg" : 100.000000, "bw_mean" : 3995.000000, "bw_dev" : 1200.820783, "bw_samples" : 30, "iops_min" : 479, "iops_max" : 1916, "iops_mean" : 998.566667, "iops_stddev" : 300.247677, "iops_samples" : 30 }, "write" : { "io_bytes" : 0, "io_kbytes" : 0, "bw_bytes" : 0, "bw" : 0, "iops" : 0.000000, "runtime" : 0, "total_ios" : 0, "short_ios" : 0, "drop_ios" : 0, "slat_ns" : { "min" : 0, "max" : 0, "mean" : 0.000000, "stddev" : 0.000000, "N" : 0 }, "clat_ns" : { "min" : 0, "max" : 0, "mean" : 0.000000, "stddev" : 0.000000, "N" : 0 }, "lat_ns" : { "min" : 0, "max" : 0, "mean" : 0.000000, "stddev" : 0.000000, "N" : 0 }, "bw_min" : 0, "bw_max" : 0, "bw_agg" : 0.000000, "bw_mean" : 0.000000, "bw_dev" : 0.000000, "bw_samples" : 0, "iops_min" : 0, "iops_max" : 0, "iops_mean" : 0.000000, "iops_stddev" : 0.000000, "iops_samples" : 0 }, "trim" : { "io_bytes" : 0, "io_kbytes" : 0, "bw_bytes" : 0, "bw" : 0, "iops" : 0.000000, "runtime" : 0, "total_ios" : 0, "short_ios" : 0, "drop_ios" : 0, "slat_ns" : { "min" : 0, "max" : 0, "mean" : 0.000000, "stddev" : 0.000000, "N" : 0 }, "clat_ns" : { "min" : 0, "max" : 0, "mean" : 0.000000, "stddev" : 0.000000, "N" : 0 }, "lat_ns" : { "min" : 0, "max" : 0, "mean" : 0.000000, "stddev" : 0.000000, "N" : 0 }, "bw_min" : 0, "bw_max" : 0, "bw_agg" : 0.000000, "bw_mean" : 0.000000, "bw_dev" : 0.000000, "bw_samples" : 0, "iops_min" : 0, "iops_max" : 0, "iops_mean" : 0.000000, "iops_stddev" : 0.000000, "iops_samples" : 0 }, "sync" : { "total_ios" : 0, "lat_ns" : { "min" : 0, "max" : 0, "mean" : 0.000000, "stddev" : 0.000000, "N" : 0 } }, "job_runtime" : 15322, "usr_cpu" : 1.109516, "sys_cpu" : 3.648349, "ctx" : 17991, "majf" : 1, "minf" : 62, "iodepth_level" : { "1" : 0.000000, "2" : 0.000000, "4" : 0.000000, "8" : 0.000000, "16" : 0.000000, "32" : 0.000000, ">=64" : 100.000000 }, "iodepth_submit" : { "0" : 0.000000, "4" : 100.000000, "8" : 0.000000, "16" : 0.000000, "32" : 0.000000, "64" : 0.000000, ">=64" : 0.000000 }, "iodepth_complete" : { "0" : 0.000000, "4" : 99.993354, "8" : 0.000000, "16" : 0.000000, "32" : 0.000000, "64" : 0.100000, ">=64" : 0.000000 }, "latency_ns" : { "2" : 0.000000, "4" : 0.000000, "10" : 0.000000, "20" : 0.000000, "50" : 0.000000, "100" : 0.000000, "250" : 0.000000, "500" : 0.000000, "750" : 0.000000, "1000" : 0.000000 }, "latency_us" : { "2" : 0.000000, "4" : 0.000000, "10" : 0.000000, "20" : 0.000000, "50" : 0.000000, "100" : 0.000000, "250" : 0.000000, "500" : 0.000000, "750" : 0.000000, "1000" : 0.000000 }, "latency_ms" : { "2" : 0.000000, "4" : 0.000000, "10" : 0.000000, "20" : 0.000000, "50" : 0.000000, "100" : 0.000000, "250" : 0.000000, "500" : 0.000000, "750" : 0.000000, "1000" : 0.000000, "2000" : 0.000000, ">=2000" : 0.000000 }, "latency_depth" : 64, "latency_target" : 0, "latency_percentile" : 100.000000, "latency_window" : 0 }, { "jobname" : "write_iops", "groupid" : 0, "error" : 0, "eta" : 0, "elapsed" : 18, "job options" : { "name" : "write_iops", "bs" : "4K", "iodepth" : "64", "size" : "2G", "rw" : "randwrite", "ramp_time" : "2s", "runtime" : "15s" }, "read" : { "io_bytes" : 0, "io_kbytes" : 0, "bw_bytes" : 0, "bw" : 0, "iops" : 0.000000, "runtime" : 0, "total_ios" : 0, "short_ios" : 0, "drop_ios" : 0, "slat_ns" : { "min" : 0, "max" : 0, "mean" : 0.000000, "stddev" : 0.000000, "N" : 0 }, "clat_ns" : { "min" : 0, "max" : 0, "mean" : 0.000000, "stddev" : 0.000000, "N" : 0 }, "lat_ns" : { "min" : 0, "max" : 0, "mean" : 0.000000, "stddev" : 0.000000, "N" : 0 }, "bw_min" : 0, "bw_max" : 0, "bw_agg" : 0.000000, "bw_mean" : 0.000000, "bw_dev" : 0.000000, "bw_samples" : 0, "iops_min" : 0, "iops_max" : 0, "iops_mean" : 0.000000, "iops_stddev" : 0.000000, "iops_samples" : 0 }, "write" : { "io_bytes" : 24805376, "io_kbytes" : 24224, "bw_bytes" : 1616406, "bw" : 1578, "iops" : 390.525218, "runtime" : 15346, "total_ios" : 5993, "short_ios" : 0, "drop_ios" : 0, "slat_ns" : { "min" : 0, "max" : 0, "mean" : 0.000000, "stddev" : 0.000000, "N" : 0 }, "clat_ns" : { "min" : 0, "max" : 0, "mean" : 0.000000, "stddev" : 0.000000, "N" : 0 }, "lat_ns" : { "min" : 0, "max" : 0, "mean" : 0.000000, "stddev" : 0.000000, "N" : 0 }, "bw_min" : 512, "bw_max" : 2706, "bw_agg" : 100.000000, "bw_mean" : 1581.066667, "bw_dev" : 476.641189, "bw_samples" : 30, "iops_min" : 128, "iops_max" : 676, "iops_mean" : 395.033333, "iops_stddev" : 119.151738, "iops_samples" : 30 }, "trim" : { "io_bytes" : 0, "io_kbytes" : 0, "bw_bytes" : 0, "bw" : 0, "iops" : 0.000000, "runtime" : 0, "total_ios" : 0, "short_ios" : 0, "drop_ios" : 0, "slat_ns" : { "min" : 0, "max" : 0, "mean" : 0.000000, "stddev" : 0.000000, "N" : 0 }, "clat_ns" : { "min" : 0, "max" : 0, "mean" : 0.000000, "stddev" : 0.000000, "N" : 0 }, "lat_ns" : { "min" : 0, "max" : 0, "mean" : 0.000000, "stddev" : 0.000000, "N" : 0 }, "bw_min" : 0, "bw_max" : 0, "bw_agg" : 0.000000, "bw_mean" : 0.000000, "bw_dev" : 0.000000, "bw_samples" : 0, "iops_min" : 0, "iops_max" : 0, "iops_mean" : 0.000000, "iops_stddev" : 0.000000, "iops_samples" : 0 }, "sync" : { "total_ios" : 0, "lat_ns" : { "min" : 0, "max" : 0, "mean" : 0.000000, "stddev" : 0.000000, "N" : 0 } }, "job_runtime" : 15345, "usr_cpu" : 0.508309, "sys_cpu" : 2.280873, "ctx" : 7411, "majf" : 1, "minf" : 63, "iodepth_level" : { "1" : 0.000000, "2" : 0.000000, "4" : 0.000000, "8" : 0.000000, "16" : 0.000000, "32" : 0.000000, ">=64" : 100.000000 }, "iodepth_submit" : { "0" : 0.000000, "4" : 100.000000, "8" : 0.000000, "16" : 0.000000, "32" : 0.000000, "64" : 0.000000, ">=64" : 0.000000 }, "iodepth_complete" : { "0" : 0.000000, "4" : 99.983317, "8" : 0.000000, "16" : 0.000000, "32" : 0.000000, "64" : 0.100000, ">=64" : 0.000000 }, "latency_ns" : { "2" : 0.000000, "4" : 0.000000, "10" : 0.000000, "20" : 0.000000, "50" : 0.000000, "100" : 0.000000, "250" : 0.000000, "500" : 0.000000, "750" : 0.000000, "1000" : 0.000000 }, "latency_us" : { "2" : 0.000000, "4" : 0.000000, "10" : 0.000000, "20" : 0.000000, "50" : 0.000000, "100" : 0.000000, "250" : 0.000000, "500" : 0.000000, "750" : 0.000000, "1000" : 0.000000 }, "latency_ms" : { "2" : 0.000000, "4" : 0.000000, "10" : 0.000000, "20" : 0.000000, "50" : 0.000000, "100" : 0.000000, "250" : 0.000000, "500" : 0.000000, "750" : 0.000000, "1000" : 0.000000, "2000" : 0.000000, ">=2000" : 0.000000 }, "latency_depth" : 64, "latency_target" : 0, "latency_percentile" : 100.000000, "latency_window" : 0 } ], "disk_util" : [ { "name" : "rbd4", "read_ios" : 16957, "write_ios" : 6896, "read_merges" : 0, "write_merges" : 207, "read_ticks" : 1072290, "write_ticks" : 1043421, "in_queue" : 2119036, "util" : 99.712875 } ] }` ================================================ FILE: pkg/kubestr/csi-drivers.go ================================================ package kubestr // THIS FILE IS AUTO_GENERATED. // To generate file run "go generate" at the top level // This file must be checked in. var CSIDriverList = []*CSIDriver{ {NameUrl: "[Alicloud Disk](https://github.com/AliyunContainerService/csi-plugin)", DriverName: "diskplugin.csi.alibabacloud.com", Versions: "v1.0", Description: "A Container Storage Interface (CSI) Driver for Alicloud Disk", Persistence: "Persistent", AccessModes: "Read/Write Single Pod", DynamicProvisioning: "Yes", Features: "Raw Block, Snapshot"}, {NameUrl: "[Alicloud NAS](https://github.com/AliyunContainerService/csi-plugin)", DriverName: "nasplugin.csi.alibabacloud.com", Versions: "v1.0", Description: "A Container Storage Interface (CSI) Driver for Alicloud Network Attached Storage (NAS)", Persistence: "Persistent", AccessModes: "Read/Write Multiple Pods", DynamicProvisioning: "No", Features: ""}, {NameUrl: "[Alicloud OSS](https://github.com/AliyunContainerService/csi-plugin)", DriverName: "ossplugin.csi.alibabacloud.com", Versions: "v1.0", Description: "A Container Storage Interface (CSI) Driver for Alicloud Object Storage Service (OSS)", Persistence: "Persistent", AccessModes: "Read/Write Multiple Pods", DynamicProvisioning: "No", Features: ""}, {NameUrl: "[ArStor CSI](https://github.com/huayun-docs/csi-driver-arstor)", DriverName: "arstor.csi.huayun.io", Versions: "v1.0", Description: "A Container Storage Interface (CSI) Driver for Huayun Storage Service (ArStor)", Persistence: "Persistent and Ephemeral", AccessModes: "Read/Write Single Pod", DynamicProvisioning: "Yes", Features: "Raw Block, Snapshot, Expansion, Cloning"}, {NameUrl: "[AWS Elastic Block Storage](https://github.com/kubernetes-sigs/aws-ebs-csi-driver)", DriverName: "ebs.csi.aws.com", Versions: "v0.3, v1.0", Description: "A Container Storage Interface (CSI) Driver for AWS Elastic Block Storage (EBS)", Persistence: "Persistent", AccessModes: "Read/Write Single Pod", DynamicProvisioning: "Yes", Features: "Raw Block, Snapshot, Expansion"}, {NameUrl: "[AWS Elastic File System](https://github.com/aws/aws-efs-csi-driver)", DriverName: "efs.csi.aws.com", Versions: "v0.3, v1.0", Description: "A Container Storage Interface (CSI) Driver for AWS Elastic File System (EFS)", Persistence: "Persistent", AccessModes: "Read/Write Multiple Pods", DynamicProvisioning: "No", Features: ""}, {NameUrl: "[AWS FSx for Lustre](https://github.com/aws/aws-fsx-csi-driver)", DriverName: "fsx.csi.aws.com", Versions: "v0.3, v1.0", Description: "A Container Storage Interface (CSI) Driver for AWS FSx for Lustre (EBS)", Persistence: "Persistent", AccessModes: "Read/Write Multiple Pods", DynamicProvisioning: "Yes", Features: ""}, {NameUrl: "[Azure disk](https://github.com/kubernetes-sigs/azuredisk-csi-driver)", DriverName: "disk.csi.azure.com", Versions: "v0.3, v1.0", Description: "A Container Storage Interface (CSI) Driver for Azure disk", Persistence: "Persistent", AccessModes: "Read/Write Single Pod", DynamicProvisioning: "Yes", Features: ""}, {NameUrl: "[Azure file](https://github.com/kubernetes-sigs/azurefile-csi-driver)", DriverName: "file.csi.azure.com", Versions: "v0.3, v1.0", Description: "A Container Storage Interface (CSI) Driver for Azure file", Persistence: "Persistent", AccessModes: "Read/Write Multiple Pods", DynamicProvisioning: "Yes", Features: ""}, {NameUrl: "[BeeGFS](https://github.com/NetApp/beegfs-csi-driver)", DriverName: "beegfs.csi.netapp.com", Versions: "v1.3", Description: "A Container Storage Interface (CSI) Driver for the [BeeGFS](https://www.beegfs.io/) Parallel File System", Persistence: "Persistent", AccessModes: "Read/Write Multiple Pods", DynamicProvisioning: "Yes", Features: ""}, {NameUrl: "[Bigtera VirtualStor (block)](https://github.com/bigtera-ce/ceph-csi)", DriverName: "csi.block.bigtera.com", Versions: "v0.3, v1.0.0, v1.1.0", Description: "A Container Storage Interface (CSI) Driver for Bigtera VirtualStor block storage", Persistence: "Persistent", AccessModes: "Read/Write Single Pod", DynamicProvisioning: "Yes", Features: "Raw Block, Snapshot, Expansion"}, {NameUrl: "[Bigtera VirtualStor (filesystem)](https://github.com/bigtera-ce/ceph-csi)", DriverName: "csi.fs.bigtera.com", Versions: "v0.3, v1.0.0, v1.1.0", Description: "A Container Storage Interface (CSI) Driver for Bigtera VirtualStor filesystem", Persistence: "Persistent", AccessModes: "Read/Write Multiple Pods", DynamicProvisioning: "Yes", Features: "Expansion"}, {NameUrl: "[BizFlyCloud Block Storage](https://github.com/bizflycloud/csi-bizflycloud)", DriverName: "volume.csi.bizflycloud.vn", Versions: "v1.2", Description: "A Container Storage Interface (CSI) Driver for BizFly Cloud block storage", Persistence: "Persistent", AccessModes: "Read/Write Single Pod", DynamicProvisioning: "Yes", Features: "Raw Block, Snapshot, Expansion"}, {NameUrl: "[CephFS](https://github.com/ceph/ceph-csi)", DriverName: "cephfs.csi.ceph.com", Versions: "v0.3, >=v1.0.0", Description: "A Container Storage Interface (CSI) Driver for CephFS", Persistence: "Persistent", AccessModes: "Read/Write Multiple Pods", DynamicProvisioning: "Yes", Features: "Expansion, Snapshot, Cloning"}, {NameUrl: "[Ceph RBD](https://github.com/ceph/ceph-csi)", DriverName: "rbd.csi.ceph.com", Versions: "v0.3, >=v1.0.0", Description: "A Container Storage Interface (CSI) Driver for Ceph RBD", Persistence: "Persistent", AccessModes: "Read/Write Single Pod", DynamicProvisioning: "Yes", Features: "Raw Block, Snapshot, Expansion, Topology, Cloning"}, {NameUrl: "[ChubaoFS](https://github.com/chubaofs/chubaofs-csi)", DriverName: "csi.chubaofs.com", Versions: "v1.0.0", Description: "A Container Storage Interface (CSI) Driver for ChubaoFS Storage", Persistence: "Persistent", AccessModes: "Read/Write Multiple Pods", DynamicProvisioning: "Yes", Features: ""}, {NameUrl: "[Cinder](https://github.com/kubernetes/cloud-provider-openstack/tree/master/pkg/csi/cinder)", DriverName: "cinder.csi.openstack.org", Versions: "v0.3, v1.0, v1.1.0, v1.2.0, v1.3.0", Description: "A Container Storage Interface (CSI) Driver for OpenStack Cinder", Persistence: "Persistent and Ephemeral", AccessModes: "Depends on the storage backend used", DynamicProvisioning: "Yes, if storage backend supports it", Features: "Raw Block, Snapshot, Expansion, Cloning, Topology"}, {NameUrl: "[cloudscale.ch](https://github.com/cloudscale-ch/csi-cloudscale)", DriverName: "csi.cloudscale.ch", Versions: "v1.0", Description: "A Container Storage Interface (CSI) Driver for the [cloudscale.ch](https://www.cloudscale.ch/) IaaS platform", Persistence: "Persistent", AccessModes: "Read/Write Single Pod", DynamicProvisioning: "Yes", Features: "Snapshot"}, {NameUrl: "[Datatom-InfinityCSI](https://github.com/datatom-infinity/infinity-csi)", DriverName: "csi-infiblock-plugin", Versions: "v0.3, v1.0.0, v1.1.0", Description: "A Container Storage Interface (CSI) Driver for DATATOM Infinity storage", Persistence: "Persistent", AccessModes: "Read/Write Single Pod", DynamicProvisioning: "Yes", Features: "Raw Block, Snapshot, Expansion, Topology"}, {NameUrl: "[Datatom-InfinityCSI (filesystem)](https://github.com/datatom-infinity/infinity-csi)", DriverName: "csi-infifs-plugin", Versions: "v0.3, v1.0.0, v1.1.0", Description: "A Container Storage Interface (CSI) Driver for DATATOM Infinity filesystem storage", Persistence: "Persistent", AccessModes: "Read/Write Multiple Pods", DynamicProvisioning: "Yes", Features: "Expansion"}, {NameUrl: "[Datera](https://github.com/Datera/datera-csi)", DriverName: "dsp.csi.daterainc.io", Versions: "v1.0", Description: "A Container Storage Interface (CSI) Driver for Datera Data Services Platform (DSP)", Persistence: "Persistent", AccessModes: "Read/Write Single Pod", DynamicProvisioning: "Yes", Features: "Snapshot"}, {NameUrl: "[DDN EXAScaler](https://github.com/DDNStorage/exa-csi-driver)", DriverName: "exa.csi.ddn.com", Versions: "v1.0, v1.1", Description: "A Container Storage Interface (CSI) Driver for DDN EXAScaler filesystems", Persistence: "Persistent", AccessModes: "Read/Write Multiple Pods", DynamicProvisioning: "Yes", Features: "Expansion"}, {NameUrl: "[Dell EMC PowerMax](https://github.com/dell/csi-powermax)", DriverName: "csi-powermax.dellemc.com", Versions: "v1.1", Description: "A Container Storage Interface (CSI) Driver for [Dell EMC PowerMax](https://www.delltechnologies.com/en-us/storage/powermax.htm)", Persistence: "Persistent", AccessModes: "Read/Write Single Pod", DynamicProvisioning: "Yes", Features: "Raw Block, Snapshot, Expansion, Cloning, Topology"}, {NameUrl: "[Dell EMC PowerScale](https://github.com/dell/csi-powerscale)", DriverName: "csi-isilon.dellemc.com", Versions: "v1.1", Description: "A Container Storage Interface (CSI) Driver for [Dell EMC PowerScale](https://www.delltechnologies.com/en-us/storage/powerscale.htm)", Persistence: "Persistent and Ephemeral", AccessModes: "Read/Write Multiple Pods", DynamicProvisioning: "Yes", Features: "Snapshot, Expansion, Cloning, Topology"}, {NameUrl: "[Dell EMC PowerStore](https://github.com/dell/csi-powerstore)", DriverName: "csi-powerstore.dellemc.com", Versions: "v1.1", Description: "A Container Storage Interface (CSI) Driver for [Dell EMC PowerStore](https://www.delltechnologies.com/en-us/storage/powerstore-storage-appliance.htm)", Persistence: "Persistent and Ephemeral", AccessModes: "Read/Write Single Pod", DynamicProvisioning: "Yes", Features: "Raw Block, Snapshot, Expansion, Cloning, Topology"}, {NameUrl: "[Dell EMC Unity](https://github.com/dell/csi-unity)", DriverName: "csi-unity.dellemc.com", Versions: "v1.1", Description: "A Container Storage Interface (CSI) Driver for [Dell EMC Unity](https://www.delltechnologies.com/en-us/storage/unity.htm)", Persistence: "Persistent and Ephemeral", AccessModes: "Read/Write Single Pod", DynamicProvisioning: "Yes", Features: "Raw Block, Snapshot, Expansion, Cloning, Topology"}, {NameUrl: "[Dell EMC VxFlexOS](https://github.com/dell/csi-vxflexos)", DriverName: "csi-vxflexos.dellemc.com", Versions: "v1.1", Description: "A Container Storage Interface (CSI) Driver for [Dell EMC VxFlexOS](https://www.delltechnologies.com/en-us/hyperconverged-infrastructure/vxflex.htm)", Persistence: "Persistent", AccessModes: "Read/Write Single Pod", DynamicProvisioning: "Yes", Features: "Raw Block, Snapshot, Expansion, Cloning, Topology"}, {NameUrl: "[democratic-csi](https://github.com/democratic-csi/democratic-csi)", DriverName: "org.democratic-csi", Versions: "v1.0,v1.1,v1.2,v1.3,v1.4,v1.5", Description: "Generic CSI plugin supporting zfs based solutions ([FreeNAS](https://www.freenas.org/) / [TrueNAS](https://www.truenas.com/) and [ZoL](https://zfsonlinux.org/) solutions such as [Ubuntu](https://ubuntu.com/)), [Synology](https://www.synology.com/), and more", Persistence: "Persistent and Ephemeral", AccessModes: "Read/Write Single Pod (Block Volume)

Read/Write Multiple Pods (File Volume)", DynamicProvisioning: "Yes", Features: "Raw Block, Snapshot, Expansion, Cloning"}, {NameUrl: "[Diamanti-CSI](https://diamanti.com/use-cases/io-acceleration/#csi)", DriverName: "dcx.csi.diamanti.com", Versions: "v1.0", Description: "A Container Storage Interface (CSI) Driver for Diamanti DCX Platform", Persistence: "Persistent", AccessModes: "Read/Write Single Pod", DynamicProvisioning: "Yes", Features: "Raw Block, Snapshot, Expansion"}, {NameUrl: "[DigitalOcean Block Storage](https://github.com/digitalocean/csi-digitalocean)", DriverName: "dobs.csi.digitalocean.com", Versions: "v0.3, v1.0", Description: "A Container Storage Interface (CSI) Driver for DigitalOcean Block Storage", Persistence: "Persistent", AccessModes: "Read/Write Single Pod", DynamicProvisioning: "Yes", Features: "Raw Block, Snapshot, Expansion"}, {NameUrl: "[Dothill-CSI](https://github.com/enix/dothill-csi)", DriverName: "dothill.csi.enix.io", Versions: "v1.3", Description: "Generic CSI plugin supporting [Seagate AssuredSan](https://www.seagate.com/fr/fr/support/dothill-san/assuredsan-pro-5000-series/) appliances such as [HPE MSA](https://www.hpe.com/us/en/storage/flash-hybrid.html), [Dell EMC PowerVault ME4](https://www.dell.com/fr-fr/work/shop/productdetailstxn/powervault-me4-series) and others ...", Persistence: "Persistent", AccessModes: "Read/Write Single Node", DynamicProvisioning: "Yes", Features: "Snapshot, Expansion"}, {NameUrl: "[Ember CSI](https://ember-csi.io)", DriverName: "ember-csi.io", Versions: "v0.2, v0.3, v1.0", Description: "Multi-vendor CSI plugin supporting over 80 Drivers to provide block and mount storage to Container Orchestration systems.", Persistence: "Persistent", AccessModes: "Read/Write Single Pod", DynamicProvisioning: "Yes", Features: "Raw Block, Snapshot"}, {NameUrl: "[Excelero NVMesh](https://github.com/Excelero/nvmesh-csi-driver)", DriverName: "nvmesh-csi.excelero.com", Versions: "v1.0, v1.1", Description: "A Container Storage Interface (CSI) Driver for Excelero NVMesh", Persistence: "Persistent", AccessModes: "Read/Write Multiple Pods", DynamicProvisioning: "Yes", Features: "Raw Block, Expansion"}, {NameUrl: "[GCE Persistent Disk](https://github.com/kubernetes-sigs/gcp-compute-persistent-disk-csi-driver)", DriverName: "pd.csi.storage.gke.io", Versions: "v0.3, v1.0", Description: "A Container Storage Interface (CSI) Driver for Google Compute Engine Persistent Disk (GCE PD)", Persistence: "Persistent", AccessModes: "Read/Write Single Pod", DynamicProvisioning: "Yes", Features: "Raw Block, Snapshot, Expansion, Topology"}, {NameUrl: "[Google Cloud Filestore](https://github.com/kubernetes-sigs/gcp-filestore-csi-driver)", DriverName: "com.google.csi.filestore", Versions: "v0.3", Description: "A Container Storage Interface (CSI) Driver for Google Cloud Filestore", Persistence: "Persistent", AccessModes: "Read/Write Multiple Pods", DynamicProvisioning: "Yes", Features: ""}, {NameUrl: "[Google Cloud Storage](https://github.com/ofek/csi-gcs)", DriverName: "gcs.csi.ofek.dev", Versions: "v1.0", Description: "A Container Storage Interface (CSI) Driver for Google Cloud Storage", Persistence: "Persistent and Ephemeral", AccessModes: "Read/Write Multiple Pods", DynamicProvisioning: "Yes", Features: "Expansion"}, {NameUrl: "[GlusterFS](https://github.com/gluster/gluster-csi-driver)", DriverName: "org.gluster.glusterfs", Versions: "v0.3, v1.0", Description: "A Container Storage Interface (CSI) Driver for GlusterFS", Persistence: "Persistent", AccessModes: "Read/Write Multiple Pods", DynamicProvisioning: "Yes", Features: "Snapshot"}, {NameUrl: "[Gluster VirtBlock](https://github.com/gluster/gluster-csi-driver)", DriverName: "org.gluster.glustervirtblock", Versions: "v0.3, v1.0", Description: "A Container Storage Interface (CSI) Driver for Gluster Virtual Block volumes", Persistence: "Persistent", AccessModes: "Read/Write Single Pod", DynamicProvisioning: "Yes", Features: ""}, {NameUrl: "[Hammerspace CSI](https://github.com/hammer-space/csi-plugin)", DriverName: "com.hammerspace.csi", Versions: "v0.3, v1.0", Description: "A Container Storage Interface (CSI) Driver for Hammerspace Storage", Persistence: "Persistent", AccessModes: "Read/Write Multiple Pods", DynamicProvisioning: "Yes", Features: "Raw Block, Snapshot"}, {NameUrl: "[Hedvig](https://documentation.commvault.com/commvault/hedvig/others/pdf/Hedvig_CSI_User_Guide.pdf)", DriverName: "io.hedvig.csi", Versions: "v1.0", Description: "A Container Storage Interface (CSI) Driver for Hedvig", Persistence: "Persistent", AccessModes: "Read/Write Multiple Pods", DynamicProvisioning: "Yes", Features: "Raw Block, Snapshot, Expansion"}, {NameUrl: "[Hetzner Cloud Volumes CSI](https://github.com/hetznercloud/csi-driver)", DriverName: "csi.hetzner.cloud", Versions: "v0.3, v1.0", Description: "A Container Storage Interface (CSI) Driver for Hetzner Cloud Volumes", Persistence: "Persistent", AccessModes: "Read/Write Single Pod", DynamicProvisioning: "Yes", Features: "Raw Block, Expansion"}, {NameUrl: "[Hitachi Vantara](https://knowledge.hitachivantara.com/Documents/Adapters_and_Drivers/Storage_Adapters_and_Drivers/Containers)", DriverName: "hspc.csi.hitachi.com", Versions: "v1.2", Description: "A Container Storage Interface (CSI) Driver for VSP series Storage", Persistence: "Persistent", AccessModes: "Read/Write Single Pod", DynamicProvisioning: "Yes", Features: "Raw Block, Snapshot, Expansion, Cloning"}, {NameUrl: "[HPE](https://github.com/hpe-storage/csi-driver)", DriverName: "csi.hpe.com", Versions: "v1.3", Description: "A [multi-platform](https://scod.hpedev.io/csi_driver) Container Storage Interface (CSI) driver. Supports [HPE Alletra](https://hpe.com/storage/alletra), [Nimble Storage](https://hpe.com/storage/nimble), [Primera](https://hpe.com/storage/primera) and [3PAR](https://hpe.com/storage/3par)", Persistence: "Persistent and Ephemeral", AccessModes: "Read/Write Multiple Pods", DynamicProvisioning: "Yes", Features: "Raw Block, Snapshot, Expansion, Cloning"}, {NameUrl: "[HPE Ezmeral (MapR)](https://github.com/mapr/mapr-csi)", DriverName: "com.mapr.csi-kdf", Versions: "v1.3", Description: "A Container Storage Interface (CSI) Driver for HPE Ezmeral Data Fabric", Persistence: "Persistent", AccessModes: "Read/Write Multiple Pods", DynamicProvisioning: "Yes", Features: "Raw Block, Snapshot, Expansion, Cloning"}, {NameUrl: "[Huawei Storage CSI](https://github.com/Huawei/eSDK_K8S_Plugin)", DriverName: "csi.huawei.com", Versions: "v1.0, v1.1, v1.2", Description: "A Container Storage Interface (CSI) Driver for FusionStorage, OceanStor 100D, OceanStor Pacific, OceanStor Dorado V3, OceanStor Dorado V6, OceanStor V3, OceanStor V5", Persistence: "Persistent", AccessModes: "Read/Write Multiple Pod", DynamicProvisioning: "Yes", Features: "Snapshot, Expansion, Cloning"}, {NameUrl: "[HyperV CSI](https://github.com/Zetanova/hyperv-csi-driver)", DriverName: "eu.zetanova.csi.hyperv", Versions: "v1.0, v1.1", Description: "A Container Storage Interface (CSI) driver to manage hyperv hosts", Persistence: "Persistent", AccessModes: "Read/Write Multiple Pods", DynamicProvisioning: "Yes", Features: ""}, {NameUrl: "[IBM Block Storage](https://github.com/ibm/ibm-block-csi-driver)", DriverName: "block.csi.ibm.com", Versions: "v1.0, v1.1, v1.2", Description: "A Container Storage Interface (CSI) [Driver](https://www.ibm.com/docs/en/stg-block-csi-driver) for IBM Spectrum Virtualize Family, IBM FlashSystem A9000 and A9000R, IBM DS8000 Family 8.x and higher.", Persistence: "Persistent", AccessModes: "Read/Write Single Pod", DynamicProvisioning: "Yes", Features: "Raw Block, Snapshot, Expansion, Cloning, Topology"}, {NameUrl: "[IBM Spectrum Scale](https://github.com/IBM/ibm-spectrum-scale-csi)", DriverName: "spectrumscale.csi.ibm.com", Versions: "v1.0, v1.1", Description: "A Container Storage Interface (CSI) [Driver](https://www.ibm.com/docs/en/spectrum-scale-csi) for the IBM Spectrum Scale File System", Persistence: "Persistent", AccessModes: "Read/Write Multiple Pod", DynamicProvisioning: "Yes", Features: "Snapshot"}, {NameUrl: "[IBM Cloud Block Storage VPC CSI Driver](https://cloud.ibm.com/docs/containers?topic=containers-vpc-block)", DriverName: "vpc.block.csi.ibm.io", Versions: "v1.0", Description: "A Container Storage Interface (CSI) [Driver](https://cloud.ibm.com/docs/containers?topic=containers-vpc-block) for IBM Cloud Kubernetes Service and Red Hat OpenShift on IBM Cloud", Persistence: "Persistent", AccessModes: "Read/Write Single Pod", DynamicProvisioning: "Yes", Features: "Raw Block"}, {NameUrl: "[Infinidat](https://github.com/Infinidat/infinibox-csi-driver)", DriverName: "infinibox-csi-driver", Versions: "v1.0, v1.1", Description: "A Container Storage Interface (CSI) Driver for Infinidat [InfiniBox](https://infinidat.com/en/products-technology/infinibox)", Persistence: "Persistent", AccessModes: "Read/Write Multiple Pods", DynamicProvisioning: "Yes", Features: "Raw Block, Snapshot, Expansion, Cloning"}, {NameUrl: "[Inspur InStorage CSI](https://github.com/OpenInspur/instorage-k8s)", DriverName: "csi-instorage", Versions: "v1.0", Description: "A Container Storage Interface (CSI) Driver for inspur AS/HF/CS/CF Series Primary Storage, inspur AS13000 Series SDS Storage", Persistence: "Persistent", AccessModes: "Read/Write Single Pod", DynamicProvisioning: "Yes", Features: "Raw Block, Snapshot, Expansion, Cloning"}, {NameUrl: "[Intel PMEM-CSI](https://github.com/intel/pmem-csi)", DriverName: "pmem-csi.intel.com", Versions: "v1.0", Description: "A Container Storage Interface (CSI) driver for [PMEM](https://pmem.io/) from Intel", Persistence: "Persistent and Ephemeral", AccessModes: "Read/Write Single Pod", DynamicProvisioning: "Yes", Features: "Raw Block"}, {NameUrl: "[Intelliflash Block Storage](https://github.com/DDNStorage/intelliflash-csi-block-driver)", DriverName: "intelliflash-csi-block-driver.intelliflash.com", Versions: "v1.0, v1.1, v1.2", Description: "A Container Storage Interface (CSI) Driver for Intelliflash Block Storage", Persistence: "Persistent", AccessModes: "Read/Write Multiple Pods", DynamicProvisioning: "Yes", Features: "Snapshot, Expansion, Cloning, Topology"}, {NameUrl: "[Intelliflash File Storage](https://github.com/DDNStorage/intelliflash-csi-file-driver)", DriverName: "intelliflash-csi-file-driver.intelliflash.com", Versions: "v1.0, v1.1, v1.2", Description: "A Container Storage Interface (CSI) Driver for Intelliflash File Storage", Persistence: "Persistent", AccessModes: "Read/Write Multiple Pods", DynamicProvisioning: "Yes", Features: "Snapshot, Expansion, Cloning, Topology"}, {NameUrl: "[ionir ](https://github.com/ionir-cloud)", DriverName: "ionir", Versions: "v1.2", Description: "A Container Storage Interface (CSI) Driver for [ionir](https://www.ionir.com/) Kubernetes-Native Storage", Persistence: "Persistent", AccessModes: "Read/Write Single Pod", DynamicProvisioning: "Yes", Features: "Raw Block, Cloning"}, {NameUrl: "[JuiceFS](https://github.com/juicedata/juicefs-csi-driver)", DriverName: "csi.juicefs.com", Versions: "v0.3, v1.0", Description: "A Container Storage Interface (CSI) Driver for JuiceFS File System", Persistence: "Persistent", AccessModes: "Read/Write Multiple Pod", DynamicProvisioning: "Yes", Features: ""}, {NameUrl: "[kaDalu](https://github.com/kadalu/kadalu)", DriverName: "org.kadalu.gluster", Versions: "v0.3", Description: "A CSI Driver (and operator) for GlusterFS", Persistence: "Persistent", AccessModes: "Read/Write Multiple Pods", DynamicProvisioning: "Yes", Features: ""}, {NameUrl: "[KumoScale Block Storage](https://github.com/KioxiaAmerica/kumoscale-csi)", DriverName: "kumoscale.kioxia.com", Versions: "v1.0", Description: "A Container Storage Interface (CSI) Driver for KumoScale Block Storage", Persistence: "Persistent", AccessModes: "Read/Write Single Pod", DynamicProvisioning: "Yes", Features: "Raw Block, Snapshot, Expansion, Topology"}, {NameUrl: "[Linode Block Storage](https://github.com/linode/linode-blockstorage-csi-driver)", DriverName: "linodebs.csi.linode.com", Versions: "v1.0", Description: "A Container Storage Interface (CSI) Driver for Linode Block Storage", Persistence: "Persistent", AccessModes: "Read/Write Single Pod", DynamicProvisioning: "Yes", Features: ""}, {NameUrl: "[LINSTOR](https://github.com/piraeusdatastore/linstor-csi)", DriverName: "linstor.csi.linbit.com", Versions: "v1.2", Description: "A Container Storage Interface (CSI) Driver for [LINSTOR](https://www.linbit.com/en/linstor/) volumes", Persistence: "Persistent", AccessModes: "Read/Write Single Pod", DynamicProvisioning: "Yes", Features: "Raw Block, Snapshot, Expansion, Cloning, Topology"}, {NameUrl: "[Longhorn](https://github.com/longhorn/longhorn)", DriverName: "driver.longhorn.io", Versions: "v1.2", Description: "A Container Storage Interface (CSI) Driver for [Longhorn](https://longhorn.io/) volumes", Persistence: "Persistent", AccessModes: "Read/Write Single Node", DynamicProvisioning: "Yes", Features: "Raw Block"}, {NameUrl: "[MacroSAN](https://github.com/macrosan-csi/macrosan-csi-driver)", DriverName: "csi-macrosan", Versions: "v1.0", Description: "A Container Storage Interface (CSI) Driver for MacroSAN Block Storage", Persistence: "Persistent", AccessModes: "Read/Write Single Pod", DynamicProvisioning: "Yes", Features: ""}, {NameUrl: "[Manila](https://github.com/kubernetes/cloud-provider-openstack/tree/master/pkg/csi/manila)", DriverName: "manila.csi.openstack.org", Versions: "v1.1, v1.2", Description: "A Container Storage Interface (CSI) Driver for OpenStack Shared File System Service (Manila)", Persistence: "Persistent", AccessModes: "Read/Write Multiple Pods", DynamicProvisioning: "Yes", Features: "Snapshot, Topology"}, {NameUrl: "[MooseFS](https://github.com/moosefs/moosefs-csi)", DriverName: "com.tuxera.csi.moosefs", Versions: "v1.0", Description: "A Container Storage Interface (CSI) Driver for [MooseFS](https://moosefs.com/) clusters.", Persistence: "Persistent", AccessModes: "Read/Write Multiple Pods", DynamicProvisioning: "Yes", Features: ""}, {NameUrl: "[NetApp](https://github.com/NetApp/trident)", DriverName: "csi.trident.netapp.io", Versions: "v1.0, v1.1, v1.2, v1.3", Description: "A Container Storage Interface (CSI) Driver for NetApp's [Trident](https://netapp-trident.readthedocs.io/) container storage orchestrator", Persistence: "Persistent", AccessModes: "Read/Write Multiple Pods", DynamicProvisioning: "Yes", Features: "Raw Block, Snapshot, Expansion, Cloning, Topology"}, {NameUrl: "[NexentaStor File Storage](https://github.com/Nexenta/nexentastor-csi-driver)", DriverName: "nexentastor-csi-driver.nexenta.com", Versions: "v1.0, v1.1, v1.2", Description: "A Container Storage Interface (CSI) Driver for NexentaStor File Storage", Persistence: "Persistent", AccessModes: "Read/Write Multiple Pods", DynamicProvisioning: "Yes", Features: "Snapshot, Expansion, Cloning, Topology"}, {NameUrl: "[NexentaStor Block Storage](https://github.com/Nexenta/nexentastor-csi-driver-block)", DriverName: "nexentastor-block-csi-driver.nexenta.com", Versions: "v1.0, v1.1, v1.2", Description: "A Container Storage Interface (CSI) Driver for NexentaStor over iSCSI protocol", Persistence: "Persistent", AccessModes: "Read/Write Multiple Pods", DynamicProvisioning: "Yes", Features: "Snapshot, Expansion, Cloning, Topology, Raw block"}, {NameUrl: "[Nutanix](https://github.com/nutanix/csi-plugin)", DriverName: "csi.nutanix.com", Versions: "v0.3, v1.0, v1.2", Description: "A Container Storage Interface (CSI) Driver for Nutanix", Persistence: "Persistent", AccessModes: "Read/Write Single Pod with Nutanix Volumes and Read/Write Multiple Pods with Nutanix Files", DynamicProvisioning: "Yes", Features: "Raw Block, Snapshot, Expansion, Cloning"}, {NameUrl: "[OpenEBS](https://github.com/openebs/csi)", DriverName: "cstor.csi.openebs.io", Versions: "v1.0", Description: "A Container Storage Interface (CSI) Driver for [OpenEBS](https://www.openebs.io/)", Persistence: "Persistent", AccessModes: "Read/Write Single Pod", DynamicProvisioning: "Yes", Features: "Expansion, Snapshot, Cloning"}, {NameUrl: "[Open-E](https://github.com/open-e/JovianDSS-KubernetesCSI)", DriverName: "com.open-e.joviandss.csi", Versions: "v1.0", Description: "A Container Storage Interface (CSI) Driver for Open-E JovianDSS Storage", Persistence: "Persistent", AccessModes: "Read/Write Single Pod", DynamicProvisioning: "Yes", Features: "Snapshot, Cloning"}, {NameUrl: "[Open-Local](https://github.com/alibaba/open-local)", DriverName: "local.csi.alibaba.com", Versions: "v1.0", Description: "A Container Storage Interface (CSI) Driver for Local Storage", Persistence: "Persistent", AccessModes: "Read/Write Single Pod", DynamicProvisioning: "Yes", Features: "Raw Block, Expansion, Snapshot"}, {NameUrl: "[Oracle Cloud Infrastructure(OCI) Block Storage](https://github.com/oracle/oci-cloud-controller-manager/blob/master/container-storage-interface.md)", DriverName: "blockvolume.csi.oraclecloud.com", Versions: "v1.1", Description: "A Container Storage Interface (CSI) Driver for Oracle Cloud Infrastructure (OCI) Block Storage", Persistence: "Persistent", AccessModes: "Read/Write Single Pod", DynamicProvisioning: "Yes", Features: "Topology"}, {NameUrl: "[oVirt](https://github.com/openshift/ovirt-csi-driver)", DriverName: "csi.ovirt.org", Versions: "v1.0", Description: "A Container Storage Interface (CSI) Driver for [oVirt](https://ovirt.org)", Persistence: "Persistent", AccessModes: "Read/Write Single Pod", DynamicProvisioning: "Yes", Features: "Block, File Storage"}, {NameUrl: "[Portworx](https://github.com/libopenstorage/openstorage/tree/master/csi)", DriverName: "pxd.portworx.com", Versions: "v1.4", Description: "A Container Storage Interface (CSI) Driver for [Portworx](https://docs.portworx.com/portworx-install-with-kubernetes/storage-operations/csi/)", Persistence: "Persistent and Ephemeral", AccessModes: "Read/Write Multiple Pods", DynamicProvisioning: "Yes", Features: "Snapshot, Expansion, Raw Block, Cloning"}, {NameUrl: "[Pure Storage CSI](https://github.com/purestorage/pso-csi)", DriverName: "pure-csi", Versions: "v1.0, v1.1, v1.2, v1.3", Description: "A Container Storage Interface (CSI) Driver for Pure Storage's [Pure Service Orchestrator](https://purestorage.com/containers)", Persistence: "Persistent and Ephemeral", AccessModes: "Read/Write Multiple Pods", DynamicProvisioning: "Yes", Features: "Snapshot, Cloning, Raw Block, Topology, Expansion"}, {NameUrl: "[QingCloud CSI](https://github.com/yunify/qingcloud-csi)", DriverName: "disk.csi.qingcloud.com", Versions: "v1.1", Description: "A Container Storage Interface (CSI) Driver for QingCloud Block Storage", Persistence: "Persistent", AccessModes: "Read/Write Single Pod", DynamicProvisioning: "Yes", Features: "Raw Block, Snapshot, Expansion, Cloning"}, {NameUrl: "[QingStor CSI](https://github.com/yunify/qingstor-csi)", DriverName: "neonsan.csi.qingstor.com", Versions: "v0.3, v1.1", Description: "A Container Storage Interface (CSI) Driver for NeonSAN storage system", Persistence: "Persistent", AccessModes: "Read/Write Multiple Pods", DynamicProvisioning: "Yes", Features: "Raw Block, Snapshot, Expansion, Cloning"}, {NameUrl: "[Quobyte](https://github.com/quobyte/quobyte-csi)", DriverName: "quobyte-csi", Versions: "v0.2", Description: "A Container Storage Interface (CSI) Driver for Quobyte", Persistence: "Persistent", AccessModes: "Read/Write Multiple Pods", DynamicProvisioning: "Yes", Features: ""}, {NameUrl: "[ROBIN](https://get.robin.io/)", DriverName: "robin", Versions: "v0.3, v1.0", Description: "A Container Storage Interface (CSI) Driver for [ROBIN](https://docs.robin.io)", Persistence: "Persistent", AccessModes: "Read/Write Multiple Pods", DynamicProvisioning: "Yes", Features: "Raw Block, Snapshot, Expansion, Cloning"}, {NameUrl: "[SandStone](https://github.com/sandstone-storage/sandstone-csi-driver)", DriverName: "csi-sandstone-plugin", Versions: "v1.0", Description: "A Container Storage Interface (CSI) Driver for SandStone USP", Persistence: "Persistent", AccessModes: "Read/Write Multiple Pods", DynamicProvisioning: "Yes", Features: "Raw Block, Snapshot, Expansion, Cloning"}, {NameUrl: "[Sangfor-EDS-File-Storage](https://github.com/evan37717/sangfor-eds-csi)", DriverName: "eds.csi.file.sangfor.com", Versions: "v1.0", Description: "A Container Storage Interface (CSI) Driver for Sangfor Distributed File Storage(EDS)", Persistence: "Persistent", AccessModes: "Read/Write Multiple Pods", DynamicProvisioning: "Yes", Features: "Raw Block, Snapshot, Expansion, Cloning"}, {NameUrl: "[Sangfor-EDS-Block-Storage](https://github.com/eds-wzc/sangfor-eds-csi)", DriverName: "eds.csi.block.sangfor.com", Versions: "v1.0", Description: "A Container Storage Interface (CSI) Driver for Sangfor Block Storage(EDS)", Persistence: "Persistent", AccessModes: "Read/Write Single Pod", DynamicProvisioning: "Yes", Features: "Raw Block, Snapshot, Expansion, Cloning"}, {NameUrl: "[Scaleway CSI](https://github.com/scaleway/scaleway-csi)", DriverName: "csi.scaleway.com", Versions: "v1.2.0", Description: "Container Storage Interface (CSI) Driver for [Scaleway Block Storage](https://www.scaleway.com/block-storage/)", Persistence: "Persistent", AccessModes: "Read/Write Single Pod", DynamicProvisioning: "Yes", Features: "Raw Block, Snapshot, Expansion, Topology"}, {NameUrl: "[Seagate Exos X](https://github.com/Seagate/seagate-exos-x-csi)", DriverName: "csi-exos-x.seagate.com", Versions: "v1.3", Description: "CSI driver for [Seagate Exos X](https://www.seagate.com/products/storage/data-storage-systems/raid/) and OEM systems", Persistence: "Persistent", AccessModes: "Read/Write Single Pod", DynamicProvisioning: "Yes", Features: "Snapshot, Expansion, Cloning"}, {NameUrl: "[SeaweedFS](https://github.com/seaweedfs/seaweedfs-csi-driver)", DriverName: "seaweedfs-csi-driver", Versions: "v1.0", Description: "A Container Storage Interface (CSI Driver for [SeaweedFS](https://github.com/chrislusf/seaweedfs))", Persistence: "Persistent", AccessModes: "Read/Write Multiple Pods", DynamicProvisioning: "Yes", Features: "Snapshot, Expansion, Cloning"}, {NameUrl: "[Secrets Store CSI Driver](https://github.com/kubernetes-sigs/secrets-store-csi-driver)", DriverName: "secrets-store.csi.k8s.io", Versions: "v0.0.10", Description: "A Container Storage Interface (CSI) Driver for mounting secrets, keys, and certs stored in enterprise-grade external secrets stores as volumes.", Persistence: "Ephemeral", AccessModes: "N/A", DynamicProvisioning: "N/A", Features: ""}, {NameUrl: "[SmartX](http://www.smartx.com/?locale=en)", DriverName: "csi-smtx-plugin", Versions: "v1.0", Description: "A Container Storage Interface (CSI) Driver for SmartX ZBS Storage", Persistence: "Persistent", AccessModes: "Read/Write Multiple Pods", DynamicProvisioning: "Yes", Features: "Snapshot, Expansion"}, {NameUrl: "[SODA](https://github.com/sodafoundation/nbp/tree/master/csi)", DriverName: "csi-soda-plugin", Versions: "v1.0", Description: "A Container Storage Interface (CSI) Driver for [SODA](https://sodafoundation.io/)", Persistence: "Persistent", AccessModes: "Read/Write Single Pod", DynamicProvisioning: "Yes", Features: "Raw Block, Snapshot"}, {NameUrl: "[SPDK-CSI](https://github.com/spdk/spdk-csi)", DriverName: "csi.spdk.io", Versions: "v1.1", Description: "A Container Storage Interface (CSI) Driver for [SPDK](https://spdk.io/)", Persistence: "Persistent and Ephemeral", AccessModes: "Read/Write Single Pod", DynamicProvisioning: "Yes", Features: "Raw Block, Snapshot"}, {NameUrl: "[StorageOS](https://docs.storageos.com/docs/platforms/kubernetes/install/)", DriverName: "storageos", Versions: "v0.3, v1.0", Description: "A Container Storage Interface (CSI) Driver for [StorageOS](https://storageos.com/)", Persistence: "Persistent", AccessModes: "Read/Write Multiple Pods", DynamicProvisioning: "Yes", Features: "Raw Block, Snapshot"}, {NameUrl: "[Storidge](https://docs.storidge.com/kubernetes_storage/overview.html)", DriverName: "csi.cio.storidge.com", Versions: "v0.3, v1.0", Description: "A Container Storage Interface (CSI) Driver for [Storidge CIO](https://storidge.com/)", Persistence: "Persistent", AccessModes: "Read/Write Multiple Pods", DynamicProvisioning: "Yes", Features: "Snapshot, Expansion"}, {NameUrl: "[StorPool](https://kb.storpool.com/storpool_integrations/github/kubernetes.html)", DriverName: "csi-driver.storpool.com", Versions: "v1.0", Description: "A Container Storage Interface (CSI) Driver for [StorPool](https://storpool.com/)", Persistence: "Persistent and Ephemeral", AccessModes: "Read/Write Multiple Pods", DynamicProvisioning: "Yes", Features: "Expansion"}, {NameUrl: "[Synology](https://github.com/SynologyOpenSource/synology-csi)", DriverName: "csi.san.synology.com", Versions: "v1.0", Description: "A Container Storage Interface (CSI) Driver for Synology NAS", Persistence: "Persistent", AccessModes: "Read/Write Multiple Pods", DynamicProvisioning: "Yes", Features: "Snapshot, Expansion, Cloning"}, {NameUrl: "[Tencent Cloud Block Storage](https://github.com/TencentCloud/kubernetes-csi-tencentcloud)", DriverName: "com.tencent.cloud.csi.cbs", Versions: "v1.0", Description: "A Container Storage Interface (CSI) Driver for Tencent Cloud Block Storage", Persistence: "Persistent", AccessModes: "Read/Write Single Pod", DynamicProvisioning: "Yes", Features: "Snapshot"}, {NameUrl: "[Tencent Cloud File Storage](https://github.com/TencentCloud/kubernetes-csi-tencentcloud)", DriverName: "com.tencent.cloud.csi.cfs", Versions: "v1.0", Description: "A Container Storage Interface (CSI) Driver for Tencent Cloud File Storage", Persistence: "Persistent", AccessModes: "Read/Write Multiple Pods", DynamicProvisioning: "Yes", Features: "Snapshot"}, {NameUrl: "[Tencent Cloud Object Storage](https://github.com/TencentCloud/kubernetes-csi-tencentcloud)", DriverName: "com.tencent.cloud.csi.cosfs", Versions: "v1.0", Description: "A Container Storage Interface (CSI) Driver for Tencent Cloud Object Storage", Persistence: "Persistent", AccessModes: "Read/Write Multiple Pods", DynamicProvisioning: "No", Features: "Snapshot"}, {NameUrl: "[TopoLVM](https://github.com/cybozu-go/topolvm)", DriverName: "topolvm.cybozu.com", Versions: "v1.1", Description: "A Container Storage Interface (CSI) Driver for LVM", Persistence: "Persistent and Ephemeral", AccessModes: "Read/Write Single Pod", DynamicProvisioning: "Yes", Features: "Raw Block, Expansion, Topology Aware"}, {NameUrl: "[VAST Data](https://github.com/vast-data/vast-csi)", DriverName: "csi.vastdata.com", Versions: "v1.0", Description: "A Container Storage Interface (CSI) Driver for VAST Data", Persistence: "Persistent", AccessModes: "Read/Write Multiple Pods", DynamicProvisioning: "Yes", Features: "Raw Block, Expansion, Topology Aware"}, {NameUrl: "[XSKY-EBS](https://xsky-storage.github.io/xsky-csi-driver/csi-block.html)", DriverName: "csi.block.xsky.com", Versions: "v1.0", Description: "A Container Storage Interface (CSI) Driver for XSKY Distributed Block Storage (X-EBS)", Persistence: "Persistent", AccessModes: "Read/Write Single Pod", DynamicProvisioning: "Yes", Features: "Raw Block, Snapshot, Expansion, Cloning"}, {NameUrl: "[XSKY-EUS](https://xsky-storage.github.io/xsky-csi-driver/csi-fs.html)", DriverName: "csi.fs.xsky.com", Versions: "v1.0", Description: "A Container Storage Interface (CSI) Driver for XSKY Distributed File Storage (X-EUS)", Persistence: "Persistent", AccessModes: "Read/Write Multiple Pods", DynamicProvisioning: "Yes", Features: ""}, {NameUrl: "[Vault](https://github.com/kubevault/csi-driver)", DriverName: "secrets.csi.kubevault.com", Versions: "v1.0", Description: "A Container Storage Interface (CSI) Driver for mounting HashiCorp Vault secrets as volumes.", Persistence: "Ephemeral", AccessModes: "N/A", DynamicProvisioning: "N/A", Features: ""}, {NameUrl: "[VDA](https://virtual-disk-array.readthedocs.io/en/latest/Introduction.html)", DriverName: "csi.vda.io", Versions: "v1.0", Description: "An open source block storage system base on SPDK", Persistence: "Persistent", AccessModes: "Read/Write Single Pod", DynamicProvisioning: "N/A", Features: ""}, {NameUrl: "[Veritas InfoScale Volumes](https://www.veritas.com/solution/virtualization/containers.html)", DriverName: "org.veritas.infoscale", Versions: "v1.2", Description: "A Container Storage Interface (CSI) Driver for Veritas InfoScale volumes", Persistence: "Persistent", AccessModes: "Read/Write Multiple Pods", DynamicProvisioning: "Yes", Features: "Snapshot, Expansion, Cloning"}, {NameUrl: "[vSphere](https://github.com/kubernetes-sigs/vsphere-csi-driver)", DriverName: "csi.vsphere.vmware.com", Versions: "v1.0", Description: "A Container Storage Interface (CSI) Driver for VMware vSphere", Persistence: "Persistent", AccessModes: "Read/Write Single Pod (Block Volume)

Read/Write Multiple Pods (File Volume)", DynamicProvisioning: "Yes", Features: "Raw Block,

Expansion (Block Volume),

Topology Aware (Block Volume)"}, {NameUrl: "[Vultr Block Storage](https://github.com/vultr/vultr-csi)", DriverName: "block.csi.vultr.com", Versions: "v1.2", Description: "A Container Storage Interface (CSI) Driver for Vultr Block Storage", Persistence: "Persistent", AccessModes: "Read/Write Single Pod", DynamicProvisioning: "Yes", Features: ""}, {NameUrl: "[WekaIO](https://github.com/weka/csi-wekafs)", DriverName: "csi.weka.io", Versions: "v1.0", Description: "A Container Storage Interface (CSI) Driver for mounting WekaIO WekaFS filesystem as volumes", Persistence: "Persistent", AccessModes: "Read/Write Multiple Pods", DynamicProvisioning: "Yes", Features: ""}, {NameUrl: "[Yandex.Cloud](https://github.com/flant/yandex-csi-driver)", DriverName: "yandex.csi.flant.com", Versions: "v1.2", Description: "A Container Storage Interface (CSI) plugin for Yandex.Cloud Compute Disks", Persistence: "Persistent", AccessModes: "Read/Write Single Pod", DynamicProvisioning: "Yes", Features: ""}, {NameUrl: "[YanRongYun](http://www.yanrongyun.com/)", DriverName: "?", Versions: "v1.0", Description: "A Container Storage Interface (CSI) Driver for YanRong YRCloudFile Storage", Persistence: "Persistent", AccessModes: "Read/Write Multiple Pods", DynamicProvisioning: "Yes", Features: ""}, {NameUrl: "[Zadara-CSI](https://github.com/zadarastorage/zadara-csi)", DriverName: "csi.zadara.com", Versions: "v1.0, v1.1", Description: "A Container Storage Interface (CSI) plugin for Zadara VPSA Storage Array & VPSA All-Flash", Persistence: "Persistent", AccessModes: "Read/Write Multiple Pods", DynamicProvisioning: "Yes", Features: "Raw Block, Snapshot, Expansion, Cloning"}, } ================================================ FILE: pkg/kubestr/kubernetes_checks.go ================================================ package kubestr import ( "fmt" "strconv" "github.com/pkg/errors" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" version "k8s.io/apimachinery/pkg/version" ) const ( // MinK8sMajorVersion is the minimum supported Major version MinK8sMajorVersion = 1 // MinK8sMinorVersion is the minimum supported Minor version MinK8sMinorVersion = 12 // MinK8sGitVersion is the minimum supported k8s version MinK8sGitVersion = "v1.12.0" // RbacGroupName describe hte rbac group name RbacGroupName = "rbac.authorization.k8s.io" ) // KubernetesChecks runs all the baseline checks on the cluster func (p *Kubestr) KubernetesChecks() []*TestOutput { var result []*TestOutput result = append(result, p.validateK8sVersion()) result = append(result, p.validateRBAC()) result = append(result, p.validateAggregatedLayer()) return result } // validateK8sVersion validates the clusters K8s version func (p *Kubestr) validateK8sVersion() *TestOutput { testName := "Kubernetes Version Check" version, err := p.validateK8sVersionHelper() if err != nil { return MakeTestOutput(testName, StatusError, err.Error(), nil) } return MakeTestOutput(testName, StatusOK, fmt.Sprintf("Valid kubernetes version (%s)", version.String()), version) } // getK8sVersion fetches the k8s vesion func (p *Kubestr) validateK8sVersionHelper() (*version.Info, error) { version, err := p.cli.Discovery().ServerVersion() if err != nil { return nil, err } majorStr := version.Major if len(majorStr) > 1 && string(majorStr[len(majorStr)-1]) == "+" { majorStr = majorStr[:len(majorStr)-1] } major, err := strconv.Atoi(majorStr) if err != nil { return nil, errors.Wrap(err, "unable to derive kubernetes major version") } minorStr := version.Minor if len(minorStr) > 1 && string(minorStr[len(minorStr)-1]) == "+" { minorStr = minorStr[:len(minorStr)-1] } minor, err := strconv.Atoi(minorStr) if err != nil { return nil, errors.Wrap(err, "unable to derive kubernetes minor version") } if (major < MinK8sMajorVersion) || (major == MinK8sMajorVersion && minor < MinK8sMinorVersion) { return version, fmt.Errorf("current kubernetes version (%s) is not supported, minimum version is %s", version.String(), MinK8sGitVersion) } return version, nil } func (p *Kubestr) validateRBAC() *TestOutput { testName := "RBAC Check" //fmt.Println(" Checking if Kubernetes RBAC is enabled:") group, err := p.validateRBACHelper() if err != nil { return MakeTestOutput(testName, StatusError, err.Error(), nil) } return MakeTestOutput(testName, StatusOK, "Kubernetes RBAC is enabled", *group) } // getRBAC runs the Rbac test func (p *Kubestr) validateRBACHelper() (*v1.APIGroup, error) { serverGroups, err := p.cli.Discovery().ServerGroups() if err != nil { return nil, err } for _, group := range serverGroups.Groups { if group.Name == RbacGroupName { return &group, nil } } return nil, fmt.Errorf("Kubernetes RBAC is not enabled") //nolint:staticcheck } func (p *Kubestr) validateAggregatedLayer() *TestOutput { testName := "Aggregated Layer Check" resourceList, err := p.validateAggregatedLayerHelper() if err != nil { MakeTestOutput(testName, StatusError, err.Error(), nil) } return MakeTestOutput(testName, StatusOK, "The Kubernetes Aggregated Layer is enabled", resourceList) } // getAggregatedLayer checks the aggregated API layer func (p *Kubestr) validateAggregatedLayerHelper() (*v1.APIResourceList, error) { _, serverResources, err := p.cli.Discovery().ServerGroupsAndResources() if err != nil { return nil, err } for _, resourceList := range serverResources { if resourceList.GroupVersion == "apiregistration.k8s.io/v1" || resourceList.GroupVersion == "apiregistration.k8s.io/v1beta1" { return resourceList, nil } } return nil, fmt.Errorf("can not detect the Aggregated API Layer, is it enabled?") } ================================================ FILE: pkg/kubestr/kubernetes_checks_test.go ================================================ package kubestr import ( "testing" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" version "k8s.io/apimachinery/pkg/version" discoveryfake "k8s.io/client-go/discovery/fake" "k8s.io/client-go/kubernetes/fake" . "gopkg.in/check.v1" ) func Test(t *testing.T) { TestingT(t) } type K8sChecksTestSuite struct{} var _ = Suite(&K8sChecksTestSuite{}) func (s *K8sChecksTestSuite) TestGetK8sVersion(c *C) { for _, tc := range []struct { ver *version.Info checker Checker out *version.Info }{ { ver: &version.Info{Major: "1", Minor: "17", GitVersion: "v1.17"}, checker: IsNil, out: &version.Info{Major: "1", Minor: "17", GitVersion: "v1.17"}, }, { ver: &version.Info{Major: "1", Minor: "11", GitVersion: "v1.11"}, checker: NotNil, out: &version.Info{Major: "1", Minor: "11", GitVersion: "v1.11"}, }, { ver: &version.Info{Major: "1", Minor: "", GitVersion: "v1."}, checker: NotNil, out: nil, }, { ver: &version.Info{Major: "", Minor: "11", GitVersion: "v."}, checker: NotNil, out: nil, }, } { cli := fake.NewSimpleClientset() cli.Discovery().(*discoveryfake.FakeDiscovery).FakedServerVersion = tc.ver p := &Kubestr{cli: cli} out, err := p.validateK8sVersionHelper() c.Assert(out, DeepEquals, tc.out) c.Check(err, tc.checker) } } func (s *K8sChecksTestSuite) TestValidateRBAC(c *C) { for _, tc := range []struct { resources []*metav1.APIResourceList checker Checker out *metav1.APIGroup }{ { resources: []*metav1.APIResourceList{ { GroupVersion: "/////", }, }, checker: NotNil, out: nil, }, { resources: []*metav1.APIResourceList{ { GroupVersion: "rbac.authorization.k8s.io/v1", }, }, checker: IsNil, out: &metav1.APIGroup{ Name: "rbac.authorization.k8s.io", Versions: []metav1.GroupVersionForDiscovery{ {GroupVersion: "rbac.authorization.k8s.io/v1", Version: "v1"}, }, PreferredVersion: metav1.GroupVersionForDiscovery{GroupVersion: "rbac.authorization.k8s.io/v1", Version: "v1"}, }, }, { resources: []*metav1.APIResourceList{ { GroupVersion: "notrbac.authorization.k8s.io/v1", }, }, checker: NotNil, out: nil, }, } { cli := fake.NewSimpleClientset() cli.Discovery().(*discoveryfake.FakeDiscovery).Resources = tc.resources p := &Kubestr{cli: cli} out, err := p.validateRBACHelper() c.Assert(out, DeepEquals, tc.out) c.Check(err, tc.checker) } } func (s *K8sChecksTestSuite) TestValidateAggregatedLayer(c *C) { for _, tc := range []struct { resources []*metav1.APIResourceList checker Checker out *metav1.APIResourceList }{ { resources: []*metav1.APIResourceList{ { GroupVersion: "/////", }, }, checker: NotNil, out: nil, }, { resources: []*metav1.APIResourceList{ { GroupVersion: "apiregistration.k8s.io/v1", }, }, checker: IsNil, out: &metav1.APIResourceList{ GroupVersion: "apiregistration.k8s.io/v1", }, }, { resources: []*metav1.APIResourceList{ { GroupVersion: "apiregistration.k8s.io/v1beta1", }, }, checker: IsNil, out: &metav1.APIResourceList{ GroupVersion: "apiregistration.k8s.io/v1beta1", }, }, { resources: []*metav1.APIResourceList{ { GroupVersion: "notapiregistration.k8s.io/v1", }, }, checker: NotNil, out: nil, }, } { cli := fake.NewSimpleClientset() cli.Discovery().(*discoveryfake.FakeDiscovery).Resources = tc.resources p := &Kubestr{cli: cli} out, err := p.validateAggregatedLayerHelper() c.Assert(out, DeepEquals, tc.out) c.Check(err, tc.checker) } } ================================================ FILE: pkg/kubestr/kubestr.go ================================================ package kubestr import ( "github.com/kanisterio/kanister/pkg/kube" "github.com/kastenhq/kubestr/pkg/fio" "github.com/pkg/errors" sv1 "k8s.io/api/storage/v1" unstructured "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" "k8s.io/client-go/dynamic" "k8s.io/client-go/kubernetes" ) // Kubestr is the primary object for running the kubestr tool. It holds all the cluster state information // as well. type Kubestr struct { cli kubernetes.Interface dynCli dynamic.Interface sdsfgValidator snapshotDataSourceFG storageClassList *sv1.StorageClassList volumeSnapshotClassList *unstructured.UnstructuredList Fio fio.FIO } const Logo = ` ************************************** _ ___ _ ___ ___ ___ _____ ___ | |/ / | | | _ ) __/ __|_ _| _ \ | ' <| |_| | _ \ _|\__ \ | | | / |_|\_\\___/|___/___|___/ |_| |_|_\ Explore your Kubernetes storage options ************************************** ` var ( DefaultQPS = float32(50) DefaultBurst = 100 ) // NewKubestr initializes a new kubestr object to run preflight tests func NewKubestr() (*Kubestr, error) { cli, err := LoadKubeCli() if err != nil { return nil, err } dynCli, err := LoadDynCli() if err != nil { return nil, err } return &Kubestr{ cli: cli, dynCli: dynCli, sdsfgValidator: &snapshotDataSourceFGValidator{ cli: cli, dynCli: dynCli, }, Fio: &fio.FIOrunner{ Cli: cli, }, }, nil } // LoadDynCli loads the config and returns a dynamic CLI func LoadDynCli() (dynamic.Interface, error) { cfg, err := kube.LoadConfig() if err != nil { return nil, errors.Wrap(err, "failed to load config for Dynamic client") } clientset, err := dynamic.NewForConfig(cfg) if err != nil { return nil, errors.Wrap(err, "failed to create Dynamic client") } return clientset, nil } // LoadKubeCli load the config and returns a kubernetes client // NewClient returns a k8 client configured by the kanister environment. func LoadKubeCli() (kubernetes.Interface, error) { config, err := kube.LoadConfig() if err != nil { return nil, err } config.QPS = DefaultQPS config.Burst = DefaultBurst // creates the clientset clientset, err := kubernetes.NewForConfig(config) if err != nil { return nil, err } return clientset, nil } ================================================ FILE: pkg/kubestr/storage_provisioners.go ================================================ package kubestr import ( "context" "fmt" "regexp" "strconv" "strings" kanvolume "github.com/kanisterio/kanister/pkg/kube/volume" "github.com/kastenhq/kubestr/pkg/common" "github.com/pkg/errors" v1 "k8s.io/api/core/v1" sv1 "k8s.io/api/storage/v1" "k8s.io/apimachinery/pkg/api/resource" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" unstructured "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/client-go/dynamic" "k8s.io/client-go/kubernetes" ) const ( // APIVersionKey describes the APIVersion key APIVersionKey = "apiVersion" // FeatureGateTestPVCName is the name of the pvc created by the feature gate // validation test FeatureGateTestPVCName = "kubestr-featuregate-test" // DefaultNS describes the default namespace DefaultNS = "default" // PodNamespaceEnvKey describes the pod namespace env variable PodNamespaceEnvKey = "POD_NAMESPACE" ) // Provisioner holds the important information of a provisioner type Provisioner struct { ProvisionerName string CSIDriver *CSIDriver URL string StorageClasses []*SCInfo VolumeSnapshotClasses []*VSCInfo StatusList []Status } type CSIDriver struct { NameUrl string DriverName string Versions string Description string Persistence string AccessModes string DynamicProvisioning string Features string } func (c *CSIDriver) Provider() string { re := regexp.MustCompile(`\[(.*?)\]`) match := re.FindStringSubmatch(c.NameUrl) // find the left most match if len(match) < 2 { return "" } return match[1] } func (c *CSIDriver) URL() string { re := regexp.MustCompile(`\((.*?)\)`) match := re.FindAllStringSubmatch(c.NameUrl, -1) if len(match) < 1 { return "" } url := match[len(match)-1] // find the right most match if len(url) < 2 { return "" } return url[1] } func (c *CSIDriver) Print(prefix string) { fmt.Printf(prefix+" Provider: %s\n", c.Provider()) fmt.Printf(prefix+" Website: %s\n", c.URL()) fmt.Printf(prefix+" Description: %s\n", c.Description) fmt.Printf(prefix+" Additional Features: %s\n", c.Features) } func (c *CSIDriver) SupportsSnapshots() bool { return strings.Contains(c.Features, "Snapshot") } // SCInfo stores the info of a StorageClass type SCInfo struct { Name string StatusList []Status Raw interface{} `json:",omitempty"` } // VSCInfo stores the info of a VolumeSnapshotClass type VSCInfo struct { Name string StatusList []Status HasAnnotation bool Raw interface{} `json:",omitempty"` } // Print prints the provionsioner specific details func (v *Provisioner) Print() { printSuccessColor(" " + v.ProvisionerName + ":") for _, status := range v.StatusList { status.Print(" ") } switch { case v.CSIDriver != nil: fmt.Println(" This is a CSI driver!") fmt.Println(" (The following info may not be up to date. Please check with the provider for more information.)") v.CSIDriver.Print(" ") case strings.HasPrefix(v.ProvisionerName, "kubernetes.io"): fmt.Println(" This is an in tree provisioner.") case strings.Contains(v.ProvisionerName, "csi"): fmt.Println(" This might be a CSI Driver. But it is not publicly listed.") default: fmt.Println(" Unknown driver type.") } fmt.Println() if len(v.StorageClasses) > 0 { fmt.Printf(" Storage Classes:\n") for _, sc := range v.StorageClasses { fmt.Printf(" * %s\n", sc.Name) for _, status := range sc.StatusList { status.Print(" ") } } } if len(v.VolumeSnapshotClasses) > 0 { fmt.Printf(" Volume Snapshot Classes:\n") for _, vsc := range v.VolumeSnapshotClasses { fmt.Printf(" * %s\n", vsc.Name) for _, status := range vsc.StatusList { status.Print(" ") } } } if len(v.StorageClasses) > 0 { fmt.Println() fmt.Println(" To perform a FIO test, run-") fmt.Println(" ./kubestr fio -s ") fmt.Println() fmt.Println(" To perform a check for block device support, run-") fmt.Println(" ./kubestr blockmount -s ") switch { case len(v.VolumeSnapshotClasses) == 0 && v.CSIDriver != nil && v.CSIDriver.SupportsSnapshots(): fmt.Println() fmt.Println(" This provisioner supports snapshots, however no Volume Snaphsot Classes were found.") case len(v.VolumeSnapshotClasses) > 0: fmt.Println() fmt.Println(" To test CSI snapshot/restore functionality, run-") fmt.Println(" ./kubestr csicheck -s -v ") } } } // ValidateProvisioners validates the provisioners in a cluster func (p *Kubestr) ValidateProvisioners(ctx context.Context) ([]*Provisioner, error) { provisionerList, err := p.provisionerList(ctx) if err != nil { return nil, fmt.Errorf("error listing provisioners: %w", err) } var validateProvisionersOutput []*Provisioner for _, provisioner := range provisionerList { processedProvisioner, err := p.processProvisioner(ctx, provisioner) if err != nil { return nil, err } validateProvisionersOutput = append(validateProvisionersOutput, processedProvisioner) } return validateProvisionersOutput, nil } func (p *Kubestr) processProvisioner(ctx context.Context, provisioner string) (*Provisioner, error) { retProvisioner := &Provisioner{ ProvisionerName: provisioner, } storageClassList, err := p.loadStorageClasses(ctx) if err != nil { return nil, err } for _, storageClass := range storageClassList.Items { if storageClass.Provisioner == provisioner { retProvisioner.StorageClasses = append(retProvisioner.StorageClasses, p.validateStorageClass(provisioner, storageClass)) // review this } } for _, csiDriver := range CSIDriverList { if strings.Contains(provisioner, csiDriver.DriverName) { retProvisioner.CSIDriver = csiDriver } } if retProvisioner.CSIDriver != nil { if !p.hasCSIDriverObject(ctx, provisioner) { retProvisioner.StatusList = append(retProvisioner.StatusList, makeStatus(StatusWarning, "Missing CSIDriver Object. Required by some provisioners.", nil)) } if clusterCsiSnapshotCapable, err := p.isK8sVersionCSISnapshotCapable(ctx); err != nil || !clusterCsiSnapshotCapable { retProvisioner.StatusList = append(retProvisioner.StatusList, makeStatus(StatusInfo, "Cluster is not CSI snapshot capable. Requires VolumeSnapshotDataSource feature gate.", nil)) return retProvisioner, errors.Wrap(err, "failed to validate if Kubernetes version was CSI capable") } csiSnapshotGroupVersion := p.getCSIGroupVersion() if csiSnapshotGroupVersion == nil { retProvisioner.StatusList = append(retProvisioner.StatusList, makeStatus(StatusInfo, "Can't find the CSI snapshot group api version.", nil)) return retProvisioner, nil } // load volumeSnapshotClass vscs, err := p.loadVolumeSnapshotClasses(ctx, csiSnapshotGroupVersion.Version) if err != nil { return nil, errors.Wrap(err, "failed to load volume snapshot classes") } for _, vsc := range vscs.Items { if p.getDriverNameFromUVSC(vsc, csiSnapshotGroupVersion.GroupVersion) == provisioner { retProvisioner.VolumeSnapshotClasses = append(retProvisioner.VolumeSnapshotClasses, p.validateVolumeSnapshotClass(vsc, csiSnapshotGroupVersion.GroupVersion)) } } } return retProvisioner, nil } // hasCSIDriverObject sees if a provisioner has a CSIDriver Object func (p *Kubestr) hasCSIDriverObject(ctx context.Context, provisioner string) bool { csiDrivers, err := p.cli.StorageV1beta1().CSIDrivers().List(ctx, metav1.ListOptions{}) if err != nil { return false } for _, driver := range csiDrivers.Items { if driver.Name == provisioner { return true } } return false } func (p *Kubestr) isK8sVersionCSISnapshotCapable(ctx context.Context) (bool, error) { k8sVersion, err := p.validateK8sVersionHelper() if err != nil { return false, err } minorStr := k8sVersion.Minor if string(minorStr[len(minorStr)-1]) == "+" { minorStr = minorStr[:len(minorStr)-1] } minor, err := strconv.Atoi(minorStr) if err != nil { return false, err } if minor < 17 && k8sVersion.Major == "1" { return p.sdsfgValidator.validate(ctx) } return true, nil } // validateStorageClass validates a storageclass func (p *Kubestr) validateStorageClass(provisioner string, storageClass sv1.StorageClass) *SCInfo { scStatus := &SCInfo{ Name: storageClass.Name, Raw: storageClass, } return scStatus } // validateVolumeSnapshotClass validates the VolumeSnapshotClass func (p *Kubestr) validateVolumeSnapshotClass(vsc unstructured.Unstructured, groupVersion string) *VSCInfo { retVSC := &VSCInfo{ Name: vsc.GetName(), Raw: vsc, } if groupVersion != common.SnapshotVersion { retVSC.StatusList = append(retVSC.StatusList, makeStatus(StatusError, fmt.Sprintf("Unsupported GroupVersion (%s) for VolumeSnapshotClass (%s)", vsc.GetName(), groupVersion), nil)) return retVSC } _, ok := vsc.Object[common.VolSnapClassDriverKey] if !ok { retVSC.StatusList = append(retVSC.StatusList, makeStatus(StatusError, fmt.Sprintf("VolumeSnapshotClass (%s) missing 'driver' field", vsc.GetName()), nil)) } return retVSC } func (p *Kubestr) provisionerList(ctx context.Context) ([]string, error) { storageClassList, err := p.loadStorageClasses(ctx) if err != nil { return nil, err } provisionerSet := make(map[string]struct{}) for _, storageClass := range storageClassList.Items { provisionerSet[storageClass.Provisioner] = struct{}{} } return convertSetToSlice(provisionerSet), nil } func (p *Kubestr) loadStorageClasses(ctx context.Context) (*sv1.StorageClassList, error) { if p.storageClassList == nil { sc, err := p.cli.StorageV1().StorageClasses().List(ctx, metav1.ListOptions{}) if err != nil { return nil, err } p.storageClassList = sc } return p.storageClassList, nil } func (p *Kubestr) loadVolumeSnapshotClasses(ctx context.Context, version string) (*unstructured.UnstructuredList, error) { if p.volumeSnapshotClassList == nil { VolSnapClassGVR := schema.GroupVersionResource{Group: common.SnapGroupName, Version: version, Resource: common.VolumeSnapshotClassResourcePlural} us, err := p.dynCli.Resource(VolSnapClassGVR).List(ctx, metav1.ListOptions{}) if err != nil { return nil, err } p.volumeSnapshotClassList = us } return p.volumeSnapshotClassList, nil } // getDriverNameFromUVSC get the driver name from an unstructured VSC func (p *Kubestr) getDriverNameFromUVSC(vsc unstructured.Unstructured, version string) string { var driverName interface{} var ok bool if version != common.SnapshotVersion { return "" } driverName, ok = vsc.Object[common.VolSnapClassDriverKey] if !ok { return "" } driver, ok := driverName.(string) if !ok { return "" } return driver } // getCSIGroupVersion fetches the CSI Group Version func (p *Kubestr) getCSIGroupVersion() *metav1.GroupVersionForDiscovery { groups, _, err := p.cli.Discovery().ServerGroupsAndResources() if err != nil { return nil } for _, group := range groups { if group.Name == common.SnapGroupName { return &group.PreferredVersion } } return nil } type snapshotDataSourceFG interface { validate(ctx context.Context) (bool, error) } type snapshotDataSourceFGValidator struct { cli kubernetes.Interface dynCli dynamic.Interface } func (s *snapshotDataSourceFGValidator) validate(ctx context.Context) (bool, error) { ns := getPodNamespace() // deletes if exists. If it doesn't exist, this is a noop err := kanvolume.DeletePVC(s.cli, ns, FeatureGateTestPVCName) if err != nil { return false, errors.Wrap(err, "error deleting VolumeSnapshotDataSource feature-gate validation pvc") } // defer delete defer func() { _ = kanvolume.DeletePVC(s.cli, ns, FeatureGateTestPVCName) }() // create PVC snapshotKind := "VolumeSnapshot" snapshotAPIGroup := "snapshot.storage.k8s.io" pvc := &v1.PersistentVolumeClaim{ ObjectMeta: metav1.ObjectMeta{ Name: FeatureGateTestPVCName, }, Spec: v1.PersistentVolumeClaimSpec{ AccessModes: []v1.PersistentVolumeAccessMode{v1.ReadWriteOnce}, DataSource: &v1.TypedLocalObjectReference{ APIGroup: &snapshotAPIGroup, Kind: snapshotKind, Name: "fakeSnap", }, Resources: v1.VolumeResourceRequirements{ Requests: v1.ResourceList{ v1.ResourceStorage: resource.MustParse("1Gi"), }, }, }, } pvcRes, err := s.cli.CoreV1().PersistentVolumeClaims(ns).Create(ctx, pvc, metav1.CreateOptions{}) if err != nil { return false, errors.Wrap(err, "error creating VolumeSnapshotDataSource feature-gate validation pvc") } if pvcRes.Spec.DataSource == nil { return false, nil } return true, nil } ================================================ FILE: pkg/kubestr/storage_provisioners_test.go ================================================ package kubestr import ( "context" "fmt" kansnapshot "github.com/kanisterio/kanister/pkg/kube/snapshot" . "gopkg.in/check.v1" scv1 "k8s.io/api/storage/v1" "k8s.io/api/storage/v1beta1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/schema" version "k8s.io/apimachinery/pkg/version" discoveryfake "k8s.io/client-go/discovery/fake" fakedynamic "k8s.io/client-go/dynamic/fake" "k8s.io/client-go/kubernetes" "k8s.io/client-go/kubernetes/fake" ) type ProvisionerTestSuite struct{} var _ = Suite(&ProvisionerTestSuite{}) func (s *ProvisionerTestSuite) TestHasCSIDriverObject(c *C) { ctx := context.Background() for _, tc := range []struct { cli kubernetes.Interface provisionerName string hasDriver bool }{ { cli: fake.NewSimpleClientset(), provisionerName: "provisioner", hasDriver: false, }, { cli: fake.NewSimpleClientset(&v1beta1.CSIDriverList{ Items: []v1beta1.CSIDriver{ { ObjectMeta: metav1.ObjectMeta{ Name: "drivername", }, }, }}), provisionerName: "drivername", hasDriver: true, }, } { p := &Kubestr{cli: tc.cli} hasDriver := p.hasCSIDriverObject(ctx, tc.provisionerName) c.Assert(hasDriver, Equals, tc.hasDriver) } } func (s *ProvisionerTestSuite) TestIsK8sVersionCSISnapshotCapable(c *C) { ctx := context.Background() for _, tc := range []struct { ver *version.Info checker Checker capable bool sdsfg snapshotDataSourceFG }{ { ver: &version.Info{Major: "1", Minor: "", GitVersion: "v1.17"}, checker: NotNil, capable: false, }, { ver: &version.Info{Major: "1", Minor: "15+", GitVersion: "v1.15+"}, checker: NotNil, capable: false, sdsfg: &fakeSDSFGValidator{err: fmt.Errorf("someerror"), cap: false}, }, { ver: &version.Info{Major: "1", Minor: "15+", GitVersion: "v1.15+"}, checker: IsNil, capable: true, sdsfg: &fakeSDSFGValidator{err: nil, cap: true}, }, { ver: &version.Info{Major: "1", Minor: "17", GitVersion: "v1.17"}, checker: IsNil, capable: true, }, } { cli := fake.NewSimpleClientset() cli.Discovery().(*discoveryfake.FakeDiscovery).FakedServerVersion = tc.ver p := &Kubestr{cli: cli, sdsfgValidator: tc.sdsfg} cap, err := p.isK8sVersionCSISnapshotCapable(ctx) c.Check(err, tc.checker) c.Assert(cap, Equals, tc.capable) } } type fakeSDSFGValidator struct { err error cap bool } func (f *fakeSDSFGValidator) validate(ctx context.Context) (bool, error) { return f.cap, f.err } func (s *ProvisionerTestSuite) TestValidateVolumeSnapshotClass(c *C) { for _, tc := range []struct { vsc unstructured.Unstructured groupVersion string out *VSCInfo }{ { vsc: unstructured.Unstructured{ Object: map[string]interface{}{ "metadata": map[string]interface{}{ "name": "vsc1", }, "driver": "something", }, }, groupVersion: "snapshot.storage.k8s.io/v1", out: &VSCInfo{ Name: "vsc1", }, }, { // failure vsc: unstructured.Unstructured{ Object: map[string]interface{}{ "metadata": map[string]interface{}{ "name": "vsc1", }, "notdriver": "something", }, }, groupVersion: "snapshot.storage.k8s.io/v1", out: &VSCInfo{ Name: "vsc1", StatusList: []Status{ makeStatus(StatusError, fmt.Sprintf("VolumeSnapshotClass (%s) missing 'driver' field", "vsc1"), nil), }, }, }, } { p := &Kubestr{} out := p.validateVolumeSnapshotClass(tc.vsc, tc.groupVersion) c.Assert(out.Name, Equals, tc.out.Name) c.Assert(len(out.StatusList), Equals, len(tc.out.StatusList)) } } func (s *ProvisionerTestSuite) TestLoadStorageClassesAndProvisioners(c *C) { ctx := context.Background() p := &Kubestr{cli: fake.NewSimpleClientset( &scv1.StorageClass{ObjectMeta: metav1.ObjectMeta{Name: "sc1"}, Provisioner: "provisioner1"}, &scv1.StorageClass{ObjectMeta: metav1.ObjectMeta{Name: "sc2"}, Provisioner: "provisioner2"}, )} scs, err := p.loadStorageClasses(ctx) c.Assert(err, IsNil) c.Assert(len(scs.Items), Equals, 2) c.Assert(scs, Equals, p.storageClassList) // reload has the same p.cli = fake.NewSimpleClientset() scs, err = p.loadStorageClasses(ctx) c.Assert(err, IsNil) c.Assert(len(scs.Items), Equals, 2) c.Assert(scs, Equals, p.storageClassList) // proviosners uses loaded list provisioners, err := p.provisionerList(ctx) c.Assert(err, IsNil) c.Assert(len(provisioners), Equals, 2) } func (s *ProvisionerTestSuite) TestLoadVolumeSnaphsotClasses(c *C) { ctx := context.Background() scheme := runtime.NewScheme() scheme.AddKnownTypeWithName(schema.GroupVersionKind{Group: "snapshot.storage.k8s.io", Version: "v1", Kind: "VolumeSnapshotClassList"}, &unstructured.UnstructuredList{}) p := &Kubestr{dynCli: fakedynamic.NewSimpleDynamicClient(scheme, &unstructured.Unstructured{ Object: map[string]interface{}{ "apiVersion": fmt.Sprintf("%s/%s", kansnapshot.GroupName, kansnapshot.Version), "kind": "VolumeSnapshotClass", "metadata": map[string]interface{}{ "name": "theVSC", }, "driver": "somesnapshotter", "deletionPolicy": "Delete", }, })} vsc, err := p.loadVolumeSnapshotClasses(ctx, kansnapshot.Version) c.Assert(err, IsNil) c.Assert(len(vsc.Items), Equals, 1) c.Assert(vsc, Equals, p.volumeSnapshotClassList) // reload has the same p.dynCli = fakedynamic.NewSimpleDynamicClient(runtime.NewScheme()) vsc, err = p.loadVolumeSnapshotClasses(ctx, kansnapshot.Version) c.Assert(err, IsNil) c.Assert(len(vsc.Items), Equals, 1) c.Assert(vsc, Equals, p.volumeSnapshotClassList) } func (s *ProvisionerTestSuite) TestGetCSIGroupVersion(c *C) { for _, tc := range []struct { resources []*metav1.APIResourceList out *metav1.GroupVersionForDiscovery }{ { resources: []*metav1.APIResourceList{ { GroupVersion: "/////", }, }, out: nil, }, { resources: []*metav1.APIResourceList{ { GroupVersion: "snapshot.storage.k8s.io/v1beta1", }, { GroupVersion: "snapshot.storage.k8s.io/v1apha1", }, }, out: &metav1.GroupVersionForDiscovery{ GroupVersion: "snapshot.storage.k8s.io/v1beta1", Version: "v1beta1", }, }, { resources: []*metav1.APIResourceList{ { GroupVersion: "NOTsnapshot.storage.k8s.io/v1beta1", }, }, out: nil, }, } { cli := fake.NewSimpleClientset() cli.Discovery().(*discoveryfake.FakeDiscovery).Resources = tc.resources p := &Kubestr{cli: cli} out := p.getCSIGroupVersion() c.Assert(out, DeepEquals, tc.out) } } func (s *ProvisionerTestSuite) TestGetDriverNameFromUVSC(c *C) { for _, tc := range []struct { vsc unstructured.Unstructured version string out string }{ { // beta success vsc: unstructured.Unstructured{ Object: map[string]interface{}{ "driver": "drivername", }, }, version: "snapshot.storage.k8s.io/v1", out: "drivername", }, { // key missing vsc: unstructured.Unstructured{ Object: map[string]interface{}{}, }, version: "snapshot.storage.k8s.io/v1", out: "", }, { // type conversion vsc: unstructured.Unstructured{ Object: map[string]interface{}{ "driver": int64(1), }, }, version: "snapshot.storage.k8s.io/v1", out: "", }, } { p := &Kubestr{} out := p.getDriverNameFromUVSC(tc.vsc, tc.version) c.Assert(out, Equals, tc.out) } } // func (s *ProvisionerTestSuite) TestGetDriverStats(c *C) { // var snapshotCount int // var expansionCount int // var cloningCount int // featureMap := make(map[string]struct{}) // for _, driver := range CSIDriverList { // if strings.Contains("Snapshot", driver.Features) { // snapshotCount++ // } // if strings.Contains("Expansion", driver.Features) { // expansionCount++ // } // if strings.Contains("Cloning", driver.Features) { // cloningCount++ // } // featureMap[driver.Features] = struct{}{} // } // c.Log("totalcsidrivers: ", len(CSIDriverList)) // c.Log("snapshotCount: ", snapshotCount) // c.Log("expansionCount: ", expansionCount) // c.Log("cloningCount: ", cloningCount) // c.Log("unique combinations: ", len(featureMap)) // c.Assert(true, Equals, false) // } ================================================ FILE: pkg/kubestr/utils.go ================================================ package kubestr import ( "fmt" "os" ) const ( // ErrorColor formatted color red ErrorColor = "\033[1;31m%s\033[0m" // SuccessColor formatted color green SuccessColor = "\033[1;32m%s\033[0m" // YellowColor formatted color yellow YellowColor = "\033[1;33m%s\033[0m" ) // Status is a generic structure to return a status type Status struct { StatusCode StatusCode StatusMessage string Raw interface{} `json:",omitempty"` } // StatusCode type definition type StatusCode string const ( // StatusOK is the success status code StatusOK = StatusCode("OK") // StatusWarning is the informational status code StatusWarning = StatusCode("Warning") // StatusError is the failure status code StatusError = StatusCode("Error") // StatusInfo is the Info status code StatusInfo = StatusCode("Info") ) // Print prints a status message with a given prefix func (s *Status) Print(prefix string) { switch s.StatusCode { case StatusOK: printSuccessMessage(prefix + s.StatusMessage) case StatusError: printErrorMessage(prefix + s.StatusMessage) case StatusWarning: printWarningMessage(prefix + s.StatusMessage) default: printInfoMessage(prefix + s.StatusMessage) } } // printErrorMessage prints the error message func printErrorMessage(errorMesg string) { fmt.Printf("%s - ", errorMesg) fmt.Printf(ErrorColor, "Error") fmt.Println() } // printSuccessMessage prints the success message func printSuccessMessage(message string) { fmt.Printf("%s - ", message) fmt.Printf(SuccessColor, "OK") fmt.Println() } func printSuccessColor(message string) { fmt.Printf(SuccessColor, message) fmt.Println() } // printInfoMessage prints a warning func printInfoMessage(message string) { fmt.Println(message) } // printWarningMessage prints a warning func printWarningMessage(message string) { fmt.Printf(YellowColor+"\n", message) } // TestOutput is the generic return value for tests type TestOutput struct { TestName string Status []Status Raw interface{} `json:",omitempty"` } // Print prints a TestRetVal as a string output func (t *TestOutput) Print() { fmt.Println(t.TestName + ":") for _, status := range t.Status { status.Print(" ") } } func MakeTestOutput(testname string, code StatusCode, mesg string, raw interface{}) *TestOutput { return &TestOutput{ TestName: testname, Status: []Status{makeStatus(code, mesg, nil)}, Raw: raw, } } func makeStatus(code StatusCode, mesg string, raw interface{}) Status { return Status{ StatusCode: code, StatusMessage: mesg, Raw: raw, } } func convertSetToSlice(set map[string]struct{}) []string { var slice []string for i := range set { slice = append(slice, i) } return slice } // getPodNamespace gets the pods namespace or returns default func getPodNamespace() string { if val, ok := os.LookupEnv(PodNamespaceEnvKey); ok { return val } return DefaultNS } ================================================ FILE: scripts/load_csi_provisioners.sh ================================================ #!/usr/bin/env bash set -o errexit set -o nounset set -o pipefail CLEANSED_STR="" cleanse_str() { case "$1" in "org.democratic-csi.[X]") CLEANSED_STR="org.democratic-csi" ;; "[x].ember-csi.io") CLEANSED_STR="ember-csi.io" ;; *) CLEANSED_STR="$1" esac } current_directory=$(dirname "$0") # The Driver information is scraped from the `Production Drivers` table on this page curl https://raw.githubusercontent.com/kubernetes-csi/docs/master/book/src/drivers.md -o ${current_directory}/../extra/csi-drivers cat <> ${current_directory}/../extra/csi-drivers-temp.go package kubestr // THIS FILE IS AUTO_GENERATED. // To generate file run "go generate" at the top level // This file must be checked in. EOT # The `Production Drivers` table has 8 columns as of now, # with the last column of `Other Features` skipped for quite a few of the drivers. MIN_COLS_PROD_DRIVERS=7 echo "var CSIDriverList = []*CSIDriver{" >> ${current_directory}/../extra/csi-drivers-temp.go while read p; do if [[ $p == [* ]]; then IFS='|' read -a fields <<< "$p" if [[ ${#fields[@]} -lt $MIN_COLS_PROD_DRIVERS ]]; then echo skipping "${fields[0]}" continue fi name_url=$(sed -e 's/^[[:space:]]*//' -e 's/[[:space:]]*$//' <<<${fields[0]}) driver_name=$(sed -e 's/^[[:space:]]*//' -e 's/[[:space:]]*$//' <<<${fields[1]} | sed 's/`//g') versions=$(sed -e 's/^[[:space:]]*//' -e 's/[[:space:]]*$//' <<<${fields[2]}) description=$(sed -e 's/^[[:space:]]*//' -e 's/[[:space:]]*$//' <<<${fields[3]}) persistence=$(sed -e 's/^[[:space:]]*//' -e 's/[[:space:]]*$//' <<<${fields[4]}) access_modes=$(sed -e 's/^[[:space:]]*//' -e 's/[[:space:]]*$//' <<<${fields[5]}| sed 's/"//g') dynamic_provisioning=$(sed -e 's/^[[:space:]]*//' -e 's/[[:space:]]*$//' <<<${fields[6]}) if [[ ${#fields[@]} -gt $MIN_COLS_PROD_DRIVERS ]]; then features=$(sed -e 's/^[[:space:]]*//' -e 's/[[:space:]]*$//' <<<${fields[7]}) fi cleanse_str "${driver_name}" driver_name="${CLEANSED_STR}" echo "{NameUrl: \"$name_url\", DriverName: \"$driver_name\", Versions: \"$versions\", Description: \"$description\", Persistence: \"$persistence\", AccessModes: \"$access_modes\", DynamicProvisioning: \"$dynamic_provisioning\", Features: \"$features\"}," >> ${current_directory}/../extra/csi-drivers-temp.go fi done <${current_directory}/../extra/csi-drivers echo "}" >> ${current_directory}/../extra/csi-drivers-temp.go gofmt ${current_directory}/../extra/csi-drivers-temp.go > ${current_directory}/../pkg/kubestr/csi-drivers.go rm ${current_directory}/../extra/csi-drivers-temp.go