[
  {
    "path": ".github/dependabot.yaml",
    "content": "version: 2\nupdates:\n  - package-ecosystem: gomod\n    commit-message:\n      prefix: \"deps(go):\"\n    directory: \"/\"\n    ignore:\n      # Avoids unnecessarily auto-creating PRs for k8s dependencies, as these\n      # will be closed since k8s dependencies need to be updated all at once\n      # starting with kanister and go through additional validation.\n      - dependency-name: \"k8s.io/*\"\n      - dependency-name: \"sigs.k8s.io/*\"\n    open-pull-requests-limit: 5\n    schedule:\n      interval: daily\n  - package-ecosystem: github-actions\n    commit-message:\n      prefix: \"deps(actions):\"\n    directory: \"/\"\n    open-pull-requests-limit: 3\n    schedule:\n      interval: monthly\n    groups:\n      github-actions:\n        patterns:\n        - \"actions/*\"\n        - \"github/codeql-action\"\n      docker:\n        patterns:\n        - \"docker/*\"\n  - package-ecosystem: docker\n    commit-message:\n      prefix: \"deps(docker):\"\n    directory: \"/\"\n    open-pull-requests-limit: 4\n    schedule:\n      interval: monthly\n    groups:\n      all:\n        patterns:\n        - \"*\"\n"
  },
  {
    "path": ".github/workflows/ci.yml",
    "content": "name: CI\n\non:\n  push:\n    branches:\n    - main\n    - master\n    tags:\n    - v*\n\n  pull_request:\n\npermissions:\n  contents: read\n\njobs:\n\n  build:\n    name: Build\n    runs-on: ubuntu-latest\n    steps:\n    -\n      name: Check out code\n      uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2\n    -\n      name: Set up Go\n      uses: actions/setup-go@4a3601121dd01d1626a1e23e37211e3254c1c06c # v6.4.0\n      with:\n        go-version-file: 'go.mod'\n      id: go\n    -\n      name: Build\n      run: go build -v .\n    -\n      name: Test\n      run: go test -v ./...\n\n  lint:\n    name: Lint\n    runs-on: ubuntu-latest\n    steps:\n      -\n        name: Checkout code\n        uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2\n      -\n        name: Set up Go\n        uses: actions/setup-go@4a3601121dd01d1626a1e23e37211e3254c1c06c # v6.4.0\n        with:\n          go-version-file: 'go.mod'\n      -\n        name: golangci-lint\n        uses: golangci/golangci-lint-action@1e7e51e771db61008b38414a730f564565cf7c20 # v9.2.0\n        with:\n          # Required: the version of golangci-lint is required and must be specified without patch version: we always use the latest patch version.\n          version: v2.2.1\n          args: --timeout=5m --modules-download-mode=mod\n          skip-cache: true\n"
  },
  {
    "path": ".github/workflows/dependency-review.yaml",
    "content": "# Dependency Review Action\n#\n# This workflow scans dependency manifest files that change as part of a pull\n# reqest, surfacing known-vulnerable versions of the packages declared or\n# updated in the PR.\n# If the workflow run is marked as required, PRs introducing known-vulnerable\n# packages will be blocked from merging.\n#\n# Source repository: https://github.com/actions/dependency-review-action\n# Public documentation: https://docs.github.com/en/code-security/supply-chain-security/understanding-your-software-supply-chain/about-dependency-review#dependency-review-enforcement\n#\nname: 'Dependency Review'\non: [pull_request]\n\npermissions:\n  contents: read\n\njobs:\n  dependency-review:\n    runs-on: ubuntu-latest\n    steps:\n      - name: 'Checkout Repository'\n        uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2\n      - name: 'Dependency Review'\n        uses: actions/dependency-review-action@2031cfc080254a8a887f58cffee85186f0e49e48 # v4.9.0\n"
  },
  {
    "path": ".github/workflows/docker-publish.yml",
    "content": "name: Docker\n\npermissions:\n  contents: read\n\non:\n  push:\n    branches:\n    - main\n    - master\n\n    # Publish `v1.2.3` tags as releases.\n    tags:\n    - v*\n  pull_request:\n\nenv:\n  REGISTRY: ghcr.io\n  IMAGE_NAME: ${{ github.repository }}\n\njobs:\n  push:\n    permissions:\n      packages: write\n      contents: read\n    runs-on: ubuntu-latest\n\n    steps:\n      - name: Check out code into the Go module directory\n        uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2\n\n      # Extract metadata (tags, labels) for Docker\n      # https://github.com/docker/metadata-action\n      - name: Extract Docker metadata\n        id: meta\n        uses: docker/metadata-action@030e881283bb7a6894de51c315a6bfe6a94e05cf # v6.0.0\n        with:\n          images: ${{ env.REGISTRY }}/${{ env.IMAGE_NAME }}\n\n      # This action can be useful if you want to add emulation\n      # support with QEMU to be able to build against more platforms.\n      - name: Set up QEMU\n        uses: docker/setup-qemu-action@ce360397dd3f832beb865e1373c09c0e9f86d70a # v4.0.0\n\n      # This action will create and boot a builder using\n      # by default the docker-container builder driver.\n      # Recommended for build multi-platform images, export cache, etc.\n      - name: Set up Docker Buildx\n        uses: docker/setup-buildx-action@4d04d5d9486b7bd6fa91e7baf45bbb4f8b9deedd # v4.0.0\n\n      - name: Log into ${{ env.REGISTRY }}\n        if: github.event_name != 'pull_request'\n        uses: docker/login-action@b45d80f862d83dbcd57f89517bcf500b2ab88fb2 # v4.0.0\n        with:\n          registry: ${{ env.REGISTRY }}\n          username: ${{ github.actor }}\n          password: ${{ secrets.GITHUB_TOKEN }}\n\n      # Build and push Docker image with Buildx (don't push on PR)\n      # https://github.com/docker/build-push-action\n      - name: Build and push Docker image\n        uses: 
docker/build-push-action@d08e5c354a6adb9ed34480a06d141179aa583294 # v7.0.0\n        with:\n          platforms: linux/amd64,linux/arm64,linux/ppc64le\n          context: .\n          push: ${{ github.event_name != 'pull_request' }}\n          tags: ${{ steps.meta.outputs.tags }}\n          labels: ${{ steps.meta.outputs.labels }}\n"
  },
  {
    "path": ".github/workflows/ossf-scorecard.yml",
    "content": "name: OSSF Scorecard\non:\n  # For Branch-Protection check. Only the default branch is supported. See\n  # https://github.com/ossf/scorecard/blob/main/docs/checks.md#branch-protection\n  branch_protection_rule:\n  push:\n    branches: [ \"master\" ]\n  # To guarantee Maintained check is occasionally updated. See\n  # https://github.com/ossf/scorecard/blob/main/docs/checks.md#maintained\n  schedule:\n    - cron: '25 6 * * 5'\n  workflow_dispatch:\n    inputs:\n      ref:\n        description: 'branch or git ref to use for the build'\n        required: true\n        default: 'master'\n\n# Declare default permissions as read only.\npermissions: read-all\n\njobs:\n  analysis:\n    name: Scorecard analysis\n    runs-on: ubuntu-latest\n    permissions:\n      # Needed to upload the results to code-scanning dashboard.\n      security-events: write\n      # Needed to publish results and get a badge\n      id-token: write\n\n    steps:\n      -\n        name: \"Checkout repo\"\n        uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2\n        with:\n          persist-credentials: false\n      -\n        name: \"Run analysis\"\n        uses: ossf/scorecard-action@4eaacf0543bb3f2c246792bd56e8cdeffafb205a # v2.4.3\n        with:\n          results_file: results.sarif\n          results_format: sarif\n          publish_results: true\n      -\n        # Upload the results to GitHub's code scanning dashboard.\n        name: \"Upload to results to dashboard\"\n        uses: github/codeql-action/upload-sarif@c10b8064de6f491fea524254123dbe5e09572f13 # v4.35.1\n        with:\n          sarif_file: results.sarif\n      -\n        name: \"Upload analysis results as 'Job Artifact'\"\n        uses: actions/upload-artifact@bbbca2ddaa5d8feaa63e36b76fdaad77386f024f # v7.0.0\n        with:\n          name: SARIF file\n          path: results.sarif\n          retention-days: 5\n"
  },
  {
    "path": ".github/workflows/release.yaml",
    "content": "name: Release\n\npermissions:\n  contents: read\n\non:\n  release:\n    types:\n      - created\n      - published\n\njobs:\n  goreleaser:\n    name: Release Go Binary\n    runs-on: ubuntu-latest\n    permissions:\n      contents: write\n    steps:\n    - name: Checkout\n      uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2\n      with:\n        fetch-depth: 0\n    - name: Set up Go\n      uses: actions/setup-go@4a3601121dd01d1626a1e23e37211e3254c1c06c # v6.4.0\n      with:\n        go-version-file: 'go.mod'\n    - name: Run GoReleaser\n      uses: goreleaser/goreleaser-action@ec59f474b9834571250b370d4735c50f8e2d1e29 # v7.0.0\n      with:\n        distribution: goreleaser\n        version: latest\n        args: release --clean\n      env:\n        GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}\n"
  },
  {
    "path": ".goreleaser.yml",
    "content": "# This is an example goreleaser.yaml file with some sane defaults.\n# Make sure to check the documentation at http://goreleaser.com\nbefore:\n  hooks:\n    # You may remove this if you don't use go modules.\n    - go mod download\nbuilds:\n  - env:\n      - CGO_ENABLED=0\n      - GO_EXTLINK_ENABLED=0\n    goos:\n      - linux\n      - windows\n      - darwin\n    goarch:\n      - amd64\n      - arm64\n\narchives:\n  - name_template: >-\n      {{ .ProjectName }}_\n      {{- .Version }}_\n      {{- if eq .Os \"darwin\" }}MacOS\n      {{- else if eq .Os \"linux\" }}Linux\n      {{- else if eq .Os \"windows\" }}Windows\n      {{- else }}{{ .Os }}{{ end }}_\n      {{- .Arch }}\nchecksum:\n  name_template: 'checksums.txt'\nsnapshot:\n  name_template: \"{{ .Tag }}-next\"\nchangelog:\n  sort: asc\n  filters:\n    exclude:\n      - '^docs:'\n      - '^test:'\n"
  },
  {
    "path": "Dockerfile",
    "content": "ARG BUILDPLATFROM\n\nFROM --platform=$BUILDPLATFORM golang:1.26.1-bookworm@sha256:8e8aa801e8417ef0b5c42b504dd34db3db911bb73dba933bd8bde75ed815fdbb AS builder\n\nARG TARGETOS\nARG TARGETARCH\nARG TARGETPLATFROM\n\nENV GO111MODULE=on \\\n    CGO_ENABLED=0 \\\n    GOOS=${TARGETOS} \\\n    GOARCH=${TARGETARCH} \n\nWORKDIR /app\n\nCOPY go.mod .\nCOPY go.sum .\n\nRUN go mod download\n\nCOPY . .\n\nRUN go build -o /dist/kubestr -ldflags=\"-w -s\" .\n\nFROM alpine:3.23@sha256:25109184c71bdad752c8312a8623239686a9a2071e8825f20acb8f2198c3f659\n\nRUN apk --no-cache add fio\n\nCOPY --from=builder /dist/kubestr /\n\nENTRYPOINT [\"/kubestr\"]\n"
  },
  {
    "path": "FIO.md",
    "content": "# FIO\n\n[![asciicast](https://asciinema.org/a/D9EFwlEUVx787hayFapdHljBW.svg)](https://asciinema.org/a/D9EFwlEUVx787hayFapdHljBW)\n\n## More info coming soon \n\n## Examples of FIO files-\n\nHere are some [examples](https://github.com/axboe/fio/tree/master/examples)\n"
  },
  {
    "path": "LICENSE",
    "content": "                                 Apache License\n                           Version 2.0, January 2004\n                        http://www.apache.org/licenses/\n\n   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION\n\n   1. Definitions.\n\n      \"License\" shall mean the terms and conditions for use, reproduction,\n      and distribution as defined by Sections 1 through 9 of this document.\n\n      \"Licensor\" shall mean the copyright owner or entity authorized by\n      the copyright owner that is granting the License.\n\n      \"Legal Entity\" shall mean the union of the acting entity and all\n      other entities that control, are controlled by, or are under common\n      control with that entity. For the purposes of this definition,\n      \"control\" means (i) the power, direct or indirect, to cause the\n      direction or management of such entity, whether by contract or\n      otherwise, or (ii) ownership of fifty percent (50%) or more of the\n      outstanding shares, or (iii) beneficial ownership of such entity.\n\n      \"You\" (or \"Your\") shall mean an individual or Legal Entity\n      exercising permissions granted by this License.\n\n      \"Source\" form shall mean the preferred form for making modifications,\n      including but not limited to software source code, documentation\n      source, and configuration files.\n\n      \"Object\" form shall mean any form resulting from mechanical\n      transformation or translation of a Source form, including but\n      not limited to compiled object code, generated documentation,\n      and conversions to other media types.\n\n      \"Work\" shall mean the work of authorship, whether in Source or\n      Object form, made available under the License, as indicated by a\n      copyright notice that is included in or attached to the work\n      (an example is provided in the Appendix below).\n\n      \"Derivative Works\" shall mean any work, whether in Source or Object\n      
form, that is based on (or derived from) the Work and for which the\n      editorial revisions, annotations, elaborations, or other modifications\n      represent, as a whole, an original work of authorship. For the purposes\n      of this License, Derivative Works shall not include works that remain\n      separable from, or merely link (or bind by name) to the interfaces of,\n      the Work and Derivative Works thereof.\n\n      \"Contribution\" shall mean any work of authorship, including\n      the original version of the Work and any modifications or additions\n      to that Work or Derivative Works thereof, that is intentionally\n      submitted to Licensor for inclusion in the Work by the copyright owner\n      or by an individual or Legal Entity authorized to submit on behalf of\n      the copyright owner. For the purposes of this definition, \"submitted\"\n      means any form of electronic, verbal, or written communication sent\n      to the Licensor or its representatives, including but not limited to\n      communication on electronic mailing lists, source code control systems,\n      and issue tracking systems that are managed by, or on behalf of, the\n      Licensor for the purpose of discussing and improving the Work, but\n      excluding communication that is conspicuously marked or otherwise\n      designated in writing by the copyright owner as \"Not a Contribution.\"\n\n      \"Contributor\" shall mean Licensor and any individual or Legal Entity\n      on behalf of whom a Contribution has been received by Licensor and\n      subsequently incorporated within the Work.\n\n   2. Grant of Copyright License. 
Subject to the terms and conditions of\n      this License, each Contributor hereby grants to You a perpetual,\n      worldwide, non-exclusive, no-charge, royalty-free, irrevocable\n      copyright license to reproduce, prepare Derivative Works of,\n      publicly display, publicly perform, sublicense, and distribute the\n      Work and such Derivative Works in Source or Object form.\n\n   3. Grant of Patent License. Subject to the terms and conditions of\n      this License, each Contributor hereby grants to You a perpetual,\n      worldwide, non-exclusive, no-charge, royalty-free, irrevocable\n      (except as stated in this section) patent license to make, have made,\n      use, offer to sell, sell, import, and otherwise transfer the Work,\n      where such license applies only to those patent claims licensable\n      by such Contributor that are necessarily infringed by their\n      Contribution(s) alone or by combination of their Contribution(s)\n      with the Work to which such Contribution(s) was submitted. If You\n      institute patent litigation against any entity (including a\n      cross-claim or counterclaim in a lawsuit) alleging that the Work\n      or a Contribution incorporated within the Work constitutes direct\n      or contributory patent infringement, then any patent licenses\n      granted to You under this License for that Work shall terminate\n      as of the date such litigation is filed.\n\n   4. Redistribution. 
You may reproduce and distribute copies of the\n      Work or Derivative Works thereof in any medium, with or without\n      modifications, and in Source or Object form, provided that You\n      meet the following conditions:\n\n      (a) You must give any other recipients of the Work or\n          Derivative Works a copy of this License; and\n\n      (b) You must cause any modified files to carry prominent notices\n          stating that You changed the files; and\n\n      (c) You must retain, in the Source form of any Derivative Works\n          that You distribute, all copyright, patent, trademark, and\n          attribution notices from the Source form of the Work,\n          excluding those notices that do not pertain to any part of\n          the Derivative Works; and\n\n      (d) If the Work includes a \"NOTICE\" text file as part of its\n          distribution, then any Derivative Works that You distribute must\n          include a readable copy of the attribution notices contained\n          within such NOTICE file, excluding those notices that do not\n          pertain to any part of the Derivative Works, in at least one\n          of the following places: within a NOTICE text file distributed\n          as part of the Derivative Works; within the Source form or\n          documentation, if provided along with the Derivative Works; or,\n          within a display generated by the Derivative Works, if and\n          wherever such third-party notices normally appear. The contents\n          of the NOTICE file are for informational purposes only and\n          do not modify the License. 
You may add Your own attribution\n          notices within Derivative Works that You distribute, alongside\n          or as an addendum to the NOTICE text from the Work, provided\n          that such additional attribution notices cannot be construed\n          as modifying the License.\n\n      You may add Your own copyright statement to Your modifications and\n      may provide additional or different license terms and conditions\n      for use, reproduction, or distribution of Your modifications, or\n      for any such Derivative Works as a whole, provided Your use,\n      reproduction, and distribution of the Work otherwise complies with\n      the conditions stated in this License.\n\n   5. Submission of Contributions. Unless You explicitly state otherwise,\n      any Contribution intentionally submitted for inclusion in the Work\n      by You to the Licensor shall be under the terms and conditions of\n      this License, without any additional terms or conditions.\n      Notwithstanding the above, nothing herein shall supersede or modify\n      the terms of any separate license agreement you may have executed\n      with Licensor regarding such Contributions.\n\n   6. Trademarks. This License does not grant permission to use the trade\n      names, trademarks, service marks, or product names of the Licensor,\n      except as required for reasonable and customary use in describing the\n      origin of the Work and reproducing the content of the NOTICE file.\n\n   7. Disclaimer of Warranty. Unless required by applicable law or\n      agreed to in writing, Licensor provides the Work (and each\n      Contributor provides its Contributions) on an \"AS IS\" BASIS,\n      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or\n      implied, including, without limitation, any warranties or conditions\n      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A\n      PARTICULAR PURPOSE. 
You are solely responsible for determining the\n      appropriateness of using or redistributing the Work and assume any\n      risks associated with Your exercise of permissions under this License.\n\n   8. Limitation of Liability. In no event and under no legal theory,\n      whether in tort (including negligence), contract, or otherwise,\n      unless required by applicable law (such as deliberate and grossly\n      negligent acts) or agreed to in writing, shall any Contributor be\n      liable to You for damages, including any direct, indirect, special,\n      incidental, or consequential damages of any character arising as a\n      result of this License or out of the use or inability to use the\n      Work (including but not limited to damages for loss of goodwill,\n      work stoppage, computer failure or malfunction, or any and all\n      other commercial damages or losses), even if such Contributor\n      has been advised of the possibility of such damages.\n\n   9. Accepting Warranty or Additional Liability. While redistributing\n      the Work or Derivative Works thereof, You may choose to offer,\n      and charge a fee for, acceptance of support, warranty, indemnity,\n      or other liability obligations and/or rights consistent with this\n      License. However, in accepting such obligations, You may act only\n      on Your own behalf and on Your sole responsibility, not on behalf\n      of any other Contributor, and only if You agree to indemnify,\n      defend, and hold each Contributor harmless for any liability\n      incurred by, or claims asserted against, such Contributor by reason\n      of your accepting any such warranty or additional liability.\n\n   END OF TERMS AND CONDITIONS\n\n   APPENDIX: How to apply the Apache License to your work.\n\n      To apply the Apache License to your work, attach the following\n      boilerplate notice, with the fields enclosed by brackets \"[]\"\n      replaced with your own identifying information. 
(Don't include\n      the brackets!)  The text should be enclosed in the appropriate\n      comment syntax for the file format. We also recommend that a\n      file or class name and description of purpose be included on the\n      same \"printed page\" as the copyright notice for easier\n      identification within third-party archives.\n\n   Copyright [yyyy] [name of copyright owner]\n\n   Licensed under the Apache License, Version 2.0 (the \"License\");\n   you may not use this file except in compliance with the License.\n   You may obtain a copy of the License at\n\n       http://www.apache.org/licenses/LICENSE-2.0\n\n   Unless required by applicable law or agreed to in writing, software\n   distributed under the License is distributed on an \"AS IS\" BASIS,\n   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n   See the License for the specific language governing permissions and\n   limitations under the License.\n"
  },
  {
    "path": "README.md",
    "content": "# Kubestr\n\n## What is it?\n\nKubestr is a collection of tools to discover, validate and evaluate your kubernetes storage options.\n\nAs adoption of kubernetes grows so have the persistent storage offerings that are available to users. The introduction of [CSI](https://kubernetes.io/blog/2019/01/15/container-storage-interface-ga/) (Container Storage Interface) has enabled storage providers to develop drivers with ease. In fact there are around a 100 different CSI drivers available today. Along with the existing in-tree providers, these options can make choosing the right storage difficult.\n\nKubestr can assist in the following ways-\n- Identify the various storage options present in a cluster.\n- Validate if the storage options are configured correctly.\n- Evaluate the storage using common benchmarking tools like FIO.\n\n[![asciicast](https://asciinema.org/a/7iJTbWKwdhPHNWYV00LIgx7gn.svg)](https://asciinema.org/a/7iJTbWKwdhPHNWYV00LIgx7gn)\n\n## Resources \nVideo \n* [Cloud Native Live: Introducing Kubestr – A New Way to Explore your Kubernetes Storage Options](https://youtu.be/N79NY_0aO0w)\n* [Introducing Kubestr - A handy tool for Kubernetes Storage](https://youtu.be/U3Rt9vcuQdc)\n* [A new way to benchmark your kubernetes storage DoK Talks #71](https://www.youtube.com/watch?v=g64eIOk_Ob4)\n\n\nBlogs \n* [Benchmarking and Evaluating Your Kubernetes Storage with Kubestr](https://blog.kasten.io/benchmarking-kubernetes-storage-with-kubestr)\n* [Kubestr: The Easy Button for Validating and Debugging Your Storage in Kubernetes](https://thenewstack.io/kubestr-the-easy-button-for-validating-and-debugging-your-storage-in-kubernetes/)\n* [Introducing Kubestr - A handy tool for Kubernetes Storage](https://vzilla.co.uk/vzilla-blog/introducing-kubestr-a-handy-tool-for-kubernetes-storage)\n\n\n## Using Kubestr\n### To install the tool -\n- Ensure that the kubernetes context is set and the cluster is accessible through your terminal. 
(Does [kubectl](https://kubernetes.io/docs/tasks/tools/install-kubectl/) work?)\n- Download the latest release [here](https://github.com/kastenhq/kubestr/releases/latest).\n- Unpack the tool and make it an executable `chmod +x kubestr`.\n\n### To discover available storage options -\n- Run `./kubestr`\n\n### To run an FIO test -\n- Run `./kubestr fio -s <storage class>`\n- Additional options like `--size` and `--fiofile` can be specified.\n- For more information visit our [fio](https://github.com/kastenhq/kubestr/blob/master/FIO.md) page.\n\n### To check a CSI driver's snapshot and restore capabilities -\n- Run `./kubestr csicheck -s <storage class> -v <volume snapshot class>`\n\n### To check if a StorageClass supports a block mount -\n- Run `./kubestr blockmount -s StorageClass`\n\n## Roadmap\n- In the future we plan to allow users to post their FIO results and compare to others.\n"
  },
  {
    "path": "_config.yml",
    "content": "theme: jekyll-theme-cayman\ntitle: Kubestr\ndescription: Explore your kubernetes storage options\n"
  },
  {
    "path": "_posts/2021-02-07-FasterStorage.md",
    "content": "---\nlayout: post\ntitle: \"Faster Storage\"\ndate: 2021-02-07 \npublished: true\ncategories: fio storage\n---\n\nSome content other\n"
  },
  {
    "path": "cmd/rootCmd.go",
    "content": "// Copyright 2020 Kubestr Developers\n\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you may not use this file except in compliance with the License.\n// You may obtain a copy of the License at\n\n// \thttp://www.apache.org/licenses/LICENSE-2.0\n\n// Unless required by applicable law or agreed to in writing, software\n// distributed under the License is distributed on an \"AS IS\" BASIS,\n// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n// See the License for the specific language governing permissions and\n// limitations under the License.\n\npackage cmd\n\nimport (\n\t\"context\"\n\t\"encoding/json\"\n\t\"fmt\"\n\t\"os\"\n\t\"time\"\n\n\t\"github.com/kastenhq/kubestr/pkg/block\"\n\t\"github.com/kastenhq/kubestr/pkg/csi\"\n\tcsitypes \"github.com/kastenhq/kubestr/pkg/csi/types\"\n\t\"github.com/kastenhq/kubestr/pkg/fio\"\n\t\"github.com/kastenhq/kubestr/pkg/kubestr\"\n\t\"github.com/spf13/cobra\"\n)\n\nvar (\n\toutput  string\n\toutfile string\n\trootCmd = &cobra.Command{\n\t\tUse:   \"kubestr\",\n\t\tShort: \"A tool to validate kubernetes storage\",\n\t\tLong: `kubestr is a tool that will scan your k8s cluster\n\t\tand validate that the storage systems in place as well as run\n\t\tperformance tests.`,\n\t\tSilenceUsage: true,\n\t\tArgs:         cobra.ExactArgs(0),\n\t\tRunE: func(cmd *cobra.Command, args []string) error {\n\t\t\tctx, cancel := context.WithTimeout(context.Background(), 5*time.Minute)\n\t\t\tdefer cancel()\n\t\t\treturn Baseline(ctx, output)\n\t\t},\n\t}\n\n\tstorageClass   string\n\tnamespace      string\n\tcontainerImage string\n\n\tfioCheckerSize     string\n\tfioNodeSelector    map[string]string\n\tfioCheckerFilePath string\n\tfioCheckerTestName string\n\tfioCmd             = &cobra.Command{\n\t\tUse:   \"fio\",\n\t\tShort: \"Runs an fio test\",\n\t\tLong:  `Run an fio test`,\n\t\tArgs:  cobra.ExactArgs(0),\n\t\tRunE: func(cmd *cobra.Command, args []string) error 
{\n\t\t\tctx, cancel := context.WithTimeout(context.Background(), 5*time.Minute)\n\t\t\tdefer cancel()\n\t\t\treturn Fio(ctx, output, outfile, storageClass, fioCheckerSize, namespace, fioNodeSelector, fioCheckerTestName, fioCheckerFilePath, containerImage)\n\t\t},\n\t}\n\n\tcsiCheckVolumeSnapshotClass string\n\tcsiCheckRunAsUser           int64\n\tcsiCheckCleanup             bool\n\tcsiCheckSkipCFSCheck        bool\n\tcsiCheckCmd                 = &cobra.Command{\n\t\tUse:   \"csicheck\",\n\t\tShort: \"Runs the CSI snapshot restore check\",\n\t\tLong:  \"Validates a CSI provisioners ability to take a snapshot of an application and restore it\",\n\t\tArgs:  cobra.ExactArgs(0),\n\t\tRunE: func(cmd *cobra.Command, args []string) error {\n\t\t\tctx, cancel := context.WithTimeout(context.Background(), 5*time.Minute)\n\t\t\tdefer cancel()\n\t\t\treturn CSICheck(ctx, output, outfile, namespace, storageClass, csiCheckVolumeSnapshotClass, csiCheckRunAsUser, containerImage, csiCheckCleanup, csiCheckSkipCFSCheck)\n\t\t},\n\t}\n\n\tbrowseLocalPort int\n\tbrowseCmd       = &cobra.Command{\n\t\tUse:        \"browse\",\n\t\tShort:      \"Browse the contents of PVC or VolumeSnapshot\",\n\t\tLong:       \"Browse the contents of a CSI provisioned PVC or a CSI provisioned VolumeSnapshot.\",\n\t\tDeprecated: \"use 'browse pvc' instead\",\n\t\tArgs:       cobra.ExactArgs(1),\n\t\tRunE: func(cmd *cobra.Command, args []string) error {\n\t\t\treturn browsePvcCmd.RunE(cmd, args)\n\t\t},\n\t}\n\n\tshowTree bool\n\n\tbrowsePvcCmd = &cobra.Command{\n\t\tUse:   \"pvc [PVC name]\",\n\t\tShort: \"Browse the contents of a CSI PVC via file browser\",\n\t\tLong:  \"Browse the contents of a CSI provisioned PVC by cloning the volume and mounting it with a file browser.\",\n\t\tArgs:  cobra.ExactArgs(1),\n\t\tRunE: func(cmd *cobra.Command, args []string) error {\n\t\t\treturn CsiPvcBrowse(context.Background(), 
args[0],\n\t\t\t\tnamespace,\n\t\t\t\tcsiCheckVolumeSnapshotClass,\n\t\t\t\tcsiCheckRunAsUser,\n\t\t\t\tbrowseLocalPort,\n\t\t\t\tshowTree,\n\t\t\t)\n\t\t},\n\t}\n\n\tbrowseSnapshotCmd = &cobra.Command{\n\t\tUse:   \"snapshot [Snapshot name]\",\n\t\tShort: \"Browse the contents of a CSI VolumeSnapshot via file browser\",\n\t\tLong:  \"Browse the contents of a CSI provisioned VolumeSnapshot by cloning the volume and mounting it with a file browser.\",\n\t\tArgs:  cobra.ExactArgs(1),\n\t\tRunE: func(cmd *cobra.Command, args []string) error {\n\t\t\treturn CsiSnapshotBrowse(context.Background(), args[0],\n\t\t\t\tnamespace,\n\t\t\t\tcsiCheckRunAsUser,\n\t\t\t\tbrowseLocalPort,\n\t\t\t\tshowTree,\n\t\t\t)\n\t\t},\n\t}\n\n\tfromSnapshot   string\n\tfromPVC        string\n\ttoPVC          string\n\tpath           string\n\trestoreFileCmd = &cobra.Command{\n\t\tUse:   \"file-restore\",\n\t\tShort: \"Restore file(s) from a Snapshot or PVC to it's source PVC\",\n\t\tLong:  \"Restore file(s) from a given CSI provisioned VolumeSnapshot or PersistentVolumeClaim to another PersistentVolumeClaim.\",\n\t\tArgs:  cobra.ExactArgs(0),\n\t\tRunE: func(cmd *cobra.Command, args []string) error {\n\t\t\treturn FileRestore(context.Background(),\n\t\t\t\tfromSnapshot,\n\t\t\t\tfromPVC,\n\t\t\t\ttoPVC,\n\t\t\t\tnamespace,\n\t\t\t\tcsiCheckRunAsUser,\n\t\t\t\tbrowseLocalPort,\n\t\t\t\tpath)\n\t\t},\n\t}\n\n\tblockMountRunAsUser          int64\n\tblockMountCleanup            bool\n\tblockMountCleanupOnly        bool\n\tblockMountWaitTimeoutSeconds uint32\n\tblockMountPVCSize            string\n\tblockMountCmd                = &cobra.Command{\n\t\tUse:   \"blockmount\",\n\t\tShort: \"Checks if a storage class supports block volumes\",\n\t\tLong: `Checks if volumes provisioned by a storage class can be mounted in block mode.\n\nThe checker works as follows:\n- It dynamically provisions a volume of the given storage class.\n- It then launches a pod with the volume mounted as a block device.\n- 
If the pod is successfully created then the test passes.\n- If the pod fails or times out then the test fails.\n\nIn case of failure, re-run the checker with the \"-c=false\" flag and examine the\nfailed PVC and Pod: it may be necessary to adjust the default values used for\nthe PVC size, the pod wait timeout, etc. Clean up the failed resources by\nrunning the checker with the \"--cleanup-only\" flag.\n`,\n\t\tArgs: cobra.ExactArgs(0),\n\t\tRunE: func(cmd *cobra.Command, args []string) error {\n\t\t\tctx, cancel := context.WithTimeout(context.Background(), 5*time.Minute)\n\t\t\tdefer cancel()\n\n\t\t\tcheckerArgs := block.BlockMountCheckerArgs{\n\t\t\t\tStorageClass:          storageClass,\n\t\t\t\tNamespace:             namespace,\n\t\t\t\tCleanup:               blockMountCleanup,\n\t\t\t\tRunAsUser:             blockMountRunAsUser,\n\t\t\t\tContainerImage:        containerImage,\n\t\t\t\tK8sObjectReadyTimeout: (time.Second * time.Duration(blockMountWaitTimeoutSeconds)),\n\t\t\t\tPVCSize:               blockMountPVCSize,\n\t\t\t}\n\t\t\treturn BlockMountCheck(ctx, output, outfile, blockMountCleanupOnly, checkerArgs)\n\t\t},\n\t}\n)\n\nfunc init() {\n\trootCmd.PersistentFlags().StringVarP(&output, \"output\", \"o\", \"\", \"Options(json)\")\n\trootCmd.PersistentFlags().StringVarP(&outfile, \"outfile\", \"e\", \"\", \"The file where test results will be written\")\n\n\trootCmd.AddCommand(fioCmd)\n\tfioCmd.Flags().StringVarP(&storageClass, \"storageclass\", \"s\", \"\", \"The name of a Storageclass. (Required)\")\n\t_ = fioCmd.MarkFlagRequired(\"storageclass\")\n\tfioCmd.Flags().StringVarP(&fioCheckerSize, \"size\", \"z\", fio.DefaultPVCSize, \"The size of the volume used to run FIO. 
Note that the FIO job definition is not scaled accordingly.\")\n\tfioCmd.Flags().StringVarP(&namespace, \"namespace\", \"n\", fio.DefaultNS, \"The namespace used to run FIO.\")\n\tfioCmd.Flags().StringToStringVarP(&fioNodeSelector, \"nodeselector\", \"N\", map[string]string{}, \"Node selector applied to pod.\")\n\tfioCmd.Flags().StringVarP(&fioCheckerFilePath, \"fiofile\", \"f\", \"\", \"The path to a an fio config file.\")\n\tfioCmd.Flags().StringVarP(&fioCheckerTestName, \"testname\", \"t\", \"\", \"The Name of a predefined kubestr fio test. Options(default-fio)\")\n\tfioCmd.Flags().StringVarP(&containerImage, \"image\", \"i\", \"\", \"The container image used to create a pod.\")\n\n\trootCmd.AddCommand(csiCheckCmd)\n\tcsiCheckCmd.Flags().StringVarP(&storageClass, \"storageclass\", \"s\", \"\", \"The name of a Storageclass. (Required)\")\n\t_ = csiCheckCmd.MarkFlagRequired(\"storageclass\")\n\tcsiCheckCmd.Flags().StringVarP(&csiCheckVolumeSnapshotClass, \"volumesnapshotclass\", \"v\", \"\", \"The name of a VolumeSnapshotClass. (Required)\")\n\t_ = csiCheckCmd.MarkFlagRequired(\"volumesnapshotclass\")\n\tcsiCheckCmd.Flags().StringVarP(&namespace, \"namespace\", \"n\", fio.DefaultNS, \"The namespace used to run the check.\")\n\tcsiCheckCmd.Flags().StringVarP(&containerImage, \"image\", \"i\", \"\", \"The container image used to create a pod.\")\n\tcsiCheckCmd.Flags().BoolVarP(&csiCheckCleanup, \"cleanup\", \"c\", true, \"Clean up the objects created by tool\")\n\tcsiCheckCmd.Flags().Int64VarP(&csiCheckRunAsUser, \"runAsUser\", \"u\", 0, \"Runs the CSI check pod with the specified user ID (int)\")\n\tcsiCheckCmd.Flags().BoolVarP(&csiCheckSkipCFSCheck, \"skipCFScheck\", \"k\", false, \"Use this flag to skip validating the ability to clone a snapshot.\")\n\n\trootCmd.AddCommand(browseCmd)\n\tbrowseCmd.Flags().StringVarP(&csiCheckVolumeSnapshotClass, \"volumesnapshotclass\", \"v\", \"\", \"The name of a VolumeSnapshotClass. 
(Required)\")\n\t_ = browseCmd.MarkFlagRequired(\"volumesnapshotclass\")\n\tbrowseCmd.PersistentFlags().StringVarP(&namespace, \"namespace\", \"n\", fio.DefaultNS, \"The namespace of the resource to browse.\")\n\tbrowseCmd.PersistentFlags().Int64VarP(&csiCheckRunAsUser, \"runAsUser\", \"u\", 0, \"Runs the inspector pod as a user (int)\")\n\tbrowseCmd.PersistentFlags().IntVarP(&browseLocalPort, \"localport\", \"l\", 8080, \"The local port to expose the inspector\")\n\tbrowseCmd.PersistentFlags().BoolVarP(&showTree, \"show-tree\", \"t\", false, \"Prints the contents of given PVC or VolumeSnapshot\")\n\n\tbrowseCmd.AddCommand(browsePvcCmd)\n\tbrowsePvcCmd.Flags().StringVarP(&csiCheckVolumeSnapshotClass, \"volumesnapshotclass\", \"v\", \"\", \"The name of a VolumeSnapshotClass. (Required)\")\n\t_ = browsePvcCmd.MarkFlagRequired(\"volumesnapshotclass\")\n\n\tbrowseCmd.AddCommand(browseSnapshotCmd)\n\n\trootCmd.AddCommand(restoreFileCmd)\n\trestoreFileCmd.Flags().StringVarP(&fromSnapshot, \"fromSnapshot\", \"f\", \"\", \"The name of a VolumeSnapshot.\")\n\trestoreFileCmd.Flags().StringVarP(&fromPVC, \"fromPVC\", \"v\", \"\", \"The name of a PersistentVolumeClaim.\")\n\trestoreFileCmd.MarkFlagsMutuallyExclusive(\"fromSnapshot\", \"fromPVC\")\n\trestoreFileCmd.MarkFlagsOneRequired(\"fromSnapshot\", \"fromPVC\")\n\trestoreFileCmd.Flags().StringVarP(&toPVC, \"toPVC\", \"t\", \"\", \"The name of a PersistentVolumeClaim.\")\n\trestoreFileCmd.Flags().StringVarP(&namespace, \"namespace\", \"n\", fio.DefaultNS, \"The namespace of both the given PVC & VS.\")\n\trestoreFileCmd.Flags().Int64VarP(&csiCheckRunAsUser, \"runAsUser\", \"u\", 0, \"Runs the inspector pod as a user (int)\")\n\trestoreFileCmd.Flags().IntVarP(&browseLocalPort, \"localport\", \"l\", 8080, \"The local port to expose the inspector\")\n\trestoreFileCmd.Flags().StringVarP(&path, \"path\", \"p\", \"\", \"Path of a file or directory that needs to be 
restored\")\n\n\trootCmd.AddCommand(blockMountCmd)\n\tblockMountCmd.Flags().StringVarP(&storageClass, \"storageclass\", \"s\", \"\", \"The name of a StorageClass. (Required)\")\n\t_ = blockMountCmd.MarkFlagRequired(\"storageclass\")\n\tblockMountCmd.Flags().StringVarP(&namespace, \"namespace\", \"n\", fio.DefaultNS, \"The namespace used to run the check.\")\n\tblockMountCmd.Flags().StringVarP(&containerImage, \"image\", \"i\", \"\", \"The container image used to create a pod.\")\n\tblockMountCmd.Flags().BoolVarP(&blockMountCleanup, \"cleanup\", \"c\", true, \"Clean up the objects created by the check.\")\n\tblockMountCmd.Flags().BoolVarP(&blockMountCleanupOnly, \"cleanup-only\", \"\", false, \"Do not run the checker, but just clean up resources left from a previous invocation.\")\n\tblockMountCmd.Flags().Int64VarP(&blockMountRunAsUser, \"runAsUser\", \"u\", 0, \"Runs the block mount check pod with the specified user ID (int)\")\n\tblockMountCmd.Flags().Uint32VarP(&blockMountWaitTimeoutSeconds, \"wait-timeout\", \"w\", 60, \"Max time in seconds to wait for the check pod to become ready\")\n\tblockMountCmd.Flags().StringVarP(&blockMountPVCSize, \"pvc-size\", \"\", \"1Gi\", \"The size of the provisioned PVC.\")\n}\n\n// Execute executes the main command\nfunc Execute() error {\n\treturn rootCmd.Execute()\n}\n\n// Baseline executes the baseline check\nfunc Baseline(ctx context.Context, output string) error {\n\tp, err := kubestr.NewKubestr()\n\tif err != nil {\n\t\tfmt.Println(err.Error())\n\t\treturn err\n\t}\n\tfmt.Print(kubestr.Logo)\n\tresult := p.KubernetesChecks()\n\n\tif PrintAndJsonOutput(result, output, outfile) {\n\t\treturn err\n\t}\n\n\tfor _, retval := range result {\n\t\tretval.Print()\n\t\tfmt.Println()\n\t\ttime.Sleep(500 * time.Millisecond)\n\t}\n\n\tprovisionerList, err := p.ValidateProvisioners(ctx)\n\tif err != nil {\n\t\tfmt.Println(err.Error())\n\t\treturn err\n\t}\n\n\tfmt.Println(\"Available Storage 
Provisioners:\")\n\tfmt.Println()\n\ttime.Sleep(500 * time.Millisecond) // Added to introduce lag.\n\tfor _, provisioner := range provisionerList {\n\t\tprovisioner.Print()\n\t\tfmt.Println()\n\t\ttime.Sleep(500 * time.Millisecond)\n\t}\n\treturn err\n}\n\n// PrintAndJsonOutput Print JSON output to stdout and to file if arguments say so\n// Returns whether we have generated output or JSON\nfunc PrintAndJsonOutput(result []*kubestr.TestOutput, output string, outfile string) bool {\n\tif output == \"json\" {\n\t\tjsonRes, _ := json.MarshalIndent(result, \"\", \"    \")\n\t\tif len(outfile) > 0 {\n\t\t\terr := os.WriteFile(outfile, jsonRes, 0666)\n\t\t\tif err != nil {\n\t\t\t\tfmt.Println(\"Error writing output:\", err.Error())\n\t\t\t\tos.Exit(2)\n\t\t\t}\n\t\t} else {\n\t\t\tfmt.Println(string(jsonRes))\n\t\t}\n\t\treturn true\n\t}\n\treturn false\n}\n\n// Fio executes the FIO test.\nfunc Fio(ctx context.Context, output, outfile, storageclass, size, namespace string, nodeSelector map[string]string, jobName, fioFilePath string, containerImage string) error {\n\tcli, err := kubestr.LoadKubeCli()\n\tif err != nil {\n\t\tfmt.Println(err.Error())\n\t\treturn err\n\t}\n\tfioRunner := &fio.FIOrunner{\n\t\tCli: cli,\n\t}\n\ttestName := \"FIO test results\"\n\tvar result *kubestr.TestOutput\n\tfioResult, err := fioRunner.RunFio(ctx, &fio.RunFIOArgs{\n\t\tStorageClass:   storageclass,\n\t\tSize:           size,\n\t\tNamespace:      namespace,\n\t\tNodeSelector:   nodeSelector,\n\t\tFIOJobName:     jobName,\n\t\tFIOJobFilepath: fioFilePath,\n\t\tImage:          containerImage,\n\t})\n\tif err != nil {\n\t\tresult = kubestr.MakeTestOutput(testName, kubestr.StatusError, err.Error(), fioResult)\n\t} else {\n\t\tresult = kubestr.MakeTestOutput(testName, kubestr.StatusOK, fmt.Sprintf(\"\\n%s\", fioResult.Result.Print()), fioResult)\n\t}\n\tvar wrappedResult = []*kubestr.TestOutput{result}\n\tif !PrintAndJsonOutput(wrappedResult, output, outfile) 
{\n\t\tresult.Print()\n\t}\n\treturn err\n}\n\nfunc CSICheck(ctx context.Context, output, outfile,\n\tnamespace string,\n\tstorageclass string,\n\tvolumesnapshotclass string,\n\trunAsUser int64,\n\tcontainerImage string,\n\tcleanup bool,\n\tskipCFScheck bool,\n) error {\n\ttestName := \"CSI checker test\"\n\tkubecli, err := kubestr.LoadKubeCli()\n\tif err != nil {\n\t\tfmt.Printf(\"Failed to load kubeCli (%s)\", err.Error())\n\t\treturn err\n\t}\n\tdyncli, err := kubestr.LoadDynCli()\n\tif err != nil {\n\t\tfmt.Printf(\"Failed to load dynCli (%s)\", err.Error())\n\t\treturn err\n\t}\n\tcsiCheckRunner := &csi.SnapshotRestoreRunner{\n\t\tKubeCli: kubecli,\n\t\tDynCli:  dyncli,\n\t}\n\tvar result *kubestr.TestOutput\n\tcsiCheckResult, err := csiCheckRunner.RunSnapshotRestore(ctx, &csitypes.CSISnapshotRestoreArgs{\n\t\tStorageClass:        storageclass,\n\t\tVolumeSnapshotClass: volumesnapshotclass,\n\t\tNamespace:           namespace,\n\t\tRunAsUser:           runAsUser,\n\t\tContainerImage:      containerImage,\n\t\tCleanup:             cleanup,\n\t\tSkipCFSCheck:        skipCFScheck,\n\t})\n\tif err != nil {\n\t\tresult = kubestr.MakeTestOutput(testName, kubestr.StatusError, err.Error(), csiCheckResult)\n\t} else {\n\t\tresult = kubestr.MakeTestOutput(testName, kubestr.StatusOK, \"CSI application successfully snapshotted and restored.\", csiCheckResult)\n\t}\n\n\tvar wrappedResult = []*kubestr.TestOutput{result}\n\tif !PrintAndJsonOutput(wrappedResult, output, outfile) {\n\t\tresult.Print()\n\t}\n\treturn err\n}\n\nfunc CsiPvcBrowse(ctx context.Context,\n\tpvcName string,\n\tnamespace string,\n\tvolumeSnapshotClass string,\n\trunAsUser int64,\n\tlocalPort int,\n\tshowTree bool,\n) error {\n\tkubecli, err := kubestr.LoadKubeCli()\n\tif err != nil {\n\t\tfmt.Printf(\"Failed to load kubeCli (%s)\", err.Error())\n\t\treturn err\n\t}\n\tdyncli, err := kubestr.LoadDynCli()\n\tif err != nil {\n\t\tfmt.Printf(\"Failed to load dynCli (%s)\", err.Error())\n\t\treturn 
err\n\t}\n\tbrowseRunner := &csi.PVCBrowseRunner{\n\t\tKubeCli: kubecli,\n\t\tDynCli:  dyncli,\n\t}\n\terr = browseRunner.RunPVCBrowse(ctx, &csitypes.PVCBrowseArgs{\n\t\tPVCName:             pvcName,\n\t\tNamespace:           namespace,\n\t\tVolumeSnapshotClass: volumeSnapshotClass,\n\t\tRunAsUser:           runAsUser,\n\t\tLocalPort:           localPort,\n\t\tShowTree:            showTree,\n\t})\n\tif err != nil {\n\t\tfmt.Printf(\"Failed to run PVC browser (%s)\\n\", err.Error())\n\t}\n\treturn err\n}\n\nfunc CsiSnapshotBrowse(ctx context.Context,\n\tsnapshotName string,\n\tnamespace string,\n\trunAsUser int64,\n\tlocalPort int,\n\tshowTree bool,\n) error {\n\tkubecli, err := kubestr.LoadKubeCli()\n\tif err != nil {\n\t\tfmt.Printf(\"Failed to load kubeCli (%s)\", err.Error())\n\t\treturn err\n\t}\n\tdyncli, err := kubestr.LoadDynCli()\n\tif err != nil {\n\t\tfmt.Printf(\"Failed to load dynCli (%s)\", err.Error())\n\t\treturn err\n\t}\n\tbrowseRunner := &csi.SnapshotBrowseRunner{\n\t\tKubeCli: kubecli,\n\t\tDynCli:  dyncli,\n\t}\n\terr = browseRunner.RunSnapshotBrowse(ctx, &csitypes.SnapshotBrowseArgs{\n\t\tSnapshotName: snapshotName,\n\t\tNamespace:    namespace,\n\t\tRunAsUser:    runAsUser,\n\t\tLocalPort:    localPort,\n\t\tShowTree:     showTree,\n\t})\n\tif err != nil {\n\t\tfmt.Printf(\"Failed to run Snapshot browser (%s)\\n\", err.Error())\n\t}\n\treturn err\n}\n\nfunc FileRestore(ctx context.Context,\n\tfromSnapshotName string,\n\tfromPVCName string,\n\ttoPVCName string,\n\tnamespace string,\n\trunAsUser int64,\n\tlocalPort int,\n\tpath string,\n) error {\n\tkubecli, err := kubestr.LoadKubeCli()\n\tif err != nil {\n\t\tfmt.Printf(\"Failed to load kubeCli (%s)\", err.Error())\n\t\treturn err\n\t}\n\tdyncli, err := kubestr.LoadDynCli()\n\tif err != nil {\n\t\tfmt.Printf(\"Failed to load dynCli (%s)\", err.Error())\n\t\treturn err\n\t}\n\tfileRestoreRunner := &csi.FileRestoreRunner{\n\t\tKubeCli: kubecli,\n\t\tDynCli:  dyncli,\n\t}\n\terr = 
fileRestoreRunner.RunFileRestore(ctx, &csitypes.FileRestoreArgs{\n\t\tFromSnapshotName: fromSnapshotName,\n\t\tFromPVCName:      fromPVCName,\n\t\tToPVCName:        toPVCName,\n\t\tNamespace:        namespace,\n\t\tRunAsUser:        runAsUser,\n\t\tLocalPort:        localPort,\n\t\tPath:             path,\n\t})\n\tif err != nil {\n\t\tfmt.Printf(\"Failed to run file-restore (%s)\\n\", err.Error())\n\t}\n\treturn err\n}\n\nfunc BlockMountCheck(ctx context.Context, output, outfile string, cleanupOnly bool, checkerArgs block.BlockMountCheckerArgs) error {\n\tkubecli, err := kubestr.LoadKubeCli()\n\tif err != nil {\n\t\tfmt.Printf(\"Failed to load kubeCli (%s)\", err.Error())\n\t\treturn err\n\t}\n\tcheckerArgs.KubeCli = kubecli\n\n\tdyncli, err := kubestr.LoadDynCli()\n\tif err != nil {\n\t\tfmt.Printf(\"Failed to load dynCli (%s)\", err.Error())\n\t\treturn err\n\t}\n\tcheckerArgs.DynCli = dyncli\n\n\tblockMountTester, err := block.NewBlockMountChecker(checkerArgs)\n\tif err != nil {\n\t\tfmt.Printf(\"Failed to initialize BlockMounter (%s)\", err.Error())\n\t\treturn err\n\t}\n\n\tif cleanupOnly {\n\t\tblockMountTester.Cleanup()\n\t\treturn nil\n\t}\n\n\tvar (\n\t\ttestName = \"Block VolumeMode test\"\n\t\tresult   *kubestr.TestOutput\n\t)\n\n\tmountResult, err := blockMountTester.Mount(ctx)\n\tif err != nil {\n\t\tif !checkerArgs.Cleanup {\n\t\t\tfmt.Printf(\"Warning: Resources may not have been released. 
Rerun with the additional --cleanup-only flag.\\n\")\n\t\t}\n\t\tresult = kubestr.MakeTestOutput(testName, kubestr.StatusError, fmt.Sprintf(\"StorageClass (%s) does not appear to support Block VolumeMode\", checkerArgs.StorageClass), mountResult)\n\t} else {\n\t\tresult = kubestr.MakeTestOutput(testName, kubestr.StatusOK, fmt.Sprintf(\"StorageClass (%s) supports Block VolumeMode\", checkerArgs.StorageClass), mountResult)\n\t}\n\n\tvar wrappedResult = []*kubestr.TestOutput{result}\n\tif !PrintAndJsonOutput(wrappedResult, output, outfile) {\n\t\tresult.Print()\n\t}\n\n\treturn err\n}\n"
  },
  {
    "path": "docs/README.md",
    "content": "# Kubestr\n\nKubestr is a tool that qualifies the storage options present in a cluster.  \nFor more options visit kubestr.io\n"
  },
  {
    "path": "docs/_config.yml",
    "content": "theme: jekyll-theme-hacker"
  },
  {
    "path": "extra/csi-drivers",
    "content": "# Drivers\nThe following are a set of CSI driver which can be used with Kubernetes:\n\n> NOTE: If you would like your driver to be added to this table, please open a pull request in [this repo](https://github.com/kubernetes-csi/docs/pulls) updating this file. Other Features is allowed to be filled in Raw Block, Snapshot, Expansion and Cloning. If driver did not implement any Other Features, please leave it blank.\n\n> DISCLAIMER: Information in this table has not been validated by Kubernetes SIG-Storage. Users who want to use these CSI drivers need to contact driver maintainers for driver capabilities.\n\n## Production Drivers\n\nName | CSI Driver Name | Compatible with CSI Version(s) | Description | Persistence (Beyond Pod Lifetime) | Supported Access Modes | Dynamic Provisioning | Other Features\n-----|-----------------|--------------------------------|-------------|-----------------------------------|------------------------|----------------------|--------\n[Alicloud Disk](https://github.com/AliyunContainerService/csi-plugin) | `diskplugin.csi.alibabacloud.com` | v1.0 | A Container Storage Interface (CSI) Driver for Alicloud Disk | Persistent | Read/Write Single Pod | Yes | Raw Block, Snapshot\n[Alicloud NAS](https://github.com/AliyunContainerService/csi-plugin) | `nasplugin.csi.alibabacloud.com` | v1.0 | A Container Storage Interface (CSI) Driver for Alicloud Network Attached Storage (NAS) | Persistent | Read/Write Multiple Pods | No | \n[Alicloud OSS](https://github.com/AliyunContainerService/csi-plugin)| `ossplugin.csi.alibabacloud.com` | v1.0 | A Container Storage Interface (CSI) Driver for Alicloud Object Storage Service (OSS) | Persistent | Read/Write Multiple Pods | No | \n[ArStor CSI](https://github.com/huayun-docs/csi-driver-arstor) | `arstor.csi.huayun.io` | v1.0 | A Container Storage Interface (CSI) Driver for Huayun Storage Service (ArStor) | Persistent and Ephemeral | Read/Write Single Pod | Yes | Raw Block, Snapshot, Expansion, 
Cloning\n[AWS Elastic Block Storage](https://github.com/kubernetes-sigs/aws-ebs-csi-driver) | `ebs.csi.aws.com` | v0.3, v1.0 | A Container Storage Interface (CSI) Driver for AWS Elastic Block Storage (EBS) | Persistent | Read/Write Single Pod | Yes | Raw Block, Snapshot, Expansion\n[AWS Elastic File System](https://github.com/aws/aws-efs-csi-driver) | `efs.csi.aws.com` | v0.3, v1.0 | A Container Storage Interface (CSI) Driver for AWS Elastic File System (EFS) | Persistent | Read/Write Multiple Pods | No | \n[AWS FSx for Lustre](https://github.com/aws/aws-fsx-csi-driver) | `fsx.csi.aws.com` | v0.3, v1.0 | A Container Storage Interface (CSI) Driver for AWS FSx for Lustre (EBS) | Persistent | Read/Write Multiple Pods | Yes | \n[Azure disk](https://github.com/kubernetes-sigs/azuredisk-csi-driver) | `disk.csi.azure.com` | v0.3, v1.0 | A Container Storage Interface (CSI) Driver for Azure disk | Persistent | Read/Write Single Pod | Yes | \n[Azure file](https://github.com/kubernetes-sigs/azurefile-csi-driver) | `file.csi.azure.com` | v0.3, v1.0 | A Container Storage Interface (CSI) Driver for Azure file | Persistent | Read/Write Multiple Pods | Yes | \n[BeeGFS](https://github.com/NetApp/beegfs-csi-driver) | `beegfs.csi.netapp.com` | v1.3 | A Container Storage Interface (CSI) Driver for the [BeeGFS](https://www.beegfs.io/) Parallel File System | Persistent | Read/Write Multiple Pods | Yes | \n[Bigtera VirtualStor (block)](https://github.com/bigtera-ce/ceph-csi) | `csi.block.bigtera.com` | v0.3, v1.0.0, v1.1.0 | A Container Storage Interface (CSI) Driver for Bigtera VirtualStor block storage | Persistent | Read/Write Single Pod | Yes | Raw Block, Snapshot, Expansion\n[Bigtera VirtualStor (filesystem)](https://github.com/bigtera-ce/ceph-csi) | `csi.fs.bigtera.com` | v0.3, v1.0.0, v1.1.0 | A Container Storage Interface (CSI)  Driver for Bigtera VirtualStor filesystem | Persistent | Read/Write Multiple Pods | Yes | Expansion\n[BizFlyCloud Block 
Storage](https://github.com/bizflycloud/csi-bizflycloud) | `volume.csi.bizflycloud.vn` | v1.2 | A Container Storage Interface (CSI) Driver for BizFly Cloud block storage | Persistent | Read/Write Single Pod | Yes | Raw Block, Snapshot, Expansion\n[CephFS](https://github.com/ceph/ceph-csi) | `cephfs.csi.ceph.com` | v0.3, >=v1.0.0 | A Container Storage Interface (CSI) Driver for CephFS | Persistent | Read/Write Multiple Pods | Yes | Expansion, Snapshot, Cloning\n[Ceph RBD](https://github.com/ceph/ceph-csi) | `rbd.csi.ceph.com` | v0.3, >=v1.0.0 | A Container Storage Interface (CSI)  Driver for Ceph RBD | Persistent | Read/Write Single Pod | Yes | Raw Block, Snapshot, Expansion, Topology, Cloning\n[ChubaoFS](https://github.com/chubaofs/chubaofs-csi) | `csi.chubaofs.com` | v1.0.0 | A Container Storage Interface (CSI) Driver for ChubaoFS Storage  | Persistent | Read/Write Multiple Pods | Yes | \n[Cinder](https://github.com/kubernetes/cloud-provider-openstack/tree/master/pkg/csi/cinder) | `cinder.csi.openstack.org` | v0.3, v1.0, v1.1.0, v1.2.0, v1.3.0 | A Container Storage Interface (CSI) Driver for OpenStack Cinder | Persistent and Ephemeral | Depends on the storage backend used | Yes, if storage backend supports it | Raw Block, Snapshot, Expansion, Cloning, Topology\n[cloudscale.ch](https://github.com/cloudscale-ch/csi-cloudscale) | `csi.cloudscale.ch` | v1.0 | A Container Storage Interface (CSI) Driver for the [cloudscale.ch](https://www.cloudscale.ch/) IaaS platform | Persistent | Read/Write Single Pod | Yes |Snapshot\n[Datatom-InfinityCSI](https://github.com/datatom-infinity/infinity-csi) | `csi-infiblock-plugin` | v0.3, v1.0.0, v1.1.0 | A Container Storage Interface (CSI)  Driver for DATATOM Infinity storage | Persistent | Read/Write Single Pod | Yes | Raw Block, Snapshot, Expansion, Topology\n[Datatom-InfinityCSI (filesystem)](https://github.com/datatom-infinity/infinity-csi) | `csi-infifs-plugin` | v0.3, v1.0.0, v1.1.0 | A Container Storage Interface (CSI)  Driver 
for DATATOM Infinity filesystem storage | Persistent | Read/Write Multiple Pods | Yes | Expansion\n[Datera](https://github.com/Datera/datera-csi) | `dsp.csi.daterainc.io` | v1.0 | A Container Storage Interface (CSI) Driver for Datera Data Services Platform (DSP) | Persistent | Read/Write Single Pod | Yes |Snapshot\n[DDN EXAScaler](https://github.com/DDNStorage/exa-csi-driver) | `exa.csi.ddn.com` | v1.0, v1.1 | A Container Storage Interface (CSI) Driver for DDN EXAScaler filesystems | Persistent | Read/Write Multiple Pods | Yes | Expansion\n[Dell EMC PowerMax](https://github.com/dell/csi-powermax) | `csi-powermax.dellemc.com` | v1.1 | A Container Storage Interface (CSI) Driver for [Dell EMC PowerMax](https://www.delltechnologies.com/en-us/storage/powermax.htm) | Persistent | Read/Write Single Pod | Yes | Raw Block, Snapshot, Expansion, Cloning, Topology\n[Dell EMC PowerScale](https://github.com/dell/csi-powerscale) | `csi-isilon.dellemc.com` | v1.1 | A Container Storage Interface (CSI) Driver for [Dell EMC PowerScale](https://www.delltechnologies.com/en-us/storage/powerscale.htm) | Persistent and Ephemeral | Read/Write Multiple Pods | Yes | Snapshot, Expansion, Cloning, Topology\n[Dell EMC PowerStore](https://github.com/dell/csi-powerstore) | `csi-powerstore.dellemc.com` | v1.1 | A Container Storage Interface (CSI) Driver for [Dell EMC PowerStore](https://www.delltechnologies.com/en-us/storage/powerstore-storage-appliance.htm) | Persistent and Ephemeral | Read/Write Single Pod | Yes | Raw Block, Snapshot, Expansion, Cloning, Topology\n[Dell EMC Unity](https://github.com/dell/csi-unity) | `csi-unity.dellemc.com` | v1.1 | A Container Storage Interface (CSI) Driver for [Dell EMC Unity](https://www.delltechnologies.com/en-us/storage/unity.htm) | Persistent and Ephemeral | Read/Write Single Pod | Yes | Raw Block, Snapshot, Expansion, Cloning, Topology\n[Dell EMC VxFlexOS](https://github.com/dell/csi-vxflexos) | `csi-vxflexos.dellemc.com` | v1.1 | A Container Storage 
Interface (CSI) Driver for [Dell EMC VxFlexOS](https://www.delltechnologies.com/en-us/hyperconverged-infrastructure/vxflex.htm) | Persistent | Read/Write Single Pod | Yes | Raw Block, Snapshot, Expansion, Cloning, Topology\n[democratic-csi](https://github.com/democratic-csi/democratic-csi) | `org.democratic-csi.[X]` | v1.0,v1.1,v1.2,v1.3,v1.4,v1.5 | Generic CSI plugin supporting zfs based solutions ([FreeNAS](https://www.freenas.org/) / [TrueNAS](https://www.truenas.com/) and [ZoL](https://zfsonlinux.org/) solutions such as [Ubuntu](https://ubuntu.com/)), [Synology](https://www.synology.com/), and more | Persistent and Ephemeral | Read/Write Single Pod (Block Volume) <br/><br/> Read/Write Multiple Pods (File Volume) | Yes | Raw Block, Snapshot, Expansion, Cloning\n[Diamanti-CSI](https://diamanti.com/use-cases/io-acceleration/#csi) | `dcx.csi.diamanti.com` | v1.0 | A Container Storage Interface (CSI) Driver for Diamanti DCX Platform | Persistent | Read/Write Single Pod | Yes | Raw Block, Snapshot, Expansion\n[DigitalOcean Block Storage](https://github.com/digitalocean/csi-digitalocean) | `dobs.csi.digitalocean.com` | v0.3, v1.0 | A Container Storage Interface (CSI) Driver for DigitalOcean Block Storage | Persistent | Read/Write Single Pod | Yes | Raw Block, Snapshot, Expansion\n[Dothill-CSI](https://github.com/enix/dothill-csi) | `dothill.csi.enix.io` | v1.3 | Generic CSI plugin supporting [Seagate AssuredSan](https://www.seagate.com/fr/fr/support/dothill-san/assuredsan-pro-5000-series/) appliances such as [HPE MSA](https://www.hpe.com/us/en/storage/flash-hybrid.html), [Dell EMC PowerVault ME4](https://www.dell.com/fr-fr/work/shop/productdetailstxn/powervault-me4-series) and others ... | Persistent | Read/Write Single Node | Yes | Snapshot, Expansion\n[Ember CSI](https://ember-csi.io) | `[x].ember-csi.io` | v0.2, v0.3, v1.0 | Multi-vendor CSI plugin supporting over 80 Drivers to provide block and mount storage to Container Orchestration systems. 
| Persistent | Read/Write Single Pod | Yes | Raw Block, Snapshot\n[Excelero NVMesh](https://github.com/Excelero/nvmesh-csi-driver) | `nvmesh-csi.excelero.com` | v1.0, v1.1 | A Container Storage Interface (CSI) Driver for Excelero NVMesh | Persistent | Read/Write Multiple Pods | Yes | Raw Block, Expansion\n[GCE Persistent Disk](https://github.com/kubernetes-sigs/gcp-compute-persistent-disk-csi-driver) | `pd.csi.storage.gke.io` | v0.3, v1.0 | A Container Storage Interface (CSI) Driver for Google Compute Engine Persistent Disk (GCE PD) | Persistent | Read/Write Single Pod | Yes | Raw Block, Snapshot, Expansion, Topology\n[Google Cloud Filestore](https://github.com/kubernetes-sigs/gcp-filestore-csi-driver) | `com.google.csi.filestore` | v0.3 | A Container Storage Interface (CSI) Driver for Google Cloud Filestore | Persistent | Read/Write Multiple Pods | Yes | \n[Google Cloud Storage](https://github.com/ofek/csi-gcs) | `gcs.csi.ofek.dev` | v1.0 | A Container Storage Interface (CSI) Driver for Google Cloud Storage | Persistent and Ephemeral | Read/Write Multiple Pods | Yes | Expansion\n[GlusterFS](https://github.com/gluster/gluster-csi-driver) | `org.gluster.glusterfs` | v0.3, v1.0 | A Container Storage Interface (CSI) Driver for GlusterFS | Persistent | Read/Write Multiple Pods | Yes | Snapshot\n[Gluster VirtBlock](https://github.com/gluster/gluster-csi-driver) | `org.gluster.glustervirtblock` | v0.3, v1.0 | A Container Storage Interface (CSI) Driver for Gluster Virtual Block volumes | Persistent | Read/Write Single Pod | Yes | \n[Hammerspace CSI](https://github.com/hammer-space/csi-plugin) | `com.hammerspace.csi` | v0.3, v1.0 | A Container Storage Interface (CSI) Driver for Hammerspace Storage | Persistent | Read/Write Multiple Pods | Yes | Raw Block, Snapshot\n[Hedvig](https://documentation.commvault.com/commvault/hedvig/others/pdf/Hedvig_CSI_User_Guide.pdf) | `io.hedvig.csi` | v1.0 | A Container Storage Interface (CSI) Driver for Hedvig | Persistent | Read/Write 
Multiple Pods | Yes | Raw Block, Snapshot, Expansion\n[Hetzner Cloud Volumes CSI](https://github.com/hetznercloud/csi-driver) | `csi.hetzner.cloud` | v0.3, v1.0 | A Container Storage Interface (CSI) Driver for Hetzner Cloud Volumes | Persistent | Read/Write Single Pod | Yes | Raw Block, Expansion\n[Hitachi Vantara](https://knowledge.hitachivantara.com/Documents/Adapters_and_Drivers/Storage_Adapters_and_Drivers/Containers) | `hspc.csi.hitachi.com` | v1.2 | A Container Storage Interface (CSI) Driver for VSP series Storage | Persistent | Read/Write Single Pod | Yes | Raw Block, Snapshot, Expansion, Cloning\n[HPE](https://github.com/hpe-storage/csi-driver) | `csi.hpe.com` | v1.3 | A [multi-platform](https://scod.hpedev.io/csi_driver) Container Storage Interface (CSI) driver. Supports [HPE Alletra](https://hpe.com/storage/alletra), [Nimble Storage](https://hpe.com/storage/nimble), [Primera](https://hpe.com/storage/primera) and [3PAR](https://hpe.com/storage/3par) | Persistent and Ephemeral | Read/Write Multiple Pods | Yes | Raw Block, Snapshot, Expansion, Cloning\n[HPE Ezmeral (MapR)](https://github.com/mapr/mapr-csi) | `com.mapr.csi-kdf` | v1.3 | A Container Storage Interface (CSI) Driver for HPE Ezmeral Data Fabric | Persistent | Read/Write Multiple Pods | Yes | Raw Block, Snapshot, Expansion, Cloning\n[Huawei Storage CSI](https://github.com/Huawei/eSDK_K8S_Plugin) | `csi.huawei.com` | v1.0, v1.1, v1.2 | A Container Storage Interface (CSI) Driver for FusionStorage, OceanStor 100D, OceanStor Pacific, OceanStor Dorado V3, OceanStor Dorado V6, OceanStor V3, OceanStor V5 | Persistent | Read/Write Multiple Pod | Yes | Snapshot, Expansion, Cloning\n[HyperV CSI](https://github.com/Zetanova/hyperv-csi-driver) | `eu.zetanova.csi.hyperv` | v1.0, v1.1 | A Container Storage Interface (CSI) driver to manage hyperv hosts | Persistent | Read/Write Multiple Pods | Yes | \n[IBM Block Storage](https://github.com/ibm/ibm-block-csi-driver) | `block.csi.ibm.com` | v1.0, v1.1, v1.2 | A 
Container Storage Interface (CSI) [Driver](https://www.ibm.com/docs/en/stg-block-csi-driver) for IBM Spectrum Virtualize Family, IBM FlashSystem A9000 and A9000R, IBM DS8000 Family 8.x and higher. | Persistent | Read/Write Single Pod | Yes | Raw Block, Snapshot, Expansion, Cloning, Topology\n[IBM Spectrum Scale](https://github.com/IBM/ibm-spectrum-scale-csi) | `spectrumscale.csi.ibm.com` | v1.0, v1.1 | A Container Storage Interface (CSI) [Driver](https://www.ibm.com/docs/en/spectrum-scale-csi) for the IBM Spectrum Scale File System | Persistent | Read/Write Multiple Pod | Yes | Snapshot\n[IBM Cloud Block Storage VPC CSI Driver](https://cloud.ibm.com/docs/containers?topic=containers-vpc-block) | `vpc.block.csi.ibm.io` | v1.0 | A Container Storage Interface (CSI) [Driver](https://cloud.ibm.com/docs/containers?topic=containers-vpc-block) for IBM Cloud Kubernetes Service and Red Hat OpenShift on IBM Cloud | Persistent | Read/Write Single Pod | Yes | Raw Block |\n[Infinidat](https://github.com/Infinidat/infinibox-csi-driver) | `infinibox-csi-driver` | v1.0, v1.1 | A Container Storage Interface (CSI) Driver for Infinidat [InfiniBox](https://infinidat.com/en/products-technology/infinibox) | Persistent | Read/Write Multiple Pods | Yes | Raw Block, Snapshot, Expansion, Cloning\n[Inspur InStorage CSI](https://github.com/OpenInspur/instorage-k8s) | `csi-instorage` | v1.0 | A Container Storage Interface (CSI) Driver for inspur AS/HF/CS/CF Series Primary Storage, inspur AS13000 Series SDS Storage | Persistent | Read/Write Single Pod | Yes | Raw Block, Snapshot, Expansion, Cloning\n[Intel PMEM-CSI](https://github.com/intel/pmem-csi) | `pmem-csi.intel.com` | v1.0 | A Container Storage Interface (CSI) driver for [PMEM](https://pmem.io/) from Intel | Persistent and Ephemeral | Read/Write Single Pod | Yes | Raw Block\n[Intelliflash Block Storage](https://github.com/DDNStorage/intelliflash-csi-block-driver) | `intelliflash-csi-block-driver.intelliflash.com` | v1.0, v1.1, v1.2 | A 
Container Storage Interface (CSI) Driver for Intelliflash  Block Storage | Persistent | Read/Write Multiple Pods | Yes | Snapshot, Expansion, Cloning, Topology\n[Intelliflash File Storage](https://github.com/DDNStorage/intelliflash-csi-file-driver) | `intelliflash-csi-file-driver.intelliflash.com` | v1.0, v1.1, v1.2 | A Container Storage Interface (CSI) Driver for Intelliflash  File Storage | Persistent | Read/Write Multiple Pods | Yes | Snapshot, Expansion, Cloning, Topology\n[ionir ](https://github.com/ionir-cloud) | `ionir` | v1.2 | A Container Storage Interface (CSI) Driver for [ionir](https://www.ionir.com/) Kubernetes-Native Storage | Persistent | Read/Write Single Pod | Yes | Raw Block, Cloning\n[JuiceFS](https://github.com/juicedata/juicefs-csi-driver) | `csi.juicefs.com` | v0.3, v1.0 | A Container Storage Interface (CSI) Driver for JuiceFS File System | Persistent | Read/Write Multiple Pod | Yes | \n[kaDalu](https://github.com/kadalu/kadalu) | `org.kadalu.gluster` | v0.3 | A CSI Driver (and operator) for GlusterFS | Persistent | Read/Write Multiple Pods | Yes | \n[KumoScale Block Storage](https://github.com/KioxiaAmerica/kumoscale-csi) | `kumoscale.kioxia.com` | v1.0 | A Container Storage Interface (CSI) Driver for KumoScale Block Storage | Persistent | Read/Write Single Pod | Yes | Raw Block, Snapshot, Expansion, Topology\n[Linode Block Storage](https://github.com/linode/linode-blockstorage-csi-driver) | `linodebs.csi.linode.com` | v1.0 | A Container Storage Interface (CSI) Driver for Linode Block Storage | Persistent | Read/Write Single Pod | Yes | \n[LINSTOR](https://github.com/piraeusdatastore/linstor-csi) | `linstor.csi.linbit.com` | v1.2 | A Container Storage Interface (CSI) Driver for [LINSTOR](https://www.linbit.com/en/linstor/) volumes | Persistent | Read/Write Single Pod | Yes | Raw Block, Snapshot, Expansion, Cloning, Topology\n[Longhorn](https://github.com/longhorn/longhorn) | `driver.longhorn.io` | v1.2 | A Container Storage Interface (CSI) 
Driver for [Longhorn](https://longhorn.io/) volumes | Persistent | Read/Write Single Node | Yes | Raw Block\n[MacroSAN](https://github.com/macrosan-csi/macrosan-csi-driver) | `csi-macrosan` | v1.0 | A Container Storage Interface (CSI) Driver for MacroSAN Block Storage | Persistent | Read/Write Single Pod | Yes | \n[Manila](https://github.com/kubernetes/cloud-provider-openstack/tree/master/pkg/csi/manila) | `manila.csi.openstack.org` | v1.1, v1.2 | A Container Storage Interface (CSI) Driver for OpenStack Shared File System Service (Manila) | Persistent  | Read/Write Multiple Pods | Yes | Snapshot, Topology\n[MooseFS](https://github.com/moosefs/moosefs-csi) | `com.tuxera.csi.moosefs` | v1.0 | A Container Storage Interface (CSI) Driver for [MooseFS](https://moosefs.com/) clusters. | Persistent | Read/Write Multiple Pods | Yes | \n[NetApp](https://github.com/NetApp/trident) | `csi.trident.netapp.io` | v1.0, v1.1, v1.2, v1.3 | A Container Storage Interface (CSI) Driver for NetApp's [Trident](https://netapp-trident.readthedocs.io/) container storage orchestrator | Persistent | Read/Write Multiple Pods | Yes | Raw Block, Snapshot, Expansion, Cloning, Topology\n[NexentaStor File Storage](https://github.com/Nexenta/nexentastor-csi-driver) | `nexentastor-csi-driver.nexenta.com` | v1.0, v1.1, v1.2 | A Container Storage Interface (CSI) Driver for NexentaStor  File Storage | Persistent | Read/Write Multiple Pods | Yes | Snapshot, Expansion, Cloning, Topology\n[NexentaStor Block Storage](https://github.com/Nexenta/nexentastor-csi-driver-block) | `nexentastor-block-csi-driver.nexenta.com` | v1.0, v1.1, v1.2 | A Container Storage Interface (CSI) Driver for NexentaStor over iSCSI protocol | Persistent | Read/Write Multiple Pods | Yes | Snapshot, Expansion, Cloning, Topology, Raw block\n[Nutanix](https://github.com/nutanix/csi-plugin) | `csi.nutanix.com` | v0.3, v1.0, v1.2 | A Container Storage Interface (CSI) Driver for Nutanix | Persistent | \"Read/Write Single Pod\" with Nutanix 
Volumes and \"Read/Write Multiple Pods\" with Nutanix Files | Yes | Raw Block, Snapshot, Expansion, Cloning\n[OpenEBS](https://github.com/openebs/csi)| `cstor.csi.openebs.io` | v1.0 | A Container Storage Interface (CSI) Driver for  [OpenEBS](https://www.openebs.io/)| Persistent | Read/Write Single Pod | Yes | Expansion, Snapshot, Cloning\n[Open-E](https://github.com/open-e/JovianDSS-KubernetesCSI) | `com.open-e.joviandss.csi` | v1.0 | A Container Storage Interface (CSI) Driver for Open-E JovianDSS Storage  | Persistent | Read/Write Single Pod | Yes | Snapshot, Cloning \n[Open-Local](https://github.com/alibaba/open-local) | `local.csi.alibaba.com` | v1.0 | A Container Storage Interface (CSI) Driver for Local Storage  | Persistent | Read/Write Single Pod | Yes | Raw Block, Expansion, Snapshot\n[Oracle Cloud Infrastructure(OCI) Block Storage](https://github.com/oracle/oci-cloud-controller-manager/blob/master/container-storage-interface.md) | `blockvolume.csi.oraclecloud.com` | v1.1 | A Container Storage Interface (CSI) Driver for Oracle Cloud Infrastructure (OCI) Block Storage | Persistent | Read/Write Single Pod | Yes | Topology\n[oVirt](https://github.com/openshift/ovirt-csi-driver) | `csi.ovirt.org` | v1.0 | A Container Storage Interface (CSI) Driver for [oVirt](https://ovirt.org)  | Persistent | Read/Write Single Pod | Yes | Block, File Storage\n[Portworx](https://github.com/libopenstorage/openstorage/tree/master/csi) | `pxd.portworx.com` | v1.4 | A Container Storage Interface (CSI) Driver for [Portworx](https://docs.portworx.com/portworx-install-with-kubernetes/storage-operations/csi/) | Persistent and Ephemeral | Read/Write Multiple Pods | Yes | Snapshot, Expansion, Raw Block, Cloning\n[Pure Storage CSI](https://github.com/purestorage/pso-csi)| `pure-csi` | v1.0, v1.1, v1.2, v1.3 | A Container Storage Interface (CSI) Driver for Pure Storage's [Pure Service Orchestrator](https://purestorage.com/containers) | Persistent and Ephemeral | Read/Write Multiple Pods | 
Yes | Snapshot, Cloning, Raw Block, Topology, Expansion\n[QingCloud CSI](https://github.com/yunify/qingcloud-csi)| `disk.csi.qingcloud.com` | v1.1 | A Container Storage Interface (CSI) Driver for QingCloud Block Storage | Persistent | Read/Write Single Pod | Yes | Raw Block, Snapshot, Expansion, Cloning\n[QingStor CSI](https://github.com/yunify/qingstor-csi) | `neonsan.csi.qingstor.com` | v0.3, v1.1 | A Container Storage Interface (CSI) Driver for NeonSAN storage system | Persistent | Read/Write Multiple Pods | Yes | Raw Block, Snapshot, Expansion, Cloning\n[Quobyte](https://github.com/quobyte/quobyte-csi) | `quobyte-csi` | v0.2 | A Container Storage Interface (CSI) Driver for Quobyte | Persistent | Read/Write Multiple Pods | Yes | \n[ROBIN](https://get.robin.io/) | `robin` | v0.3, v1.0 | A Container Storage Interface (CSI) Driver for [ROBIN](https://docs.robin.io) | Persistent | Read/Write Multiple Pods | Yes | Raw Block, Snapshot, Expansion, Cloning\n[SandStone](https://github.com/sandstone-storage/sandstone-csi-driver) | `csi-sandstone-plugin` | v1.0 | A Container Storage Interface (CSI) Driver for SandStone USP | Persistent | Read/Write Multiple Pods | Yes | Raw Block, Snapshot, Expansion, Cloning \n[Sangfor-EDS-File-Storage](https://github.com/evan37717/sangfor-eds-csi) | `eds.csi.file.sangfor.com` | v1.0 | A Container Storage Interface (CSI) Driver for Sangfor Distributed File Storage(EDS) | Persistent | Read/Write Multiple Pods | Yes |\n[Sangfor-EDS-Block-Storage](https://github.com/eds-wzc/sangfor-eds-csi) | `eds.csi.block.sangfor.com` | v1.0 | A Container Storage Interface (CSI) Driver for Sangfor Block Storage(EDS) | Persistent | Read/Write Single Pod | Yes |\n[Scaleway CSI](https://github.com/scaleway/scaleway-csi) | `csi.scaleway.com` | v1.2.0 | Container Storage Interface (CSI) Driver for [Scaleway Block Storage](https://www.scaleway.com/block-storage/) | Persistent | Read/Write Single Pod | Yes | Raw Block, Snapshot, Expansion, Topology\n[Seagate Exos 
X](https://github.com/Seagate/seagate-exos-x-csi) | `csi-exos-x.seagate.com` | v1.3 | CSI driver for [Seagate Exos X](https://www.seagate.com/products/storage/data-storage-systems/raid/) and OEM systems | Persistent | Read/Write Single Pod | Yes | Snapshot, Expansion, Cloning\n[SeaweedFS](https://github.com/seaweedfs/seaweedfs-csi-driver) | `seaweedfs-csi-driver` | v1.0 | A Container Storage Interface (CSI) Driver for [SeaweedFS](https://github.com/chrislusf/seaweedfs) | Persistent | Read/Write Multiple Pods | Yes |\n[Secrets Store CSI Driver](https://github.com/kubernetes-sigs/secrets-store-csi-driver) | `secrets-store.csi.k8s.io` | v0.0.10 | A Container Storage Interface (CSI) Driver for mounting secrets, keys, and certs stored in enterprise-grade external secrets stores as volumes. | Ephemeral | N/A | N/A | \n[SmartX](http://www.smartx.com/?locale=en) | `csi-smtx-plugin` | v1.0 | A Container Storage Interface (CSI) Driver for SmartX ZBS Storage  | Persistent | Read/Write Multiple Pods | Yes | Snapshot, Expansion\n[SODA](https://github.com/sodafoundation/nbp/tree/master/csi) | `csi-soda-plugin` | v1.0 | A Container Storage Interface (CSI) Driver for [SODA](https://sodafoundation.io/) | Persistent | Read/Write Single Pod | Yes | Raw Block, Snapshot\n[SPDK-CSI](https://github.com/spdk/spdk-csi) | `csi.spdk.io` | v1.1 | A Container Storage Interface (CSI) Driver for [SPDK](https://spdk.io/) | Persistent and Ephemeral | Read/Write Single Pod | Yes |\n[StorageOS](https://docs.storageos.com/docs/platforms/kubernetes/install/) | `storageos` | v0.3, v1.0 | A Container Storage Interface (CSI) Driver for [StorageOS](https://storageos.com/) | Persistent | Read/Write Multiple Pods | Yes |\n[Storidge](https://docs.storidge.com/kubernetes_storage/overview.html) | `csi.cio.storidge.com` | v0.3, v1.0 | A Container Storage Interface (CSI) Driver for [Storidge CIO](https://storidge.com/) | Persistent | Read/Write Multiple Pods | Yes | Snapshot, 
Expansion\n[StorPool](https://kb.storpool.com/storpool_integrations/github/kubernetes.html) | `csi-driver.storpool.com` | v1.0 | A Container Storage Interface (CSI) Driver for [StorPool](https://storpool.com/) | Persistent and Ephemeral | Read/Write Multiple Pods | Yes | Expansion\n[Synology](https://github.com/SynologyOpenSource/synology-csi) | `csi.san.synology.com` | v1.0 | A Container Storage Interface (CSI) Driver for Synology NAS | Persistent | Read/Write Multiple Pods | Yes | Snapshot, Expansion, Cloning\n[Tencent Cloud Block Storage](https://github.com/TencentCloud/kubernetes-csi-tencentcloud)| `com.tencent.cloud.csi.cbs` | v1.0 | A Container Storage Interface (CSI) Driver for Tencent Cloud Block Storage | Persistent | Read/Write Single Pod | Yes | Snapshot\n[Tencent Cloud File Storage](https://github.com/TencentCloud/kubernetes-csi-tencentcloud)| `com.tencent.cloud.csi.cfs` | v1.0 | A Container Storage Interface (CSI) Driver for Tencent Cloud File Storage | Persistent | Read/Write Multiple Pods | Yes |\n[Tencent Cloud Object Storage](https://github.com/TencentCloud/kubernetes-csi-tencentcloud)| `com.tencent.cloud.csi.cosfs` | v1.0 | A Container Storage Interface (CSI) Driver for Tencent Cloud Object Storage | Persistent | Read/Write Multiple Pods | No |\n[TopoLVM](https://github.com/cybozu-go/topolvm)| `topolvm.cybozu.com` | v1.1 | A Container Storage Interface (CSI) Driver for LVM | Persistent and Ephemeral | Read/Write Single Pod | Yes | Raw Block, Expansion, Topology Aware\n[VAST Data](https://github.com/vast-data/vast-csi) | `csi.vastdata.com` | v1.0 | A Container Storage Interface (CSI) Driver for VAST Data | Persistent | Read/Write Multiple Pods | Yes |\n[XSKY-EBS](https://xsky-storage.github.io/xsky-csi-driver/csi-block.html) | `csi.block.xsky.com` | v1.0 | A Container Storage Interface (CSI) Driver for XSKY Distributed Block Storage (X-EBS) | Persistent | Read/Write Single Pod | Yes | Raw Block, Snapshot, Expansion, 
Cloning\n[XSKY-EUS](https://xsky-storage.github.io/xsky-csi-driver/csi-fs.html) | `csi.fs.xsky.com` | v1.0 | A Container Storage Interface (CSI) Driver for XSKY Distributed File Storage (X-EUS) | Persistent | Read/Write Multiple Pods | Yes | \n[Vault](https://github.com/kubevault/csi-driver) | `secrets.csi.kubevault.com` | v1.0 | A Container Storage Interface (CSI) Driver for mounting HashiCorp Vault secrets as volumes. | Ephemeral | N/A | N/A | \n[VDA](https://virtual-disk-array.readthedocs.io/en/latest/Introduction.html) | `csi.vda.io` | v1.0 | An open source block storage system based on SPDK | Persistent | Read/Write Single Pod | N/A |\n[Veritas InfoScale Volumes](https://www.veritas.com/solution/virtualization/containers.html) | `org.veritas.infoscale` | v1.2 | A Container Storage Interface (CSI) Driver for Veritas InfoScale volumes | Persistent | Read/Write Multiple Pods | Yes | Snapshot, Expansion, Cloning\n[vSphere](https://github.com/kubernetes-sigs/vsphere-csi-driver) | `csi.vsphere.vmware.com` | v1.0 | A Container Storage Interface (CSI) Driver for VMware vSphere | Persistent | Read/Write Single Pod (Block Volume) <br/><br/> Read/Write Multiple Pods (File Volume) | Yes | Raw Block,<br/><br/>Expansion (Block Volume),<br/><br/>Topology Aware (Block Volume)\n[Vultr Block Storage](https://github.com/vultr/vultr-csi) | `block.csi.vultr.com` | v1.2 | A Container Storage Interface (CSI) Driver for Vultr Block Storage | Persistent | Read/Write Single Pod | Yes | \n[WekaIO](https://github.com/weka/csi-wekafs) | `csi.weka.io` | v1.0 | A Container Storage Interface (CSI) Driver for mounting WekaIO WekaFS filesystem as volumes | Persistent | Read/Write Multiple Pods | Yes | \n[Yandex.Cloud](https://github.com/flant/yandex-csi-driver) | `yandex.csi.flant.com` | v1.2 | A Container Storage Interface (CSI) plugin for Yandex.Cloud Compute Disks  | Persistent | Read/Write Single Pod | Yes |\n[YanRongYun](http://www.yanrongyun.com/) | ? 
| v1.0 | A Container Storage Interface (CSI) Driver for YanRong YRCloudFile Storage  | Persistent | Read/Write Multiple Pods | Yes | \n[Zadara-CSI](https://github.com/zadarastorage/zadara-csi) | `csi.zadara.com` | v1.0, v1.1 | A Container Storage Interface (CSI) plugin for Zadara VPSA Storage Array & VPSA All-Flash  | Persistent | Read/Write Multiple Pods | Yes | Raw Block, Snapshot, Expansion, Cloning\n\n## Sample Drivers\n\nName | Status | More Information\n-----|--------|-------\n[Flexvolume](https://github.com/kubernetes-csi/csi-driver-flex) | Sample |\n[HostPath](https://github.com/kubernetes-csi/csi-driver-host-path) | v1.2.0 | Only use for a single node tests. See the [Example](example.html) page for Kubernetes-specific instructions.\n[ImagePopulator](https://github.com/kubernetes-csi/csi-driver-image-populator) | Prototype | Driver that lets you use a container image as an ephemeral volume.\n[In-memory Sample Mock Driver](https://github.com/kubernetes-csi/csi-test/tree/master/mock/service) | v0.3.0 | The sample mock driver used for [csi-sanity](https://github.com/kubernetes-csi/csi-test/tree/master/cmd/csi-sanity)\n[NFS](https://github.com/kubernetes-csi/csi-driver-nfs) | Sample |\n[Synology NAS](https://github.com/jparklab/synology-csi) | v1.0.0 | An unofficial (and unsupported) Container Storage Interface Driver for Synology NAS.\n[VFS Driver](https://github.com/thecodeteam/csi-vfs) | Released | A CSI plugin that provides a virtual file system.\n"
  },
  {
    "path": "go.mod",
    "content": "module github.com/kastenhq/kubestr\n\ngo 1.24\n\nreplace github.com/graymeta/stow => github.com/kastenhq/stow v0.1.2-kasten\n\nrequire (\n\tgithub.com/briandowns/spinner v1.23.2\n\tgithub.com/frankban/quicktest v1.14.6\n\tgithub.com/golang/mock v1.6.0\n\tgithub.com/kanisterio/kanister v0.0.0-20250106180853-0abc731c8242\n\tgithub.com/kubernetes-csi/external-snapshotter/client/v4 v4.2.0\n\tgithub.com/pkg/errors v0.9.1\n\tgithub.com/spf13/cobra v1.10.2\n\tgopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c\n\tk8s.io/api v0.31.4\n\tk8s.io/apimachinery v0.31.4\n\tk8s.io/client-go v0.31.4\n)\n\nrequire (\n\tgithub.com/Azure/azure-sdk-for-go/sdk/azcore v1.16.0 // indirect\n\tgithub.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1 // indirect\n\tgithub.com/MakeNowJust/heredoc v1.0.0 // indirect\n\tgithub.com/Masterminds/semver v1.5.0 // indirect\n\tgithub.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2 // indirect\n\tgithub.com/blang/semver/v4 v4.0.0 // indirect\n\tgithub.com/chai2010/gettext-go v1.0.2 // indirect\n\tgithub.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect\n\tgithub.com/emicklei/go-restful/v3 v3.11.0 // indirect\n\tgithub.com/exponent-io/jsonpath v0.0.0-20151013193312-d6023ce2651d // indirect\n\tgithub.com/fatih/color v1.17.0 // indirect\n\tgithub.com/fxamacker/cbor/v2 v2.7.0 // indirect\n\tgithub.com/go-errors/errors v1.4.2 // indirect\n\tgithub.com/go-logr/logr v1.4.2 // indirect\n\tgithub.com/go-openapi/errors v0.22.0 // indirect\n\tgithub.com/go-openapi/jsonpointer v0.19.6 // indirect\n\tgithub.com/go-openapi/jsonreference v0.20.2 // indirect\n\tgithub.com/go-openapi/strfmt v0.23.0 // indirect\n\tgithub.com/go-openapi/swag v0.22.4 // indirect\n\tgithub.com/gofrs/uuid v4.4.0+incompatible // indirect\n\tgithub.com/gogo/protobuf v1.3.2 // indirect\n\tgithub.com/golang/protobuf v1.5.4 // indirect\n\tgithub.com/google/btree v1.0.1 // indirect\n\tgithub.com/google/gnostic-models v0.6.8 // 
indirect\n\tgithub.com/google/go-cmp v0.6.0 // indirect\n\tgithub.com/google/gofuzz v1.2.0 // indirect\n\tgithub.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 // indirect\n\tgithub.com/google/uuid v1.6.0 // indirect\n\tgithub.com/gorilla/websocket v1.5.0 // indirect\n\tgithub.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7 // indirect\n\tgithub.com/imdario/mergo v0.3.12 // indirect\n\tgithub.com/inconshreveable/mousetrap v1.1.0 // indirect\n\tgithub.com/josharian/intern v1.0.0 // indirect\n\tgithub.com/jpillora/backoff v1.0.0 // indirect\n\tgithub.com/json-iterator/go v1.1.12 // indirect\n\tgithub.com/kanisterio/errkit v0.0.3 // indirect\n\tgithub.com/kr/pretty v0.3.1 // indirect\n\tgithub.com/kr/text v0.2.0 // indirect\n\tgithub.com/liggitt/tabwriter v0.0.0-20181228230101-89fcab3d43de // indirect\n\tgithub.com/mailru/easyjson v0.7.7 // indirect\n\tgithub.com/mattn/go-colorable v0.1.13 // indirect\n\tgithub.com/mattn/go-isatty v0.0.20 // indirect\n\tgithub.com/mitchellh/go-wordwrap v1.0.1 // indirect\n\tgithub.com/mitchellh/mapstructure v1.5.0 // indirect\n\tgithub.com/moby/spdystream v0.5.1 // indirect\n\tgithub.com/moby/term v0.5.0 // indirect\n\tgithub.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect\n\tgithub.com/modern-go/reflect2 v1.0.2 // indirect\n\tgithub.com/monochromegane/go-gitignore v0.0.0-20200626010858-205db1a8cc00 // indirect\n\tgithub.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect\n\tgithub.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f // indirect\n\tgithub.com/oklog/ulid v1.3.1 // indirect\n\tgithub.com/openshift/api v0.0.0-20231222123017-053aee22b4b4 // indirect\n\tgithub.com/openshift/client-go v0.0.0-20231221125933-2aa81c72f992 // indirect\n\tgithub.com/peterbourgon/diskv v2.0.1+incompatible // indirect\n\tgithub.com/rogpeppe/go-internal v1.12.0 // indirect\n\tgithub.com/russross/blackfriday/v2 v2.1.0 // indirect\n\tgithub.com/sirupsen/logrus v1.9.3 // 
indirect\n\tgithub.com/spf13/pflag v1.0.9 // indirect\n\tgithub.com/x448/float16 v0.8.4 // indirect\n\tgithub.com/xlab/treeprint v1.2.0 // indirect\n\tgo.mongodb.org/mongo-driver v1.14.0 // indirect\n\tgo.starlark.net v0.0.0-20240314022150-ee8ed142361c // indirect\n\tgolang.org/x/mod v0.21.0 // indirect\n\tgolang.org/x/net v0.38.0 // indirect\n\tgolang.org/x/oauth2 v0.27.0 // indirect\n\tgolang.org/x/sync v0.12.0 // indirect\n\tgolang.org/x/sys v0.31.0 // indirect\n\tgolang.org/x/term v0.30.0 // indirect\n\tgolang.org/x/text v0.23.0 // indirect\n\tgolang.org/x/time v0.8.0 // indirect\n\tgolang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d // indirect\n\tgoogle.golang.org/protobuf v1.36.1 // indirect\n\tgopkg.in/evanphx/json-patch.v4 v4.12.0 // indirect\n\tgopkg.in/inf.v0 v0.9.1 // indirect\n\tgopkg.in/yaml.v2 v2.4.0 // indirect\n\tgopkg.in/yaml.v3 v3.0.1 // indirect\n\tk8s.io/apiextensions-apiserver v0.31.4 // indirect\n\tk8s.io/cli-runtime v0.31.4 // indirect\n\tk8s.io/code-generator v0.31.4 // indirect\n\tk8s.io/component-base v0.31.4 // indirect\n\tk8s.io/gengo/v2 v2.0.0-20240228010128-51d4e06bde70 // indirect\n\tk8s.io/klog/v2 v2.130.1 // indirect\n\tk8s.io/kube-openapi v0.0.0-20240228011516-70dd3763d340 // indirect\n\tk8s.io/kubectl v0.31.4 // indirect\n\tk8s.io/utils v0.0.0-20240711033017-18e509b52bc8 // indirect\n\tsigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd // indirect\n\tsigs.k8s.io/kustomize/api v0.17.2 // indirect\n\tsigs.k8s.io/kustomize/kyaml v0.17.1 // indirect\n\tsigs.k8s.io/structured-merge-diff/v4 v4.4.3 // indirect\n\tsigs.k8s.io/yaml v1.4.0 // indirect\n)\n"
  },
  {
    "path": "go.sum",
    "content": "cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=\ncloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=\ncloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU=\ncloud.google.com/go v0.44.1/go.mod h1:iSa0KzasP4Uvy3f1mN/7PiObzGgflwredwwASm/v6AU=\ncloud.google.com/go v0.44.2/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY=\ncloud.google.com/go v0.45.1/go.mod h1:RpBamKRgapWJb87xiFSdk4g1CME7QZg3uwTez+TSTjc=\ncloud.google.com/go v0.46.3/go.mod h1:a6bKKbmY7er1mI7TEI4lsAkts/mkhTSZK8w33B4RAg0=\ncloud.google.com/go v0.51.0/go.mod h1:hWtGJ6gnXH+KgDv+V0zFGDvpi07n3z8ZNj3T1RW0Gcw=\ncloud.google.com/go v0.116.0 h1:B3fRrSDkLRt5qSHWe40ERJvhvnQwdZiHu0bJOpldweE=\ncloud.google.com/go/auth v0.13.0 h1:8Fu8TZy167JkW8Tj3q7dIkr2v4cndv41ouecJx0PAHs=\ncloud.google.com/go/auth v0.13.0/go.mod h1:COOjD9gwfKNKz+IIduatIhYJQIc0mG3H102r/EMxX6Q=\ncloud.google.com/go/auth/oauth2adapt v0.2.6 h1:V6a6XDu2lTwPZWOawrAa9HUK+DB2zfJyTuciBG5hFkU=\ncloud.google.com/go/auth/oauth2adapt v0.2.6/go.mod h1:AlmsELtlEBnaNTL7jCj8VQFLy6mbZv0s4Q7NGBeQ5E8=\ncloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o=\ncloud.google.com/go/compute/metadata v0.6.0 h1:A6hENjEsCDtC1k8byVsgwvVcioamEHvZ4j01OwKxG9I=\ncloud.google.com/go/compute/metadata v0.6.0/go.mod h1:FjyFAW1MW0C203CEOMDTu3Dk1FlqW3Rga40jzHL4hfg=\ncloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE=\ncloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I=\ncloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw=\ndmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU=\ngithub.com/Azure/azure-sdk-for-go v68.0.0+incompatible h1:fcYLmCpyNYRnvJbPerq7U0hS+6+I79yEDJBqVNcqUzU=\ngithub.com/Azure/azure-sdk-for-go v68.0.0+incompatible/go.mod 
h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc=\ngithub.com/Azure/azure-sdk-for-go/sdk/azcore v1.16.0 h1:JZg6HRh6W6U4OLl6lk7BZ7BLisIzM9dG1R50zUk9C/M=\ngithub.com/Azure/azure-sdk-for-go/sdk/azcore v1.16.0/go.mod h1:YL1xnZ6QejvQHWJrX/AvhFl4WW4rqHVoKspWNVwFk0M=\ngithub.com/Azure/azure-sdk-for-go/sdk/azidentity v1.8.0 h1:B/dfvscEQtew9dVuoxqxrUKKv8Ih2f55PydknDamU+g=\ngithub.com/Azure/azure-sdk-for-go/sdk/azidentity v1.8.0/go.mod h1:fiPSssYvltE08HJchL04dOy+RD4hgrjph0cwGGMntdI=\ngithub.com/Azure/azure-sdk-for-go/sdk/internal v1.10.0 h1:ywEEhmNahHBihViHepv3xPBn1663uRv2t2q/ESv9seY=\ngithub.com/Azure/azure-sdk-for-go/sdk/internal v1.10.0/go.mod h1:iZDifYGJTIgIIkYRNWPENUnqx6bJ2xnSDFI2tjwZNuY=\ngithub.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v4 v4.2.1 h1:UPeCRD+XY7QlaGQte2EVI2iOcWvUYA2XY8w5T/8v0NQ=\ngithub.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v4 v4.2.1/go.mod h1:oGV6NlB0cvi1ZbYRR2UN44QHxWFyGk+iylgD0qaMXjA=\ngithub.com/Azure/azure-sdk-for-go/sdk/resourcemanager/resources/armsubscriptions v1.3.0 h1:wxQx2Bt4xzPIKvW59WQf1tJNx/ZZKPfN+EhPX3Z6CYY=\ngithub.com/Azure/azure-sdk-for-go/sdk/resourcemanager/resources/armsubscriptions v1.3.0/go.mod h1:TpiwjwnW/khS0LKs4vW5UmmT9OWcxaveS8U7+tlknzo=\ngithub.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1 h1:UQHMgLO+TxOElx5B5HZ4hJQsoJ/PvUvKRhJHDQXO8P8=\ngithub.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1/go.mod h1:xomTg63KZ2rFqZQzSB4Vz2SUXa1BpHTVz9L5PTmPC4E=\ngithub.com/Azure/go-autorest v14.2.0+incompatible h1:V5VMDjClD3GiElqLWO7mz2MxNAK/vTfRHdAubSIPRgs=\ngithub.com/Azure/go-autorest v14.2.0+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24=\ngithub.com/Azure/go-autorest/autorest v0.9.0/go.mod h1:xyHB1BMZT0cuDHU7I0+g046+BFDTQ8rEZB0s4Yfa6bI=\ngithub.com/Azure/go-autorest/autorest v0.9.6/go.mod h1:/FALq9T/kS7b5J5qsQ+RSTUdAmGFqi0vUdVNNx8q630=\ngithub.com/Azure/go-autorest/autorest v0.11.27 
h1:F3R3q42aWytozkV8ihzcgMO4OA4cuqr3bNlsEuF6//A=\ngithub.com/Azure/go-autorest/autorest v0.11.27/go.mod h1:7l8ybrIdUmGqZMTD0sRtAr8NvbHjfofbf8RSP2q7w7U=\ngithub.com/Azure/go-autorest/autorest/adal v0.5.0/go.mod h1:8Z9fGy2MpX0PvDjB1pEgQTmVqjGhiHBW7RJJEciWzS0=\ngithub.com/Azure/go-autorest/autorest/adal v0.8.2/go.mod h1:ZjhuQClTqx435SRJ2iMlOxPYt3d2C/T/7TiQCVZSn3Q=\ngithub.com/Azure/go-autorest/autorest/adal v0.9.18 h1:kLnPsRjzZZUF3K5REu/Kc+qMQrvuza2bwSnNdhmzLfQ=\ngithub.com/Azure/go-autorest/autorest/adal v0.9.18/go.mod h1:XVVeme+LZwABT8K5Lc3hA4nAe8LDBVle26gTrguhhPQ=\ngithub.com/Azure/go-autorest/autorest/date v0.1.0/go.mod h1:plvfp3oPSKwf2DNjlBjWF/7vwR+cUD/ELuzDCXwHUVA=\ngithub.com/Azure/go-autorest/autorest/date v0.2.0/go.mod h1:vcORJHLJEh643/Ioh9+vPmf1Ij9AEBM5FuBIXLmIy0g=\ngithub.com/Azure/go-autorest/autorest/date v0.3.0 h1:7gUk1U5M/CQbp9WoqinNzJar+8KY+LPI6wiWrP/myHw=\ngithub.com/Azure/go-autorest/autorest/date v0.3.0/go.mod h1:BI0uouVdmngYNUzGWeSYnokU+TrmwEsOqdt8Y6sso74=\ngithub.com/Azure/go-autorest/autorest/mocks v0.1.0/go.mod h1:OTyCOPRA2IgIlWxVYxBee2F5Gr4kF2zd2J5cFRaIDN0=\ngithub.com/Azure/go-autorest/autorest/mocks v0.2.0/go.mod h1:OTyCOPRA2IgIlWxVYxBee2F5Gr4kF2zd2J5cFRaIDN0=\ngithub.com/Azure/go-autorest/autorest/mocks v0.3.0/go.mod h1:a8FDP3DYzQ4RYfVAxAN3SVSiiO77gL2j2ronKKP0syM=\ngithub.com/Azure/go-autorest/logger v0.1.0/go.mod h1:oExouG+K6PryycPJfVSxi/koC6LSNgds39diKLz7Vrc=\ngithub.com/Azure/go-autorest/logger v0.2.1 h1:IG7i4p/mDa2Ce4TRyAO8IHnVhAVF3RFU+ZtXWSmf4Tg=\ngithub.com/Azure/go-autorest/logger v0.2.1/go.mod h1:T9E3cAhj2VqvPOtCYAvby9aBXkZmbF5NWuPV8+WeEW8=\ngithub.com/Azure/go-autorest/tracing v0.5.0/go.mod h1:r/s2XiOKccPW3HrqB+W0TQzfbtp2fGCgRFtBroKn4Dk=\ngithub.com/Azure/go-autorest/tracing v0.6.0 h1:TYi4+3m5t6K48TGI9AUdb+IzbnSxvnvUMfuitfgcfuo=\ngithub.com/Azure/go-autorest/tracing v0.6.0/go.mod h1:+vhtPC754Xsa23ID7GlGsrdKBpUA79WCAKPPZVC2DeU=\ngithub.com/AzureAD/microsoft-authentication-library-for-go v1.2.2 
h1:XHOnouVk1mxXfQidrMEnLlPk9UMeRtyBTnEFtxkV0kU=\ngithub.com/AzureAD/microsoft-authentication-library-for-go v1.2.2/go.mod h1:wP83P5OoQ5p6ip3ScPr0BAq0BvuPAvacpEuSzyouqAI=\ngithub.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=\ngithub.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo=\ngithub.com/MakeNowJust/heredoc v1.0.0 h1:cXCdzVdstXyiTqTvfqk9SDHpKNjxuom+DOlyEeQ4pzQ=\ngithub.com/MakeNowJust/heredoc v1.0.0/go.mod h1:mG5amYoWBHf8vpLOuehzbGGw0EHxpZZ6lCpQ4fNJ8LE=\ngithub.com/Masterminds/goutils v1.1.1 h1:5nUrii3FMTL5diU80unEVvNevw1nH4+ZV4DSLVJLSYI=\ngithub.com/Masterminds/goutils v1.1.1/go.mod h1:8cTjp+g8YejhMuvIA5y2vz3BpJxksy863GQaJW2MFNU=\ngithub.com/Masterminds/semver v1.5.0 h1:H65muMkzWKEuNDnfl9d70GUjFniHKHRbFPGBuZ3QEww=\ngithub.com/Masterminds/semver v1.5.0/go.mod h1:MB6lktGJrhw8PrUyiEoblNEGEQ+RzHPF078ddwwvV3Y=\ngithub.com/Masterminds/sprig v2.22.0+incompatible h1:z4yfnGrZ7netVz+0EDJ0Wi+5VZCSYp4Z0m2dk6cEM60=\ngithub.com/Masterminds/sprig v2.22.0+incompatible/go.mod h1:y6hNFY5UBTIWBxnzTeuNhlNS5hqE0NB0E6fgfo2Br3o=\ngithub.com/NYTimes/gziphandler v0.0.0-20170623195520-56545f4a5d46/go.mod h1:3wb06e3pkSAbeQ52E9H9iFoQsEEwGN64994WTCIhntQ=\ngithub.com/PuerkitoBio/purell v1.0.0/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0=\ngithub.com/PuerkitoBio/purell v1.1.1/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0=\ngithub.com/PuerkitoBio/urlesc v0.0.0-20160726150825-5bd2802263f2/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE=\ngithub.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE=\ngithub.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5 h1:0CwZNZbxp69SHPdPJAN/hZIm0C4OItdklCFmMRWYpio=\ngithub.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5/go.mod h1:wHh0iHkYZB8zMSxRWpUBQtwG5a7fFgvEO+odwuTv2gs=\ngithub.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2 
h1:DklsrG3dyBCFEj5IhUbnKptjxatkF07cF2ak3yi77so=\ngithub.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2/go.mod h1:WaHUgvxTVq04UNunO+XhnAqY/wQc+bxr74GqbsZ/Jqw=\ngithub.com/aws/aws-sdk-go v1.55.5 h1:KKUZBfBoyqy5d3swXyiC7Q76ic40rYcbqH7qjh59kzU=\ngithub.com/aws/aws-sdk-go v1.55.5/go.mod h1:eRwEWoyTWFMVYVQzKMNHWP5/RV4xIUGMQfXQHfHkpNU=\ngithub.com/blang/semver/v4 v4.0.0 h1:1PFHFE6yCCTv8C1TeyNNarDzntLi7wMI5i/pzqYIsAM=\ngithub.com/blang/semver/v4 v4.0.0/go.mod h1:IbckMUScFkM3pff0VJDNKRiT6TG/YpiHIM2yvyW5YoQ=\ngithub.com/briandowns/spinner v1.23.2 h1:Zc6ecUnI+YzLmJniCfDNaMbW0Wid1d5+qcTq4L2FW8w=\ngithub.com/briandowns/spinner v1.23.2/go.mod h1:LaZeM4wm2Ywy6vO571mvhQNRcWfRUnXOs0RcKV0wYKM=\ngithub.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=\ngithub.com/chai2010/gettext-go v1.0.2 h1:1Lwwip6Q2QGsAdl/ZKPCwTe9fe0CjlUbqj5bFNSjIRk=\ngithub.com/chai2010/gettext-go v1.0.2/go.mod h1:y+wnP2cHYaVj19NZhYKAwEMH2CI1gNHeQQ+5AjwawxA=\ngithub.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI=\ngithub.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI=\ngithub.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU=\ngithub.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=\ngithub.com/cpuguy83/go-md2man/v2 v2.0.6/go.mod h1:oOW0eioCTA6cOiMLiUPZOpcVxMig6NIQQ7OS05n1F4g=\ngithub.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=\ngithub.com/creack/pty v1.1.18 h1:n56/Zwd5o6whRC5PMGretI4IdRLlmBXYNjScPaBgsbY=\ngithub.com/creack/pty v1.1.18/go.mod h1:MOBLtS5ELjhRRrroQr9kyvTxUAFNvYEK993ew/Vr4O4=\ngithub.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=\ngithub.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=\ngithub.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc 
h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM=\ngithub.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=\ngithub.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ=\ngithub.com/docker/spdystream v0.0.0-20160310174837-449fdfce4d96/go.mod h1:Qh8CwZgvJUkLughtfhJv5dyTYa91l1fOUCrgjqmcifM=\ngithub.com/docopt/docopt-go v0.0.0-20180111231733-ee0de3bc6815/go.mod h1:WwZ+bS3ebgob9U8Nd0kOddGdZWjyMGR8Wziv+TBNwSE=\ngithub.com/elazarl/goproxy v0.0.0-20180725130230-947c36da3153/go.mod h1:/Zj4wYkgs4iZTTu3o/KG3Itv/qCCa8VVMlb3i9OVuzc=\ngithub.com/emicklei/go-restful v0.0.0-20170410110728-ff4f55a20633/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs=\ngithub.com/emicklei/go-restful v2.9.5+incompatible/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs=\ngithub.com/emicklei/go-restful/v3 v3.11.0 h1:rAQeMHw1c7zTmncogyy8VvRZwtkmkZ4FxERmMY4rD+g=\ngithub.com/emicklei/go-restful/v3 v3.11.0/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc=\ngithub.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=\ngithub.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c=\ngithub.com/evanphx/json-patch v4.9.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk=\ngithub.com/exponent-io/jsonpath v0.0.0-20151013193312-d6023ce2651d h1:105gxyaGwCFad8crR9dcMQWvV9Hvulu6hwUh4tWPJnM=\ngithub.com/exponent-io/jsonpath v0.0.0-20151013193312-d6023ce2651d/go.mod h1:ZZMPRZwes7CROmyNKgQzC3XPs6L/G2EJLHddWejkmf4=\ngithub.com/fatih/color v1.17.0 h1:GlRw1BRJxkpqUCBKzKOw098ed57fEsKeNjpTe3cSjK4=\ngithub.com/fatih/color v1.17.0/go.mod h1:YZ7TlrGPkiz6ku9fK3TLD/pl3CpsiFyu8N92HLgmosI=\ngithub.com/felixge/httpsnoop v1.0.4 h1:NFTV2Zj1bL4mc9sqWACXbQFVBBg2W3GPvqp8/ESS2Wg=\ngithub.com/felixge/httpsnoop v1.0.4/go.mod 
h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U=\ngithub.com/frankban/quicktest v1.14.6 h1:7Xjx+VpznH+oBnejlPUj8oUpdxnVs4f8XU8WnHkI4W8=\ngithub.com/frankban/quicktest v1.14.6/go.mod h1:4ptaffx2x8+WTWXmUCuVU6aPUX1/Mz7zb5vbUoiM6w0=\ngithub.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=\ngithub.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ=\ngithub.com/fxamacker/cbor/v2 v2.7.0 h1:iM5WgngdRBanHcxugY4JySA0nk1wZorNOpTgCMedv5E=\ngithub.com/fxamacker/cbor/v2 v2.7.0/go.mod h1:pxXPTn3joSm21Gbwsv0w9OSA2y1HFR9qXEeXQVeNoDQ=\ngithub.com/ghodss/yaml v0.0.0-20150909031657-73d445a93680/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=\ngithub.com/go-errors/errors v1.4.2 h1:J6MZopCL4uSllY1OfXM374weqZFFItUbrImctkmUxIA=\ngithub.com/go-errors/errors v1.4.2/go.mod h1:sIVyrIiJhuEF+Pj9Ebtd6P/rEYROXFi3BopGUQ5a5Og=\ngithub.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8=\ngithub.com/go-logr/logr v0.1.0/go.mod h1:ixOQHD9gLJUVQQ2ZOR7zLEifBX6tGkNJF4QyIY7sIas=\ngithub.com/go-logr/logr v0.2.0/go.mod h1:z6/tIYblkpsD+a4lm/fGIIU9mZ+XfAiaFtq7xTgseGU=\ngithub.com/go-logr/logr v1.4.2 h1:6pFjapn8bFcIbiKo3XT4j/BhANplGihG6tvd+8rYgrY=\ngithub.com/go-logr/logr v1.4.2/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY=\ngithub.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag=\ngithub.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE=\ngithub.com/go-openapi/errors v0.22.0 h1:c4xY/OLxUBSTiepAg3j/MHuAv5mJhnf53LLMWFB+u/w=\ngithub.com/go-openapi/errors v0.22.0/go.mod h1:J3DmZScxCDufmIMsdOuDHxJbdOGC0xtUynjIx092vXE=\ngithub.com/go-openapi/jsonpointer v0.0.0-20160704185906-46af16f9f7b1/go.mod h1:+35s3my2LFTysnkMfxsJBAMHj/DoqoB9knIWoYG/Vk0=\ngithub.com/go-openapi/jsonpointer v0.19.2/go.mod h1:3akKfEdA7DF1sugOqz1dVQHBcuDBPKZGEoHC/NkiQRg=\ngithub.com/go-openapi/jsonpointer v0.19.3/go.mod 
h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg=\ngithub.com/go-openapi/jsonpointer v0.19.6 h1:eCs3fxoIi3Wh6vtgmLTOjdhSpiqphQ+DaPn38N2ZdrE=\ngithub.com/go-openapi/jsonpointer v0.19.6/go.mod h1:osyAmYz/mB/C3I+WsTTSgw1ONzaLJoLCyoi6/zppojs=\ngithub.com/go-openapi/jsonreference v0.0.0-20160704190145-13c6e3589ad9/go.mod h1:W3Z9FmVs9qj+KR4zFKmDPGiLdk1D9Rlm7cyMvf57TTg=\ngithub.com/go-openapi/jsonreference v0.19.2/go.mod h1:jMjeRr2HHw6nAVajTXJ4eiUwohSTlpa0o73RUL1owJc=\ngithub.com/go-openapi/jsonreference v0.19.3/go.mod h1:rjx6GuL8TTa9VaixXglHmQmIL98+wF9xc8zWvFonSJ8=\ngithub.com/go-openapi/jsonreference v0.20.2 h1:3sVjiK66+uXK/6oQ8xgcRKcFgQ5KXa2KvnJRumpMGbE=\ngithub.com/go-openapi/jsonreference v0.20.2/go.mod h1:Bl1zwGIM8/wsvqjsOQLJ/SH+En5Ap4rVB5KVcIDZG2k=\ngithub.com/go-openapi/spec v0.0.0-20160808142527-6aced65f8501/go.mod h1:J8+jY1nAiCcj+friV/PDoE1/3eeccG9LYBs0tYvLOWc=\ngithub.com/go-openapi/spec v0.19.3/go.mod h1:FpwSN1ksY1eteniUU7X0N/BgJ7a4WvBFVA8Lj9mJglo=\ngithub.com/go-openapi/strfmt v0.23.0 h1:nlUS6BCqcnAk0pyhi9Y+kdDVZdZMHfEKQiS4HaMgO/c=\ngithub.com/go-openapi/strfmt v0.23.0/go.mod h1:NrtIpfKtWIygRkKVsxh7XQMDQW5HKQl6S5ik2elW+K4=\ngithub.com/go-openapi/swag v0.0.0-20160704191624-1d0bd113de87/go.mod h1:DXUve3Dpr1UfpPtxFw+EFuQ41HhCWZfha5jSVRG7C7I=\ngithub.com/go-openapi/swag v0.19.2/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk=\ngithub.com/go-openapi/swag v0.19.5/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk=\ngithub.com/go-openapi/swag v0.22.3/go.mod h1:UzaqsxGiab7freDnrUUra0MwWfN/q7tE4j+VcZ0yl14=\ngithub.com/go-openapi/swag v0.22.4 h1:QLMzNJnMGPRNDCbySlcj1x01tzU8/9LTTL9hZZZogBU=\ngithub.com/go-openapi/swag v0.22.4/go.mod h1:UzaqsxGiab7freDnrUUra0MwWfN/q7tE4j+VcZ0yl14=\ngithub.com/go-task/slim-sprig/v3 v3.0.0 h1:sUs3vkvUymDpBKi3qH1YSqBQk9+9D/8M2mN1vB6EwHI=\ngithub.com/go-task/slim-sprig/v3 v3.0.0/go.mod h1:W848ghGpv3Qj3dhTPRyJypKRiqCdHZiAzKg9hl15HA8=\ngithub.com/gofrs/uuid v4.4.0+incompatible 
h1:3qXRTX8/NbyulANqlc0lchS1gqAVxRgsuW1YrTJupqA=\ngithub.com/gofrs/uuid v4.4.0+incompatible/go.mod h1:b2aQJv3Z4Fp6yNu3cdSllBxTCLRxnplIgP/c0N/04lM=\ngithub.com/gogo/protobuf v1.3.1/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o=\ngithub.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q=\ngithub.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q=\ngithub.com/golang-jwt/jwt/v4 v4.5.1 h1:JdqV9zKUdtaa9gdPlywC3aeoEsR681PlKC+4F5gQgeo=\ngithub.com/golang-jwt/jwt/v4 v4.5.1/go.mod h1:m21LjoU+eqJr34lmDMbreY2eSTRJ1cv77w39/MY0Ch0=\ngithub.com/golang-jwt/jwt/v5 v5.2.1 h1:OuVbFODueb089Lh128TAcimifWaLhJwVflnrgM17wHk=\ngithub.com/golang-jwt/jwt/v5 v5.2.1/go.mod h1:pqrtFR0X4osieyHYxtmOUWsAWrfe1Q5UVIyoH402zdk=\ngithub.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=\ngithub.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=\ngithub.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=\ngithub.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=\ngithub.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=\ngithub.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y=\ngithub.com/golang/mock v1.6.0 h1:ErTB+efbowRARo13NNdxyJji2egdxLGQhRaY+DUumQc=\ngithub.com/golang/mock v1.6.0/go.mod h1:p6yTPP+5HYm5mzsMV8JkE6ZKdX+/wYM6Hr+LicevLPs=\ngithub.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=\ngithub.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=\ngithub.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=\ngithub.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw=\ngithub.com/golang/protobuf v1.4.0-rc.1/go.mod 
h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8=\ngithub.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA=\ngithub.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs=\ngithub.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w=\ngithub.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0=\ngithub.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8=\ngithub.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=\ngithub.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek=\ngithub.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps=\ngithub.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=\ngithub.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=\ngithub.com/google/btree v1.0.1 h1:gK4Kx5IaGY9CD5sPJ36FHiBJ6ZXl0kilRiiCj+jdYp4=\ngithub.com/google/btree v1.0.1/go.mod h1:xXMiIv4Fb/0kKde4SpL7qlzvu5cMJDRkFDxJfI9uaxA=\ngithub.com/google/gnostic-models v0.6.8 h1:yo/ABAfM5IMRsS1VnXjTBvUb61tFIHozhlYvRgGre9I=\ngithub.com/google/gnostic-models v0.6.8/go.mod h1:5n7qKqH0f5wFt+aWF8CW6pZLLNOfYuF5OpfBSENuI8U=\ngithub.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=\ngithub.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=\ngithub.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=\ngithub.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=\ngithub.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=\ngithub.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI=\ngithub.com/google/go-cmp v0.6.0/go.mod 
h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=\ngithub.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=\ngithub.com/google/gofuzz v1.1.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=\ngithub.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0=\ngithub.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=\ngithub.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs=\ngithub.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=\ngithub.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=\ngithub.com/google/pprof v0.0.0-20191218002539-d4f498aebedc/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=\ngithub.com/google/pprof v0.0.0-20240525223248-4bfdf5a9a2af h1:kmjWCqn2qkEml422C2Rrd27c3VGxi6a/6HNq8QmHRKM=\ngithub.com/google/pprof v0.0.0-20240525223248-4bfdf5a9a2af/go.mod h1:K1liHPHnj73Fdn/EKuT8nrFqBihUSKXoLYU0BuatOYo=\ngithub.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI=\ngithub.com/google/s2a-go v0.1.8 h1:zZDs9gcbt9ZPLV0ndSyQk6Kacx2g/X+SKYovpnz3SMM=\ngithub.com/google/s2a-go v0.1.8/go.mod h1:6iNWHTpQ+nfNRN5E00MSdfDwVesa8hhS32PhPO8deJA=\ngithub.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 h1:El6M4kTTCOh6aBiKaUGG7oYTSPP8MxqL4YI3kZKwcP4=\ngithub.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510/go.mod h1:pupxD2MaaD3pAXIBCelhxNneeOaAeabZDe5s4K6zSpQ=\ngithub.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=\ngithub.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0=\ngithub.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=\ngithub.com/googleapis/enterprise-certificate-proxy v0.3.4 h1:XYIDZApgAnrN1c855gTgghdIA6Stxb52D5RnLI1SLyw=\ngithub.com/googleapis/enterprise-certificate-proxy v0.3.4/go.mod 
h1:YKe7cfqYXjKGpGvmSg28/fFvhNzinZQm8DGnaburhGA=\ngithub.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg=\ngithub.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk=\ngithub.com/googleapis/gax-go/v2 v2.14.0 h1:f+jMrjBPl+DL9nI4IQzLUxMq7XrAqFYB7hBPqMNIe8o=\ngithub.com/googleapis/gax-go/v2 v2.14.0/go.mod h1:lhBCnjdLrWRaPvLWhmc8IS24m9mr07qSYnHncrgo+zk=\ngithub.com/googleapis/gnostic v0.4.1/go.mod h1:LRhVm6pbyptWbWbuZ38d1eyptfvIytN3ir6b65WBswg=\ngithub.com/gorilla/websocket v1.5.0 h1:PPwGk2jz7EePpoHN/+ClbZu8SPxiqlu12wZP/3sWmnc=\ngithub.com/gorilla/websocket v1.5.0/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=\ngithub.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7 h1:pdN6V1QBWetyv/0+wjACpqVH+eVULgEjkurDLq3goeM=\ngithub.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA=\ngithub.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=\ngithub.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=\ngithub.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU=\ngithub.com/huandu/xstrings v1.2.0 h1:yPeWdRnmynF7p+lLYz0H2tthW9lqhMJrQV/U7yy4wX0=\ngithub.com/huandu/xstrings v1.2.0/go.mod h1:DvyZB1rfVYsBIigL8HwpZgxHwXozlTgGqn63UyNX5k4=\ngithub.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc=\ngithub.com/imdario/mergo v0.3.5/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA=\ngithub.com/imdario/mergo v0.3.12 h1:b6R2BslTbIEToALKP7LxUvijTsNI9TAe80pLWN2g/HU=\ngithub.com/imdario/mergo v0.3.12/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA=\ngithub.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8=\ngithub.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw=\ngithub.com/jmespath/go-jmespath v0.4.0 
h1:BEgLn5cpjn8UN1mAw4NjwDrS35OdebyEtFe+9YPoQUg=\ngithub.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo=\ngithub.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY=\ngithub.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y=\ngithub.com/jpillora/backoff v1.0.0 h1:uvFg412JmmHBHw7iwprIxkPMI+sGQ4kzOWsMeHnm2EA=\ngithub.com/jpillora/backoff v1.0.0/go.mod h1:J/6gKK9jxlEcS3zixgDgUAsiuZ7yrSoa/FX5e0EB2j4=\ngithub.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU=\ngithub.com/json-iterator/go v1.1.10/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=\ngithub.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM=\ngithub.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo=\ngithub.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU=\ngithub.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk=\ngithub.com/kanisterio/errkit v0.0.3 h1:1wHaTqV4DZE0XrN+Nq7Q2M8kyKnV8NhhEF3OB7A/Pd8=\ngithub.com/kanisterio/errkit v0.0.3/go.mod h1:0xesKaif6++1IXFdhb6fywa40J07odjwWq3IKzxWC3A=\ngithub.com/kanisterio/kanister v0.0.0-20250106180853-0abc731c8242 h1:Ubk92hHanqt0lWkw+AJD0HD/kxyNX099XgkLAAfxKQo=\ngithub.com/kanisterio/kanister v0.0.0-20250106180853-0abc731c8242/go.mod h1:GKGelgFnCa/Vc4MDuGlc4DdKGWaN7yIt8oI/Ztsm8V0=\ngithub.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQLJ+jE2L00=\ngithub.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8=\ngithub.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=\ngithub.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=\ngithub.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=\ngithub.com/kr/pretty v0.2.1/go.mod 
h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=\ngithub.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE=\ngithub.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk=\ngithub.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=\ngithub.com/kr/pty v1.1.5/go.mod h1:9r2w37qlBe7rQ6e1fg1S/9xpWHSnaqNdHD3WcMdbPDA=\ngithub.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=\ngithub.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=\ngithub.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=\ngithub.com/kubernetes-csi/external-snapshotter/client/v4 v4.2.0 h1:nHHjmvjitIiyPlUHk/ofpgvBcNcawJLtf4PYHORLjAA=\ngithub.com/kubernetes-csi/external-snapshotter/client/v4 v4.2.0/go.mod h1:YBCo4DoEeDndqvAn6eeu0vWM7QdXmHEeI9cFWplmBys=\ngithub.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc=\ngithub.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw=\ngithub.com/liggitt/tabwriter v0.0.0-20181228230101-89fcab3d43de h1:9TO3cAIGXtEhnIaL+V+BEER86oLrvS+kWobKpbJuye0=\ngithub.com/liggitt/tabwriter v0.0.0-20181228230101-89fcab3d43de/go.mod h1:zAbeS9B/r2mtpb6U+EI2rYA5OAXxsYw6wTamcNW+zcE=\ngithub.com/mailru/easyjson v0.0.0-20160728113105-d5b7844b561a/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=\ngithub.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=\ngithub.com/mailru/easyjson v0.0.0-20190626092158-b2ccc519800e/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=\ngithub.com/mailru/easyjson v0.7.0/go.mod h1:KAzv3t3aY1NaHWoQz1+4F1ccyAH66Jk7yos7ldAVICs=\ngithub.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0=\ngithub.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc=\ngithub.com/mattn/go-colorable v0.1.13 h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxecdEvA=\ngithub.com/mattn/go-colorable 
v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovkB8vQcUbaXHg=\ngithub.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM=\ngithub.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY=\ngithub.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y=\ngithub.com/mitchellh/copystructure v1.0.0 h1:Laisrj+bAB6b/yJwB5Bt3ITZhGJdqmxquMKeZ+mmkFQ=\ngithub.com/mitchellh/copystructure v1.0.0/go.mod h1:SNtv71yrdKgLRyLFxmLdkAbkKEFWgYaq1OVrnRcwhnw=\ngithub.com/mitchellh/go-wordwrap v1.0.1 h1:TLuKupo69TCn6TQSyGxwI1EblZZEsQ0vMlAFQflz0v0=\ngithub.com/mitchellh/go-wordwrap v1.0.1/go.mod h1:R62XHJLzvMFRBbcrT7m7WgmE1eOyTSsCt+hzestvNj0=\ngithub.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY=\ngithub.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo=\ngithub.com/mitchellh/reflectwalk v1.0.0 h1:9D+8oIskB4VJBN5SFlmc27fSlIBZaov1Wpk/IfikLNY=\ngithub.com/mitchellh/reflectwalk v1.0.0/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw=\ngithub.com/moby/spdystream v0.5.1 h1:9sNYeYZUcci9R6/w7KDaFWEWeV4LStVG78Mpyq/Zm/Y=\ngithub.com/moby/spdystream v0.5.1/go.mod h1:xBAYlnt/ay+11ShkdFKNAG7LsyK/tmNBVvVOwrfMgdI=\ngithub.com/moby/term v0.5.0 h1:xt8Q1nalod/v7BqbG21f8mQPqH+xAaC9C3N3wfWbVP0=\ngithub.com/moby/term v0.5.0/go.mod h1:8FzsFHVUBGZdbDsJw/ot+X+d5HLUbvklYLJ9uGfcI3Y=\ngithub.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=\ngithub.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg=\ngithub.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=\ngithub.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=\ngithub.com/modern-go/reflect2 v1.0.1/go.mod 
h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=\ngithub.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M=\ngithub.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk=\ngithub.com/monochromegane/go-gitignore v0.0.0-20200626010858-205db1a8cc00 h1:n6/2gBQ3RWajuToeY6ZtZTIKv2v7ThUy5KKusIT0yc0=\ngithub.com/monochromegane/go-gitignore v0.0.0-20200626010858-205db1a8cc00/go.mod h1:Pm3mSP3c5uWn86xMLZ5Sa7JB9GsEZySvHYXCTK4E9q4=\ngithub.com/munnerz/goautoneg v0.0.0-20120707110453-a547fc61f48d/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ=\ngithub.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA=\ngithub.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ=\ngithub.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f h1:y5//uYreIhSUg3J1GEMiLbxo1LJaP8RfCpH6pymGZus=\ngithub.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw=\ngithub.com/oklog/ulid v1.3.1 h1:EGfNDEx6MqHz8B3uNV6QAib1UR2Lm97sHi3ocA6ESJ4=\ngithub.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U=\ngithub.com/onsi/ginkgo v0.0.0-20170829012221-11459a886d9c/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=\ngithub.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=\ngithub.com/onsi/ginkgo v1.11.0 h1:JAKSXpt1YjtLA7YpPiqO9ss6sNXEsPfSGdwN0UHqzrw=\ngithub.com/onsi/ginkgo v1.11.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=\ngithub.com/onsi/ginkgo/v2 v2.19.0 h1:9Cnnf7UHo57Hy3k6/m5k3dRfGTMXGvxhHFvkDTCTpvA=\ngithub.com/onsi/ginkgo/v2 v2.19.0/go.mod h1:rlwLi9PilAFJ8jCg9UE1QP6VBpd6/xj3SRC0d6TU0To=\ngithub.com/onsi/gomega v0.0.0-20170829124025-dcabb60a477c/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA=\ngithub.com/onsi/gomega v1.7.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=\ngithub.com/onsi/gomega v1.33.1 
h1:dsYjIxxSR755MDmKVsaFQTE22ChNBcuuTWgkUDSubOk=\ngithub.com/onsi/gomega v1.33.1/go.mod h1:U4R44UsT+9eLIaYRB2a5qajjtQYn0hauxvRm16AVYg0=\ngithub.com/openshift/api v0.0.0-20231222123017-053aee22b4b4 h1:XHl52N6/q+aE5qvmN3YyHyV2H0xepZTbr/r6Vs5pNjo=\ngithub.com/openshift/api v0.0.0-20231222123017-053aee22b4b4/go.mod h1:CxgbWAlvu2iQB0UmKTtRu1YfepRg1/vJ64n2DlIEVz4=\ngithub.com/openshift/client-go v0.0.0-20231221125933-2aa81c72f992 h1:JQ/w7ublPBrPRwknrde4apbTR23PDxKYUmkkfo1Nvws=\ngithub.com/openshift/client-go v0.0.0-20231221125933-2aa81c72f992/go.mod h1:5W+xoimHjRdZ0dI/yeQR0ANRNLK9mPmXMzUWPAIPADo=\ngithub.com/peterbourgon/diskv v2.0.1+incompatible h1:UBdAOUP5p4RWqPBg048CAvpKN+vxiaj6gdUUzhl4XmI=\ngithub.com/peterbourgon/diskv v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR/tNboyR3/BZd58JJSHlUSCU=\ngithub.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c h1:+mdjkGKdHQG3305AYmdv1U2eRNDiU2ErMBj1gwrq8eQ=\ngithub.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c/go.mod h1:7rwL4CYBLnjLxUqIJNnCWiEdr3bn6IUYi15bNlnbCCU=\ngithub.com/pkg/diff v0.0.0-20210226163009-20ebb0f2a09e/go.mod h1:pJLUxLENpZxwdsKMEsNbx1VGcRFpLqf3715MtcvvzbA=\ngithub.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=\ngithub.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=\ngithub.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=\ngithub.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U=\ngithub.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=\ngithub.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=\ngithub.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=\ngithub.com/rogpeppe/go-internal v1.9.0/go.mod h1:WtVeX8xhTBvf0smdhujwtBcq4Qrzq/fJaraNFVN+nFs=\ngithub.com/rogpeppe/go-internal v1.12.0 
h1:exVL4IDcn6na9z1rAb56Vxr+CgyK3nn3O+epU5NdKM8=\ngithub.com/rogpeppe/go-internal v1.12.0/go.mod h1:E+RYuTGaKKdloAfM02xzb0FW3Paa99yedzYV+kq4uf4=\ngithub.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk=\ngithub.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=\ngithub.com/sergi/go-diff v1.2.0 h1:XU+rvMAioB0UC3q1MFrIQy4Vo5/4VsRDQQXHsEya6xQ=\ngithub.com/sergi/go-diff v1.2.0/go.mod h1:STckp+ISIX8hZLjrqAeVduY0gWCT9IjLuqbuNXdaHfM=\ngithub.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ=\ngithub.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ=\ngithub.com/spf13/afero v1.2.2/go.mod h1:9ZxEEn6pIJ8Rxe320qSDBk6AsU0r9pR7Q4OcevTdifk=\ngithub.com/spf13/cobra v1.10.2 h1:DMTTonx5m65Ic0GOoRY2c16WCbHxOOw6xxezuLaBpcU=\ngithub.com/spf13/cobra v1.10.2/go.mod h1:7C1pvHqHw5A4vrJfjNwvOdzYu0Gml16OCs2GRiTUUS4=\ngithub.com/spf13/pflag v0.0.0-20170130214245-9ff6c6923cff/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=\ngithub.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=\ngithub.com/spf13/pflag v1.0.9 h1:9exaQaMOCwffKiiiYk6/BndUBv+iRViNW+4lEMi0PvY=\ngithub.com/spf13/pflag v1.0.9/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=\ngithub.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=\ngithub.com/stretchr/objx v0.2.0/go.mod h1:qt09Ya8vawLte6SNmTgCsAVtYtaKzEcn8ATUoHMkEqE=\ngithub.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw=\ngithub.com/stretchr/objx v0.5.0 h1:1zr/of2m5FGMsad5YfcqgdqdWrIhu+EBEJRhR1U7z/c=\ngithub.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo=\ngithub.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=\ngithub.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=\ngithub.com/stretchr/testify v1.7.0/go.mod 
h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=\ngithub.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=\ngithub.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=\ngithub.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=\ngithub.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg=\ngithub.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=\ngithub.com/x448/float16 v0.8.4 h1:qLwI1I70+NjRFUR3zs1JPUCgaCXSh3SW62uAKT1mSBM=\ngithub.com/x448/float16 v0.8.4/go.mod h1:14CWIYCyZA/cWjXOioeEpHeN/83MdbZDRQHoFcYsOfg=\ngithub.com/xlab/treeprint v1.2.0 h1:HzHnuAF1plUN2zGlAFHbSQP2qJ0ZAD3XF5XD7OesXRQ=\ngithub.com/xlab/treeprint v1.2.0/go.mod h1:gj5Gd3gPdKtR1ikdDK6fnFLdmIS0X30kTTuNd/WEJu0=\ngithub.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=\ngithub.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=\ngithub.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k=\ngo.mongodb.org/mongo-driver v1.14.0 h1:P98w8egYRjYe3XDjxhYJagTokP/H6HzlsnojRgZRd80=\ngo.mongodb.org/mongo-driver v1.14.0/go.mod h1:Vzb0Mk/pa7e6cWw85R4F/endUC3u0U9jGcNU603k65c=\ngo.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU=\ngo.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8=\ngo.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=\ngo.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.54.0 h1:TT4fX+nBOA/+LUkobKGW1ydGcn+G3vRw9+g5HwCphpk=\ngo.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.54.0/go.mod h1:L7UH0GbB0p47T4Rri3uHjbpCFYrVrwc1I25QhNPiGK8=\ngo.opentelemetry.io/otel v1.31.0 h1:NsJcKPIW0D0H3NgzPDHmo0WW6SptzPdqg/L1zsIm2hY=\ngo.opentelemetry.io/otel v1.31.0/go.mod h1:O0C14Yl9FgkjqcCZAsE053C13OaddMYr/hz6clDkEJE=\ngo.opentelemetry.io/otel/metric v1.31.0 
h1:FSErL0ATQAmYHUIzSezZibnyVlft1ybhy4ozRPcF2fE=\ngo.opentelemetry.io/otel/metric v1.31.0/go.mod h1:C3dEloVbLuYoX41KpmAhOqNriGbA+qqH6PQ5E5mUfnY=\ngo.opentelemetry.io/otel/trace v1.31.0 h1:ffjsj1aRouKewfr85U2aGagJ46+MvodynlQ1HYdmJys=\ngo.opentelemetry.io/otel/trace v1.31.0/go.mod h1:TXZkRk7SM2ZQLtR6eoAWQFIHPvzQ06FJAsO1tJg480A=\ngo.starlark.net v0.0.0-20240314022150-ee8ed142361c h1:roAjH18hZcwI4hHStHbkXjF5b7UUyZ/0SG3hXNN1SjA=\ngo.starlark.net v0.0.0-20240314022150-ee8ed142361c/go.mod h1:YKMCv9b1WrfWmeqdV5MAuEHWsu5iC+fe6kYl2sQjdI8=\ngo.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto=\ngo.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE=\ngo.yaml.in/yaml/v3 v3.0.4/go.mod h1:DhzuOOF2ATzADvBadXxruRBLzYTpT36CKvDb3+aBEFg=\ngolang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=\ngolang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=\ngolang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=\ngolang.org/x/crypto v0.0.0-20190611184440-5c40567a22f8/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=\ngolang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=\ngolang.org/x/crypto v0.0.0-20191206172530-e9b2fee46413/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=\ngolang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=\ngolang.org/x/crypto v0.36.0 h1:AnAEvhDddvBdpY+uR+MyHmuZzzNqXSe/GvuDeob5L34=\ngolang.org/x/crypto v0.36.0/go.mod h1:Y4J0ReaxCR1IMaabaSMugxJES1EpwhBHhv2bDHklZvc=\ngolang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=\ngolang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=\ngolang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod 
h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8=\ngolang.org/x/exp v0.0.0-20190829153037-c13cbed26979/go.mod h1:86+5VVa7VpoJ4kLfm080zCjGlMRFzhUhsZKEZO7MGek=\ngolang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4=\ngolang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js=\ngolang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0=\ngolang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=\ngolang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU=\ngolang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=\ngolang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=\ngolang.org/x/lint v0.0.0-20190409202823-959b441ac422/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=\ngolang.org/x/lint v0.0.0-20190909230951-414d861bb4ac/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=\ngolang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f/go.mod h1:5qLYkcX4OjUUV8bRuDixDT3tpyyb+LUpUlRWLxfhWrs=\ngolang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE=\ngolang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o=\ngolang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc=\ngolang.org/x/mod v0.1.0/go.mod h1:0QHyrYULN0/3qlju5TqG8bIK38QM8yzMo5ekMj3DlcY=\ngolang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg=\ngolang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=\ngolang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=\ngolang.org/x/mod v0.4.2/go.mod 
h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=\ngolang.org/x/mod v0.21.0 h1:vvrHzRwRfVKSiLrG+d4FMl/Qi4ukBCE6kZlTUkDYRT0=\ngolang.org/x/mod v0.21.0/go.mod h1:6SkKJ3Xj0I0BrPOZoBy3bdMptDDU9oJrpohJ3eWZ1fY=\ngolang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=\ngolang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=\ngolang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=\ngolang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=\ngolang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=\ngolang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=\ngolang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=\ngolang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=\ngolang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=\ngolang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks=\ngolang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=\ngolang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=\ngolang.org/x/net v0.0.0-20190827160401-ba9fcec4b297/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=\ngolang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=\ngolang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=\ngolang.org/x/net v0.0.0-20200324143707-d3edc9973b7e/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=\ngolang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod 
h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=\ngolang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=\ngolang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM=\ngolang.org/x/net v0.38.0 h1:vRMAPTMaeGqVhG5QyLJHqNDwecKTomGeqbnfZyKlBI8=\ngolang.org/x/net v0.38.0/go.mod h1:ivrbrMbzFq5J41QOQh0siUuly180yBYtLp+CKbEaFx8=\ngolang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=\ngolang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=\ngolang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=\ngolang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=\ngolang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=\ngolang.org/x/oauth2 v0.27.0 h1:da9Vo7/tDv5RH/7nZDz1eMGS/q1Vv1N/7FCrBhI9I3M=\ngolang.org/x/oauth2 v0.27.0/go.mod h1:onh5ek6nERTohokkhCD/y2cV4Do3fxFHFuAejCkRWT8=\ngolang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=\ngolang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=\ngolang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=\ngolang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=\ngolang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=\ngolang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=\ngolang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=\ngolang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod 
h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=\ngolang.org/x/sync v0.12.0 h1:MHc5BpPuC30uJk597Ri8TV3CNZcTLu6B6z4lJy+g6Jw=\ngolang.org/x/sync v0.12.0/go.mod h1:1dzgHSNfp02xaA81J2MS99Qcpr2w7fw1gpm99rleRqA=\ngolang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=\ngolang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=\ngolang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=\ngolang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=\ngolang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=\ngolang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=\ngolang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=\ngolang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=\ngolang.org/x/sys v0.0.0-20190616124812-15dcb6c0061f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=\ngolang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=\ngolang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=\ngolang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=\ngolang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=\ngolang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=\ngolang.org/x/sys v0.0.0-20200622214017-ed371f2e16b4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=\ngolang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=\ngolang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod 
h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=\ngolang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=\ngolang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=\ngolang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=\ngolang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=\ngolang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=\ngolang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=\ngolang.org/x/sys v0.31.0 h1:ioabZlmFYtWhL+TRYpcnNlLwhyxaM9kWTDEmfnprqik=\ngolang.org/x/sys v0.31.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k=\ngolang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=\ngolang.org/x/term v0.30.0 h1:PQ39fJZ+mfadBm0y5WlL4vlM7Sx1Hgf13sMIY2+QS9Y=\ngolang.org/x/term v0.30.0/go.mod h1:NYYFdzHoI5wRh/h5tDMdMqCqPJZEuNqVR5xJLd/n67g=\ngolang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=\ngolang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=\ngolang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=\ngolang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=\ngolang.org/x/text v0.23.0 h1:D71I7dUrlY+VX0gQShAThNGHFxZ13dGLBHQLVl1mJlY=\ngolang.org/x/text v0.23.0/go.mod h1:/BLNzu4aZCJ1+kcD0DNRotWKage4q2rGVAg4o22unh4=\ngolang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=\ngolang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=\ngolang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=\ngolang.org/x/time v0.0.0-20200416051211-89c76fbcd5d1/go.mod 
h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=\ngolang.org/x/time v0.8.0 h1:9i3RxcPv3PZnitoVGMPDKZSq1xW1gK1Xy3ArNOGZfEg=\ngolang.org/x/time v0.8.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM=\ngolang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=\ngolang.org/x/tools v0.0.0-20181011042414-1f849cf54d09/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=\ngolang.org/x/tools v0.0.0-20181030221726-6c7e314b6563/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=\ngolang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=\ngolang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY=\ngolang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=\ngolang.org/x/tools v0.0.0-20190312151545-0bb0c0a6e846/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=\ngolang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=\ngolang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=\ngolang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=\ngolang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=\ngolang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=\ngolang.org/x/tools v0.0.0-20190614205625-5aca471b1d59/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=\ngolang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=\ngolang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=\ngolang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=\ngolang.org/x/tools 
v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=\ngolang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=\ngolang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=\ngolang.org/x/tools v0.0.0-20191125144606-a911d9008d1f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=\ngolang.org/x/tools v0.0.0-20191227053925-7b8e75db28f4/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=\ngolang.org/x/tools v0.0.0-20200616133436-c1934b75d054/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=\ngolang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=\ngolang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=\ngolang.org/x/tools v0.1.1/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=\ngolang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d h1:vU5i/LfpvrRCpgM/VPfJLg5KjxD3E+hfT1SH+d9zLwg=\ngolang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d/go.mod h1:aiJjzUbINMkxbQROHiO6hDPo2LHcIPhhQsa9DLh0yGk=\ngolang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=\ngolang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=\ngolang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=\ngolang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=\ngoogle.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE=\ngoogle.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M=\ngoogle.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg=\ngoogle.golang.org/api v0.9.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg=\ngoogle.golang.org/api v0.15.0/go.mod 
h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI=\ngoogle.golang.org/api v0.214.0 h1:h2Gkq07OYi6kusGOaT/9rnNljuXmqPnaig7WGPmKbwA=\ngoogle.golang.org/api v0.214.0/go.mod h1:bYPpLG8AyeMWwDU6NXoB00xC0DFkikVvd5MfwoxjLqE=\ngoogle.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=\ngoogle.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=\ngoogle.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=\ngoogle.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0=\ngoogle.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc=\ngoogle.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=\ngoogle.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=\ngoogle.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=\ngoogle.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=\ngoogle.golang.org/genproto v0.0.0-20190502173448-54afdca5d873/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=\ngoogle.golang.org/genproto v0.0.0-20190801165951-fa694d86fc64/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=\ngoogle.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=\ngoogle.golang.org/genproto v0.0.0-20190911173649-1774047e7e51/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8=\ngoogle.golang.org/genproto v0.0.0-20191230161307-f3c370f40bfb/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=\ngoogle.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo=\ngoogle.golang.org/genproto v0.0.0-20241118233622-e639e219e697 
h1:ToEetK57OidYuqD4Q5w+vfEnPvPpuTwedCNVohYJfNk=\ngoogle.golang.org/genproto/googleapis/rpc v0.0.0-20241209162323-e6fa225c2576 h1:8ZmaLZE4XWrtU3MyClkYqqtl6Oegr3235h7jxsDyqCY=\ngoogle.golang.org/genproto/googleapis/rpc v0.0.0-20241209162323-e6fa225c2576/go.mod h1:5uTbfoYQed2U9p3KIj2/Zzm02PYhndfdmML0qC3q3FU=\ngoogle.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=\ngoogle.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38=\ngoogle.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM=\ngoogle.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg=\ngoogle.golang.org/grpc v1.26.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=\ngoogle.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=\ngoogle.golang.org/grpc v1.69.2 h1:U3S9QEtbXC0bYNvRtcoklF3xGtLViumSYxWykJS+7AU=\ngoogle.golang.org/grpc v1.69.2/go.mod h1:vyjdE6jLBI76dgpDojsFGNaHlxdjXN9ghpnd2o7JGZ4=\ngoogle.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=\ngoogle.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=\ngoogle.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM=\ngoogle.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE=\ngoogle.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo=\ngoogle.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=\ngoogle.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=\ngoogle.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=\ngoogle.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGjtUeSXeh4=\ngoogle.golang.org/protobuf v1.36.1 
h1:yBPeRvTftaleIgM3PZ/WBIZ7XM/eEYAaEyCwvyjq/gk=\ngoogle.golang.org/protobuf v1.36.1/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE=\ngopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=\ngopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=\ngopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=\ngopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=\ngopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q=\ngopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI=\ngopkg.in/evanphx/json-patch.v4 v4.12.0 h1:n6jtcsulIzXPJaxegRbvFNNrZDjbij7ny3gmSPG+6V4=\ngopkg.in/evanphx/json-patch.v4 v4.12.0/go.mod h1:p8EYWUEYMpynmqDbY58zCKCFZw8pRWMG4EsWvDvM72M=\ngopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys=\ngopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc=\ngopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw=\ngopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw=\ngopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=\ngopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=\ngopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=\ngopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=\ngopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY=\ngopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ=\ngopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=\ngopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=\ngopkg.in/yaml.v3 v3.0.1/go.mod 
h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=\nhonnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=\nhonnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=\nhonnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=\nhonnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=\nhonnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg=\nk8s.io/api v0.19.0/go.mod h1:I1K45XlvTrDjmj5LoM5LuP/KYrhWbjUKT/SoPG0qTjw=\nk8s.io/api v0.31.4 h1:I2QNzitPVsPeLQvexMEsj945QumYraqv9m74isPDKhM=\nk8s.io/api v0.31.4/go.mod h1:d+7vgXLvmcdT1BCo79VEgJxHHryww3V5np2OYTr6jdw=\nk8s.io/apiextensions-apiserver v0.31.4 h1:FxbqzSvy92Ca9DIs5jqot883G0Ln/PGXfm/07t39LS0=\nk8s.io/apiextensions-apiserver v0.31.4/go.mod h1:hIW9YU8UsqZqIWGG99/gsdIU0Ar45Qd3A12QOe/rvpg=\nk8s.io/apimachinery v0.19.0/go.mod h1:DnPGDnARWFvYa3pMHgSxtbZb7gpzzAZ1pTfaUNDVlmA=\nk8s.io/apimachinery v0.31.4 h1:8xjE2C4CzhYVm9DGf60yohpNUh5AEBnPxCryPBECmlM=\nk8s.io/apimachinery v0.31.4/go.mod h1:rsPdaZJfTfLsNJSQzNHQvYoTmxhoOEofxtOsF3rtsMo=\nk8s.io/cli-runtime v0.31.4 h1:iczCWiyXaotW+hyF5cWP8RnEYBCzZfJUF6otJ2m9mw0=\nk8s.io/cli-runtime v0.31.4/go.mod h1:0/pRzAH7qc0hWx40ut1R4jLqiy2w/KnbqdaAI2eFG8U=\nk8s.io/client-go v0.19.0/go.mod h1:H9E/VT95blcFQnlyShFgnFT9ZnJOAceiUHM3MlRC+mU=\nk8s.io/client-go v0.31.4 h1:t4QEXt4jgHIkKKlx06+W3+1JOwAFU/2OPiOo7H92eRQ=\nk8s.io/client-go v0.31.4/go.mod h1:kvuMro4sFYIa8sulL5Gi5GFqUPvfH2O/dXuKstbaaeg=\nk8s.io/code-generator v0.19.0/go.mod h1:moqLn7w0t9cMs4+5CQyxnfA/HV8MF6aAVENF+WZZhgk=\nk8s.io/code-generator v0.31.4 h1:Vu+8fKz+239rKiVDHFVHgjQ162cg5iUQPtTyQbwXeQw=\nk8s.io/code-generator v0.31.4/go.mod h1:yMDt13Kn7m4MMZ4LxB1KBzdZjEyxzdT4b4qXq+lnI90=\nk8s.io/component-base v0.31.4 h1:wCquJh4ul9O8nNBSB8N/o8+gbfu3BVQkVw9jAUY/Qtw=\nk8s.io/component-base v0.31.4/go.mod 
h1:G4dgtf5BccwiDT9DdejK0qM6zTK0jwDGEKnCmb9+u/s=\nk8s.io/gengo v0.0.0-20200413195148-3a45101e95ac/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0=\nk8s.io/gengo v0.0.0-20200428234225-8167cfdcfc14/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0=\nk8s.io/gengo/v2 v2.0.0-20240228010128-51d4e06bde70 h1:NGrVE502P0s0/1hudf8zjgwki1X/TByhmAoILTarmzo=\nk8s.io/gengo/v2 v2.0.0-20240228010128-51d4e06bde70/go.mod h1:VH3AT8AaQOqiGjMF9p0/IM1Dj+82ZwjfxUP1IxaHE+8=\nk8s.io/klog/v2 v2.0.0/go.mod h1:PBfzABfn139FHAV07az/IF9Wp1bkk3vpT2XSJ76fSDE=\nk8s.io/klog/v2 v2.2.0/go.mod h1:Od+F08eJP+W3HUb4pSrPpgp9DGU4GzlpG/TmITuYh/Y=\nk8s.io/klog/v2 v2.130.1 h1:n9Xl7H1Xvksem4KFG4PYbdQCQxqc/tTUyrgXaOhHSzk=\nk8s.io/klog/v2 v2.130.1/go.mod h1:3Jpz1GvMt720eyJH1ckRHK1EDfpxISzJ7I9OYgaDtPE=\nk8s.io/kube-openapi v0.0.0-20200805222855-6aeccd4b50c6/go.mod h1:UuqjUnNftUyPE5H64/qeyjQoUZhGpeFDVdxjTeEVN2o=\nk8s.io/kube-openapi v0.0.0-20240228011516-70dd3763d340 h1:BZqlfIlq5YbRMFko6/PM7FjZpUb45WallggurYhKGag=\nk8s.io/kube-openapi v0.0.0-20240228011516-70dd3763d340/go.mod h1:yD4MZYeKMBwQKVht279WycxKyM84kkAx2DPrTXaeb98=\nk8s.io/kubectl v0.31.4 h1:c8Af8xd1VjyoKyWMW0xHv2+tYxEjne8s6OOziMmaD10=\nk8s.io/kubectl v0.31.4/go.mod h1:0E0rpXg40Q57wRE6LB9su+4tmwx1IzZrmIEvhQPk0i4=\nk8s.io/utils v0.0.0-20200729134348-d5654de09c73/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA=\nk8s.io/utils v0.0.0-20240711033017-18e509b52bc8 h1:pUdcCO1Lk/tbT5ztQWOBi5HBgbBP1J8+AsQnQCKsi8A=\nk8s.io/utils v0.0.0-20240711033017-18e509b52bc8/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0=\nrsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8=\nsigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd h1:EDPBXCAspyGV4jQlpZSudPeMmr1bNJefnuqLsRAsHZo=\nsigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd/go.mod h1:B8JuhiUyNFVKdsE8h686QcCxMaH6HrOAZj4vswFpcB0=\nsigs.k8s.io/kustomize/api v0.17.2 h1:E7/Fjk7V5fboiuijoZHgs4aHuexi5Y2loXlVOAVAG5g=\nsigs.k8s.io/kustomize/api v0.17.2/go.mod 
h1:UWTz9Ct+MvoeQsHcJ5e+vziRRkwimm3HytpZgIYqye0=\nsigs.k8s.io/kustomize/kyaml v0.17.1 h1:TnxYQxFXzbmNG6gOINgGWQt09GghzgTP6mIurOgrLCQ=\nsigs.k8s.io/kustomize/kyaml v0.17.1/go.mod h1:9V0mCjIEYjlXuCdYsSXvyoy2BTsLESH7TlGV81S282U=\nsigs.k8s.io/structured-merge-diff/v4 v4.0.1/go.mod h1:bJZC9H9iH24zzfZ/41RGcq60oK1F7G282QMXDPYydCw=\nsigs.k8s.io/structured-merge-diff/v4 v4.4.3 h1:sCP7Vv3xx/CWIuTPVN38lUPx0uw0lcLfzaiDa8Ja01A=\nsigs.k8s.io/structured-merge-diff/v4 v4.4.3/go.mod h1:N8f93tFZh9U6vpxwRArLiikrE5/2tiu1w1AGfACIGE4=\nsigs.k8s.io/yaml v1.1.0/go.mod h1:UJmg0vDUVViEyp3mgSv9WPwZCDxu4rQW1olrI1uml+o=\nsigs.k8s.io/yaml v1.2.0/go.mod h1:yfXDCHCao9+ENCvLSE62v9VSji2MKu5jeNfTrofGhJc=\nsigs.k8s.io/yaml v1.4.0 h1:Mk1wCc2gy/F0THH0TAp1QYyJNzRm2KCLy3o5ASXVI5E=\nsigs.k8s.io/yaml v1.4.0/go.mod h1:Ejl7/uTz7PSA4eKMyQCUTnhZYNmLIl+5c2lQPGR2BPY=\n"
  },
  {
    "path": "index.md",
    "content": "# Kubestr\n\n## What is it?\n\nKubestr is a collection of tools to discover, validate and evaluate your kubernetes storage options.\n\nAs adoption of kubernetes grows so have the persistent storage offerings that are available to users. The introduction of [CSI](https://kubernetes.io/blog/2019/01/15/container-storage-interface-ga/)(Container Storage Interface) has enabled storage providers to develop drivers with ease. In fact there are around 100 different CSI drivers available today. Along with the existing in-tree providers, these options can make choosing the right storage difficult.\n\nKubestr can assist in the following ways-\n- Identify the various storage options present in a cluster.\n- Validate if the storage options are configured correctly.\n- Evaluate the storage using common benchmarking tools like FIO.\n\n<script id=\"asciicast-7iJTbWKwdhPHNWYV00LIgx7gn\" src=\"https://asciinema.org/a/7iJTbWKwdhPHNWYV00LIgx7gn.js\" async></script>\n\n## Using Kubestr\n### To install the tool -  \n- Ensure that the kubernetes context is set and the cluster is accessible through your terminal. (Does [kubectl](https://kubernetes.io/docs/tasks/tools/install-kubectl/) work?)\n- Download the latest release [here](https://github.com/kastenhq/kubestr/releases/latest). \n- Unpack the tool and make it an executable `chmod +x kubestr`.\n\n### To discover available storage options -\n- Run `./kubestr`\n\n### To run an FIO test - \n- Run `./kubestr fio -s <storage class>`\n- Additional options like `--size` and `--fiofile` can be specified.\n- For more information visit our [fio](https://kastenhq.github.io/kubestr/fio) page.\n\n### To check a CSI driver's snapshot and restore capabilities - \n- Run `./kubestr csicheck -s <storage class> -v <volume snapshot class>`\n\n## Roadmap\n- In the future we plan to allow users to post their FIO results and compare to others.\n"
  },
  {
    "path": "main.go",
    "content": "// Copyright 2020 Kubestr Developers\n\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you may not use this file except in compliance with the License.\n// You may obtain a copy of the License at\n\n// \thttp://www.apache.org/licenses/LICENSE-2.0\n\n// Unless required by applicable law or agreed to in writing, software\n// distributed under the License is distributed on an \"AS IS\" BASIS,\n// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n// See the License for the specific language governing permissions and\n// limitations under the License.\n\npackage main\n\n//go:generate ./scripts/load_csi_provisioners.sh\n\nimport (\n\t\"github.com/kastenhq/kubestr/cmd\"\n\t\"os\"\n)\n\nfunc main() {\n\tif err := Execute(); err != nil {\n\t\tos.Exit(1)\n\t}\n}\n\n// Execute executes the main command\nfunc Execute() error {\n\treturn cmd.Execute()\n}\n"
  },
  {
    "path": "pkg/block/block_mount.go",
    "content": "package block\n\nimport (\n\t\"context\"\n\t\"fmt\"\n\t\"time\"\n\n\tkankube \"github.com/kanisterio/kanister/pkg/kube\"\n\t\"github.com/kanisterio/kanister/pkg/poll\"\n\t\"github.com/kastenhq/kubestr/pkg/csi\"\n\t\"github.com/kastenhq/kubestr/pkg/csi/types\"\n\tv1 \"k8s.io/api/core/v1\"\n\tsv1 \"k8s.io/api/storage/v1\"\n\tapierrors \"k8s.io/apimachinery/pkg/api/errors\"\n\t\"k8s.io/apimachinery/pkg/api/resource\"\n\t\"k8s.io/client-go/dynamic\"\n\t\"k8s.io/client-go/kubernetes\"\n)\n\ntype BlockMountCheckerArgs struct {\n\tKubeCli kubernetes.Interface\n\tDynCli  dynamic.Interface\n\n\tStorageClass          string\n\tNamespace             string\n\tCleanup               bool\n\tRunAsUser             int64\n\tContainerImage        string\n\tK8sObjectReadyTimeout time.Duration\n\tPVCSize               string\n}\n\nfunc (a *BlockMountCheckerArgs) Validate() error {\n\tif a.KubeCli == nil || a.DynCli == nil || a.StorageClass == \"\" || a.Namespace == \"\" {\n\t\treturn fmt.Errorf(\"require fields are missing. 
(KubeCli, DynCli, StorageClass, Namespace)\")\n\t}\n\treturn nil\n\n}\n\n// BlockMountChecker tests if a storage class can provision volumes for block mounts.\ntype BlockMountChecker interface {\n\tMount(ctx context.Context) (*BlockMountCheckerResult, error)\n\tCleanup()\n}\n\ntype BlockMountCheckerResult struct {\n\tStorageClass *sv1.StorageClass\n}\n\nconst (\n\tblockMountCheckerPVCNameFmt = \"kubestr-blockmount-%s-pvc\"\n\tblockMountCheckerPodNameFmt = \"kubestr-blockmount-%s-pod\"\n\n\tblockModeCheckerPodCleanupTimeout = time.Second * 120\n\tblockModeCheckerPVCCleanupTimeout = time.Second * 120\n\tblockModeCheckerPVCDefaultSize    = \"1Gi\"\n)\n\n// blockMountChecker provides BlockMountChecker\ntype blockMountChecker struct {\n\targs              BlockMountCheckerArgs\n\tpodName           string\n\tpvcName           string\n\tvalidator         csi.ArgumentValidator\n\tappCreator        csi.ApplicationCreator\n\tcleaner           csi.Cleaner\n\tpodCleanupTimeout time.Duration\n\tpvcCleanupTimeout time.Duration\n}\n\nfunc NewBlockMountChecker(args BlockMountCheckerArgs) (BlockMountChecker, error) {\n\tif err := args.Validate(); err != nil {\n\t\treturn nil, err\n\t}\n\n\tb := &blockMountChecker{}\n\tb.args = args\n\tb.podName = fmt.Sprintf(blockMountCheckerPodNameFmt, b.args.StorageClass)\n\tb.pvcName = fmt.Sprintf(blockMountCheckerPVCNameFmt, b.args.StorageClass)\n\tb.validator = csi.NewArgumentValidator(b.args.KubeCli, b.args.DynCli)\n\tb.appCreator = csi.NewApplicationCreator(b.args.KubeCli, args.K8sObjectReadyTimeout)\n\tb.cleaner = csi.NewCleaner(b.args.KubeCli, b.args.DynCli)\n\tb.podCleanupTimeout = blockModeCheckerPodCleanupTimeout\n\tb.pvcCleanupTimeout = blockModeCheckerPVCCleanupTimeout\n\n\treturn b, nil\n}\n\nfunc (b *blockMountChecker) Mount(ctx context.Context) (*BlockMountCheckerResult, error) {\n\tfmt.Printf(\"Fetching StorageClass %s ...\\n\", b.args.StorageClass)\n\tsc, err := b.validator.ValidateStorageClass(ctx, b.args.StorageClass)\n\tif err 
!= nil {\n\t\tfmt.Printf(\" -> Failed to fetch StorageClass(%s): (%v)\\n\", b.args.StorageClass, err)\n\t\treturn nil, err\n\t}\n\n\tfmt.Printf(\" -> Provisioner: %s\\n\", sc.Provisioner)\n\n\tif b.args.PVCSize == \"\" {\n\t\tb.args.PVCSize = blockModeCheckerPVCDefaultSize\n\t}\n\n\trestoreSize, err := resource.ParseQuantity(b.args.PVCSize)\n\tif err != nil {\n\t\tfmt.Printf(\" -> Invalid PVC size %s: (%v)\\n\", b.args.PVCSize, err)\n\t\treturn nil, err\n\t}\n\n\tblockMode := v1.PersistentVolumeBlock\n\tcreatePVCArgs := &types.CreatePVCArgs{\n\t\tName:         b.pvcName,\n\t\tNamespace:    b.args.Namespace,\n\t\tStorageClass: b.args.StorageClass,\n\t\tVolumeMode:   &blockMode,\n\t\tRestoreSize:  &restoreSize,\n\t}\n\n\tif b.args.Cleanup {\n\t\tdefer b.Cleanup()\n\t}\n\n\tfmt.Printf(\"Provisioning a Volume (%s) for block mode access ...\\n\", b.args.PVCSize)\n\ttB := time.Now()\n\t_, err = b.appCreator.CreatePVC(ctx, createPVCArgs)\n\tif err != nil {\n\t\tfmt.Printf(\" -> Failed to provision a Volume (%v)\\n\", err)\n\t\treturn nil, err\n\t}\n\tfmt.Printf(\" -> Created PVC %s/%s (%s)\\n\", b.args.Namespace, b.pvcName, time.Since(tB).Truncate(time.Millisecond).String())\n\n\tfmt.Println(\"Creating a Pod with a volumeDevice ...\")\n\ttB = time.Now()\n\t_, err = b.appCreator.CreatePod(ctx, &types.CreatePodArgs{\n\t\tName:           b.podName,\n\t\tNamespace:      b.args.Namespace,\n\t\tRunAsUser:      b.args.RunAsUser,\n\t\tContainerImage: b.args.ContainerImage,\n\t\tCommand:        []string{\"/bin/sh\"},\n\t\tContainerArgs:  []string{\"-c\", \"tail -f /dev/null\"},\n\t\tPVCMap: map[string]types.VolumePath{\n\t\t\tb.pvcName: {\n\t\t\t\tDevicePath: \"/mnt/block\",\n\t\t\t},\n\t\t},\n\t})\n\tif err != nil {\n\t\tfmt.Printf(\" -> Failed to create Pod (%v)\\n\", err)\n\t\treturn nil, err\n\t}\n\tfmt.Printf(\" -> Created Pod %s/%s\\n\", b.args.Namespace, b.podName)\n\n\tfmt.Printf(\" -> Waiting at most %s for the Pod to become ready ...\\n\", 
b.args.K8sObjectReadyTimeout.String())\n\tif err = b.appCreator.WaitForPodReady(ctx, b.args.Namespace, b.podName); err != nil {\n\t\tfmt.Printf(\" -> The Pod timed out (%v)\\n\", err)\n\t\treturn nil, err\n\t}\n\tfmt.Printf(\" -> The Pod is ready (%s)\\n\", time.Since(tB).Truncate(time.Millisecond).String())\n\n\treturn &BlockMountCheckerResult{\n\t\tStorageClass: sc,\n\t}, nil\n}\n\nfunc (b *blockMountChecker) Cleanup() {\n\tvar (\n\t\tctx = context.Background()\n\t\terr error\n\t)\n\n\t// delete Pod\n\tfmt.Printf(\"Deleting Pod %s/%s ...\\n\", b.args.Namespace, b.podName)\n\ttB := time.Now()\n\terr = b.cleaner.DeletePod(ctx, b.podName, b.args.Namespace)\n\tif err != nil && !apierrors.IsNotFound(err) {\n\t\tfmt.Printf(\"  Error deleting Pod %s/%s - (%v)\\n\", b.args.Namespace, b.podName, err)\n\t}\n\n\t// Give it a chance to run ...\n\tpodWaitCtx, podWaitCancelFn := context.WithTimeout(context.Background(), b.podCleanupTimeout)\n\tdefer podWaitCancelFn()\n\terr = kankube.WaitForPodCompletion(podWaitCtx, b.args.KubeCli, b.args.Namespace, b.podName)\n\tif err == nil || (err != nil && apierrors.IsNotFound(err)) {\n\t\tfmt.Printf(\" -> Deleted pod (%s)\\n\", time.Since(tB).Truncate(time.Millisecond).String())\n\t} else {\n\t\tfmt.Printf(\" -> Failed to delete Pod in %s\\n\", time.Since(tB).Truncate(time.Millisecond).String())\n\t}\n\n\t// delete PVC\n\tfmt.Printf(\"Deleting PVC %s/%s ...\\n\", b.args.Namespace, b.pvcName)\n\ttB = time.Now()\n\terr = b.cleaner.DeletePVC(ctx, b.pvcName, b.args.Namespace)\n\tif err != nil && !apierrors.IsNotFound(err) {\n\t\tfmt.Printf(\"  Error deleting PVC %s/%s - (%v)\\n\", b.args.Namespace, b.pvcName, err)\n\t}\n\n\terr = b.pvcWaitForTermination(b.pvcCleanupTimeout)\n\tif err != nil {\n\t\tfmt.Printf(\" -> PVC failed to delete in %s\\n\", time.Since(tB).Truncate(time.Millisecond).String())\n\t} else {\n\t\tfmt.Printf(\" -> Deleted PVC (%s)\\n\", time.Since(tB).Truncate(time.Millisecond).String())\n\t}\n}\n\nfunc (b 
*blockMountChecker) pvcWaitForTermination(timeout time.Duration) error {\n\tpvcWaitCtx, pvcWaitCancelFn := context.WithTimeout(context.Background(), timeout)\n\tdefer pvcWaitCancelFn()\n\n\treturn poll.Wait(pvcWaitCtx, func(ctx context.Context) (bool, error) {\n\t\t_, err := b.validator.ValidatePVC(ctx, b.pvcName, b.args.Namespace)\n\t\tif err != nil && apierrors.IsNotFound(err) {\n\t\t\treturn true, nil\n\t\t}\n\n\t\treturn false, nil\n\t})\n}\n"
  },
  {
    "path": "pkg/block/block_mount_test.go",
    "content": "package block\n\nimport (\n\t\"context\"\n\t\"errors\"\n\t\"fmt\"\n\t\"testing\"\n\t\"time\"\n\n\tqt \"github.com/frankban/quicktest\"\n\t\"github.com/golang/mock/gomock\"\n\t\"github.com/kastenhq/kubestr/pkg/csi/mocks\"\n\t\"github.com/kastenhq/kubestr/pkg/csi/types\"\n\tv1 \"k8s.io/api/core/v1\"\n\tsv1 \"k8s.io/api/storage/v1\"\n\tapierrors \"k8s.io/apimachinery/pkg/api/errors\"\n\t\"k8s.io/apimachinery/pkg/api/resource\"\n\tmetav1 \"k8s.io/apimachinery/pkg/apis/meta/v1\"\n\t\"k8s.io/apimachinery/pkg/runtime\"\n\t\"k8s.io/apimachinery/pkg/runtime/schema\"\n\tfakedynamic \"k8s.io/client-go/dynamic/fake\"\n\t\"k8s.io/client-go/kubernetes/fake\"\n)\n\nfunc TestBlockMountCheckerNew(t *testing.T) {\n\tkubeCli := fake.NewSimpleClientset()\n\tdynCli := fakedynamic.NewSimpleDynamicClient(runtime.NewScheme())\n\n\tinvalidArgs := []struct {\n\t\tname string\n\t\targs BlockMountCheckerArgs\n\t}{\n\t\t{\"args:empty\", BlockMountCheckerArgs{}},\n\t\t{\"args:KubeCli\", BlockMountCheckerArgs{\n\t\t\tKubeCli: kubeCli,\n\t\t}},\n\t\t{\"args:KubeCli-DynCli\", BlockMountCheckerArgs{\n\t\t\tKubeCli: kubeCli,\n\t\t\tDynCli:  dynCli,\n\t\t}},\n\t\t{\"args:KubeCli-DynCli-StorageClass\", BlockMountCheckerArgs{\n\t\t\tKubeCli:      kubeCli,\n\t\t\tDynCli:       dynCli,\n\t\t\tStorageClass: \"sc\",\n\t\t}},\n\t}\n\tfor _, tc := range invalidArgs {\n\t\tt.Run(tc.name, func(t *testing.T) {\n\t\t\tc := qt.New(t)\n\t\t\tbmt, err := NewBlockMountChecker(tc.args)\n\t\t\tc.Assert(err, qt.IsNotNil)\n\t\t\tc.Assert(bmt, qt.IsNil)\n\t\t})\n\t}\n\n\tt.Run(\"success\", func(t *testing.T) {\n\t\tc := qt.New(t)\n\t\targs := BlockMountCheckerArgs{\n\t\t\tKubeCli:      kubeCli,\n\t\t\tDynCli:       dynCli,\n\t\t\tStorageClass: \"sc\",\n\t\t\tNamespace:    \"namespace\",\n\t\t}\n\t\tbmt, err := NewBlockMountChecker(args)\n\t\tc.Assert(err, qt.IsNil)\n\t\tc.Assert(bmt, qt.IsNotNil)\n\n\t\tb, ok := bmt.(*blockMountChecker)\n\t\tc.Assert(ok, qt.IsTrue)\n\n\t\tc.Assert(b.args, qt.Equals, 
args)\n\t\tc.Assert(b.validator, qt.IsNotNil)\n\t\tc.Assert(b.appCreator, qt.IsNotNil)\n\t\tc.Assert(b.cleaner, qt.IsNotNil)\n\t\tc.Assert(b.podName, qt.Equals, fmt.Sprintf(blockMountCheckerPodNameFmt, args.StorageClass))\n\t\tc.Assert(b.pvcName, qt.Equals, fmt.Sprintf(blockMountCheckerPVCNameFmt, args.StorageClass))\n\t\tc.Assert(b.podCleanupTimeout, qt.Equals, blockModeCheckerPodCleanupTimeout)\n\t\tc.Assert(b.pvcCleanupTimeout, qt.Equals, blockModeCheckerPVCCleanupTimeout)\n\t})\n}\n\nfunc TestBlockMountCheckerPvcWaitForTermination(t *testing.T) {\n\ttype prepareArgs struct {\n\t\tb             *blockMountChecker\n\t\tmockValidator *mocks.MockArgumentValidator\n\t}\n\n\tkubeCli := fake.NewSimpleClientset()\n\tdynCli := fakedynamic.NewSimpleDynamicClient(runtime.NewScheme())\n\n\ttcs := []struct {\n\t\tname       string\n\t\tpvcTimeout time.Duration\n\t\tprepare    func(*prepareArgs)\n\t\texpErr     error\n\t}{\n\t\t{\n\t\t\tname:       \"success\",\n\t\t\tpvcTimeout: time.Hour,\n\t\t\tprepare: func(pa *prepareArgs) {\n\t\t\t\tpa.mockValidator.EXPECT().ValidatePVC(gomock.Any(), pa.b.pvcName, pa.b.args.Namespace).Return(nil, apierrors.NewNotFound(schema.GroupResource{}, \"\"))\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname:       \"timeout\",\n\t\t\tpvcTimeout: time.Microsecond, // pvc wait will timeout\n\t\t\tprepare: func(pa *prepareArgs) {\n\t\t\t\tpa.mockValidator.EXPECT().ValidatePVC(gomock.Any(), pa.b.pvcName, pa.b.args.Namespace).Return(&v1.PersistentVolumeClaim{}, nil).AnyTimes()\n\t\t\t},\n\t\t\texpErr: context.DeadlineExceeded,\n\t\t},\n\t}\n\tfor _, tc := range tcs {\n\t\tt.Run(tc.name, func(t *testing.T) {\n\t\t\tc := qt.New(t)\n\n\t\t\targs := BlockMountCheckerArgs{\n\t\t\t\tKubeCli:      kubeCli,\n\t\t\t\tDynCli:       dynCli,\n\t\t\t\tStorageClass: \"sc\",\n\t\t\t\tNamespace:    \"namespace\",\n\t\t\t}\n\t\t\tbmt, err := NewBlockMountChecker(args)\n\t\t\tc.Assert(err, qt.IsNil)\n\t\t\tc.Assert(bmt, qt.IsNotNil)\n\t\t\tb, ok := 
bmt.(*blockMountChecker)\n\t\t\tc.Assert(ok, qt.IsTrue)\n\n\t\t\tctrl := gomock.NewController(t)\n\t\t\tdefer ctrl.Finish()\n\n\t\t\tpa := &prepareArgs{\n\t\t\t\tb:             b,\n\t\t\t\tmockValidator: mocks.NewMockArgumentValidator(ctrl),\n\t\t\t}\n\t\t\ttc.prepare(pa)\n\t\t\tb.validator = pa.mockValidator\n\n\t\t\terr = b.pvcWaitForTermination(tc.pvcTimeout)\n\n\t\t\tif tc.expErr != nil {\n\t\t\t\tc.Assert(err, qt.ErrorIs, tc.expErr)\n\t\t\t} else {\n\t\t\t\tc.Assert(err, qt.IsNil)\n\t\t\t}\n\t\t})\n\t}\n}\n\nfunc TestBlockMountCheckerCleanup(t *testing.T) {\n\ttype prepareArgs struct {\n\t\tb             *blockMountChecker\n\t\tmockCleaner   *mocks.MockCleaner\n\t\tmockValidator *mocks.MockArgumentValidator\n\t}\n\n\terrNotFound := apierrors.NewNotFound(schema.GroupResource{}, \"\")\n\tsomeError := errors.New(\"test error\")\n\tscName := \"sc\"\n\tnamespace := \"namespace\"\n\trunningPod := &v1.Pod{\n\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\tName:      fmt.Sprintf(blockMountCheckerPodNameFmt, scName),\n\t\t\tNamespace: namespace,\n\t\t},\n\t\tSpec: v1.PodSpec{\n\t\t\tContainers: []v1.Container{\n\t\t\t\t{Name: \"container-0\"},\n\t\t\t},\n\t\t},\n\t\tStatus: v1.PodStatus{\n\t\t\tPhase: v1.PodRunning,\n\t\t},\n\t}\n\n\ttcs := []struct {\n\t\tname       string\n\t\tpodTimeout time.Duration\n\t\tpvcTimeout time.Duration\n\t\tobjs       []runtime.Object\n\t\tprepare    func(*prepareArgs)\n\t}{\n\t\t{\n\t\t\tname:       \"nothing-found\",\n\t\t\tpodTimeout: time.Hour,\n\t\t\tpvcTimeout: time.Hour,\n\t\t\tprepare: func(pa *prepareArgs) {\n\t\t\t\tpa.mockCleaner.EXPECT().DeletePod(gomock.Any(), pa.b.podName, pa.b.args.Namespace).Return(errNotFound)\n\t\t\t\tpa.mockCleaner.EXPECT().DeletePVC(gomock.Any(), pa.b.pvcName, pa.b.args.Namespace).Return(errNotFound)\n\t\t\t\tpa.mockValidator.EXPECT().ValidatePVC(gomock.Any(), pa.b.pvcName, pa.b.args.Namespace).Return(nil, errNotFound)\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname:       \"error-deleting-pod\",\n\t\t\tpodTimeout: 
time.Microsecond, // pod wait will timeout\n\t\t\tpvcTimeout: time.Hour,\n\t\t\tobjs:       []runtime.Object{runningPod},\n\t\t\tprepare: func(pa *prepareArgs) {\n\t\t\t\tpa.mockCleaner.EXPECT().DeletePod(gomock.Any(), pa.b.podName, pa.b.args.Namespace).Return(someError)\n\t\t\t\tpa.mockCleaner.EXPECT().DeletePVC(gomock.Any(), pa.b.pvcName, pa.b.args.Namespace).Return(errNotFound)\n\t\t\t\tpa.mockValidator.EXPECT().ValidatePVC(gomock.Any(), pa.b.pvcName, pa.b.args.Namespace).Return(nil, errNotFound)\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname:       \"error-deleting-pvc\",\n\t\t\tpodTimeout: time.Hour,\n\t\t\tpvcTimeout: time.Microsecond, // timeout\n\t\t\tprepare: func(pa *prepareArgs) {\n\t\t\t\tpa.mockCleaner.EXPECT().DeletePod(gomock.Any(), pa.b.podName, pa.b.args.Namespace).Return(errNotFound)\n\t\t\t\tpa.mockCleaner.EXPECT().DeletePVC(gomock.Any(), pa.b.pvcName, pa.b.args.Namespace).Return(someError)\n\t\t\t\tpa.mockValidator.EXPECT().ValidatePVC(gomock.Any(), pa.b.pvcName, pa.b.args.Namespace).Return(nil, someError).AnyTimes()\n\t\t\t},\n\t\t},\n\t}\n\tfor _, tc := range tcs {\n\t\tt.Run(tc.name, func(t *testing.T) {\n\t\t\tc := qt.New(t)\n\n\t\t\tkubeCli := fake.NewSimpleClientset(tc.objs...)\n\t\t\tdynCli := fakedynamic.NewSimpleDynamicClient(runtime.NewScheme())\n\t\t\targs := BlockMountCheckerArgs{\n\t\t\t\tKubeCli:      kubeCli,\n\t\t\t\tDynCli:       dynCli,\n\t\t\t\tStorageClass: scName,\n\t\t\t\tNamespace:    namespace,\n\t\t\t}\n\t\t\tbmt, err := NewBlockMountChecker(args)\n\t\t\tc.Assert(err, qt.IsNil)\n\t\t\tc.Assert(bmt, qt.IsNotNil)\n\t\t\tb, ok := bmt.(*blockMountChecker)\n\t\t\tc.Assert(ok, qt.IsTrue)\n\n\t\t\tctrl := gomock.NewController(t)\n\t\t\tdefer ctrl.Finish()\n\n\t\t\tpa := &prepareArgs{\n\t\t\t\tb:             b,\n\t\t\t\tmockCleaner:   mocks.NewMockCleaner(ctrl),\n\t\t\t\tmockValidator: mocks.NewMockArgumentValidator(ctrl),\n\t\t\t}\n\t\t\ttc.prepare(pa)\n\t\t\tb.validator = pa.mockValidator\n\t\t\tb.cleaner = 
pa.mockCleaner\n\t\t\tb.podCleanupTimeout = tc.podTimeout\n\t\t\tb.pvcCleanupTimeout = tc.pvcTimeout\n\n\t\t\tb.Cleanup()\n\t\t})\n\t}\n}\n\nfunc TestBlockMountCheckerMount(t *testing.T) {\n\ttype prepareArgs struct {\n\t\tb              *blockMountChecker\n\t\tmockCleaner    *mocks.MockCleaner\n\t\tmockValidator  *mocks.MockArgumentValidator\n\t\tmockAppCreator *mocks.MockApplicationCreator\n\t}\n\n\terrNotFound := apierrors.NewNotFound(schema.GroupResource{}, \"\")\n\tsomeError := errors.New(\"test error\")\n\tscName := \"sc\"\n\tscProvisioner := \"provisioenr\"\n\tsc := &sv1.StorageClass{\n\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\tName: scName,\n\t\t},\n\t\tProvisioner: scProvisioner,\n\t}\n\tnamespace := \"namespace\"\n\tcleanupCalls := func(pa *prepareArgs) {\n\t\tpa.mockCleaner.EXPECT().DeletePod(gomock.Any(), pa.b.podName, pa.b.args.Namespace).Return(errNotFound)\n\t\tpa.mockCleaner.EXPECT().DeletePVC(gomock.Any(), pa.b.pvcName, pa.b.args.Namespace).Return(errNotFound)\n\t\tpa.mockValidator.EXPECT().ValidatePVC(gomock.Any(), pa.b.pvcName, pa.b.args.Namespace).Return(nil, errNotFound)\n\t}\n\tcreatePVCArgs := func(b *blockMountChecker) *types.CreatePVCArgs {\n\t\tpvcSize := b.args.PVCSize\n\t\tif pvcSize == \"\" {\n\t\t\tpvcSize = blockModeCheckerPVCDefaultSize\n\t\t}\n\t\trestoreSize := resource.MustParse(pvcSize)\n\t\tblockMode := v1.PersistentVolumeBlock\n\t\treturn &types.CreatePVCArgs{\n\t\t\tName:         b.pvcName,\n\t\t\tNamespace:    b.args.Namespace,\n\t\t\tStorageClass: b.args.StorageClass,\n\t\t\tVolumeMode:   &blockMode,\n\t\t\tRestoreSize:  &restoreSize,\n\t\t}\n\t}\n\tcreatePVC := func(b *blockMountChecker) *v1.PersistentVolumeClaim {\n\t\treturn &v1.PersistentVolumeClaim{\n\t\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\t\tNamespace: b.args.Namespace,\n\t\t\t\tName:      b.pvcName,\n\t\t\t},\n\t\t}\n\t}\n\tcreatePodArgs := func(b *blockMountChecker) *types.CreatePodArgs {\n\t\treturn &types.CreatePodArgs{\n\t\t\tName:           
b.podName,\n\t\t\tNamespace:      b.args.Namespace,\n\t\t\tRunAsUser:      b.args.RunAsUser,\n\t\t\tContainerImage: b.args.ContainerImage,\n\t\t\tCommand:        []string{\"/bin/sh\"},\n\t\t\tContainerArgs:  []string{\"-c\", \"tail -f /dev/null\"},\n\t\t\tPVCMap: map[string]types.VolumePath{\n\t\t\t\tb.pvcName: {\n\t\t\t\t\tDevicePath: \"/mnt/block\",\n\t\t\t\t},\n\t\t\t},\n\t\t}\n\t}\n\tcreatePod := func(b *blockMountChecker) *v1.Pod {\n\t\treturn &v1.Pod{\n\t\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\t\tNamespace: b.args.Namespace,\n\t\t\t\tName:      b.podName,\n\t\t\t},\n\t\t}\n\t}\n\n\ttcs := []struct {\n\t\tname       string\n\t\tpodTimeout time.Duration\n\t\tpvcTimeout time.Duration\n\t\tnoCleanup  bool\n\t\tobjs       []runtime.Object\n\t\tprepare    func(*prepareArgs)\n\t\tresult     *BlockMountCheckerResult\n\t}{\n\t\t{\n\t\t\tname:       \"no-storage-class\",\n\t\t\tpodTimeout: time.Hour,\n\t\t\tpvcTimeout: time.Hour,\n\t\t\tprepare: func(pa *prepareArgs) {\n\t\t\t\tpa.mockValidator.EXPECT().ValidateStorageClass(gomock.Any(), pa.b.args.StorageClass).Return(nil, apierrors.NewNotFound(schema.GroupResource{}, pa.b.args.StorageClass))\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname:       \"invalid-pvc-size\",\n\t\t\tpodTimeout: time.Hour,\n\t\t\tpvcTimeout: time.Hour,\n\t\t\tprepare: func(pa *prepareArgs) {\n\t\t\t\tpa.b.args.PVCSize = \"10Q\"\n\t\t\t\tpa.mockValidator.EXPECT().ValidateStorageClass(gomock.Any(), pa.b.args.StorageClass).Return(sc, nil)\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname:       \"create-pvc-error\",\n\t\t\tpodTimeout: time.Hour,\n\t\t\tpvcTimeout: time.Hour,\n\t\t\tprepare: func(pa *prepareArgs) {\n\t\t\t\tpa.mockValidator.EXPECT().ValidateStorageClass(gomock.Any(), pa.b.args.StorageClass).Return(sc, nil)\n\t\t\t\tpa.mockAppCreator.EXPECT().CreatePVC(gomock.Any(), createPVCArgs(pa.b)).Return(nil, someError)\n\t\t\t\tcleanupCalls(pa)\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname:       \"create-pod-error\",\n\t\t\tpodTimeout: time.Hour,\n\t\t\tpvcTimeout: 
time.Hour,\n\t\t\tprepare: func(pa *prepareArgs) {\n\t\t\t\tpa.mockValidator.EXPECT().ValidateStorageClass(gomock.Any(), pa.b.args.StorageClass).Return(sc, nil)\n\t\t\t\tpa.mockAppCreator.EXPECT().CreatePVC(gomock.Any(), createPVCArgs(pa.b)).Return(createPVC(pa.b), nil)\n\t\t\t\tpa.mockAppCreator.EXPECT().CreatePod(gomock.Any(), createPodArgs(pa.b)).Return(nil, someError)\n\t\t\t\tcleanupCalls(pa)\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname:       \"wait-for-pod-error\",\n\t\t\tpodTimeout: time.Hour,\n\t\t\tpvcTimeout: time.Hour,\n\t\t\tprepare: func(pa *prepareArgs) {\n\t\t\t\tpa.mockValidator.EXPECT().ValidateStorageClass(gomock.Any(), pa.b.args.StorageClass).Return(sc, nil)\n\t\t\t\tpa.mockAppCreator.EXPECT().CreatePVC(gomock.Any(), createPVCArgs(pa.b)).Return(createPVC(pa.b), nil)\n\t\t\t\tpa.mockAppCreator.EXPECT().CreatePod(gomock.Any(), createPodArgs(pa.b)).Return(createPod(pa.b), nil)\n\t\t\t\tpa.mockAppCreator.EXPECT().WaitForPodReady(gomock.Any(), pa.b.args.Namespace, pa.b.podName).Return(someError)\n\t\t\t\tcleanupCalls(pa)\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname:       \"success-no-cleanup\",\n\t\t\tpodTimeout: time.Hour,\n\t\t\tpvcTimeout: time.Hour,\n\t\t\tnoCleanup:  true,\n\t\t\tprepare: func(pa *prepareArgs) {\n\t\t\t\tpa.mockValidator.EXPECT().ValidateStorageClass(gomock.Any(), pa.b.args.StorageClass).Return(sc, nil)\n\t\t\t\tpa.b.args.PVCSize = blockModeCheckerPVCDefaultSize\n\t\t\t\tpa.mockAppCreator.EXPECT().CreatePVC(gomock.Any(), createPVCArgs(pa.b)).Return(createPVC(pa.b), nil)\n\t\t\t\tpa.mockAppCreator.EXPECT().CreatePod(gomock.Any(), createPodArgs(pa.b)).Return(createPod(pa.b), nil)\n\t\t\t\tpa.mockAppCreator.EXPECT().WaitForPodReady(gomock.Any(), pa.b.args.Namespace, pa.b.podName).Return(nil)\n\t\t\t},\n\t\t\tresult: &BlockMountCheckerResult{\n\t\t\t\tStorageClass: sc,\n\t\t\t},\n\t\t},\n\t}\n\tfor _, tc := range tcs {\n\t\tt.Run(tc.name, func(t *testing.T) {\n\t\t\tc := qt.New(t)\n\t\t\tctx := context.Background()\n\n\t\t\tkubeCli := 
fake.NewSimpleClientset(tc.objs...)\n\t\t\tdynCli := fakedynamic.NewSimpleDynamicClient(runtime.NewScheme())\n\t\t\targs := BlockMountCheckerArgs{\n\t\t\t\tKubeCli:      kubeCli,\n\t\t\t\tDynCli:       dynCli,\n\t\t\t\tStorageClass: scName,\n\t\t\t\tNamespace:    namespace,\n\t\t\t\tCleanup:      !tc.noCleanup,\n\t\t\t}\n\t\t\tbmt, err := NewBlockMountChecker(args)\n\t\t\tc.Assert(err, qt.IsNil)\n\t\t\tc.Assert(bmt, qt.IsNotNil)\n\t\t\tb, ok := bmt.(*blockMountChecker)\n\t\t\tc.Assert(ok, qt.IsTrue)\n\n\t\t\tctrl := gomock.NewController(t)\n\t\t\tdefer ctrl.Finish()\n\n\t\t\tpa := &prepareArgs{\n\t\t\t\tb:              b,\n\t\t\t\tmockCleaner:    mocks.NewMockCleaner(ctrl),\n\t\t\t\tmockValidator:  mocks.NewMockArgumentValidator(ctrl),\n\t\t\t\tmockAppCreator: mocks.NewMockApplicationCreator(ctrl),\n\t\t\t}\n\t\t\ttc.prepare(pa)\n\t\t\tb.validator = pa.mockValidator\n\t\t\tb.cleaner = pa.mockCleaner\n\t\t\tb.appCreator = pa.mockAppCreator\n\t\t\tb.podCleanupTimeout = tc.podTimeout\n\t\t\tb.pvcCleanupTimeout = tc.pvcTimeout\n\n\t\t\tresult, err := b.Mount(ctx)\n\t\t\tif tc.result != nil {\n\t\t\t\tc.Assert(result, qt.DeepEquals, tc.result)\n\t\t\t\tc.Assert(err, qt.IsNil)\n\t\t\t} else {\n\t\t\t\tc.Assert(result, qt.IsNil)\n\t\t\t\tc.Assert(err, qt.IsNotNil)\n\t\t\t}\n\t\t})\n\t}\n}\n"
  },
  {
    "path": "pkg/common/common.go",
    "content": "package common\n\nconst (\n\t// VolSnapClassDriverKey describes the driver key in VolumeSnapshotClass resource\n\tVolSnapClassDriverKey = \"driver\"\n\t// DefaultPodImage is the default pod image\n\tDefaultPodImage = \"ghcr.io/kastenhq/kubestr:latest\"\n\t// SnapGroupName describes the snapshot group name\n\tSnapGroupName = \"snapshot.storage.k8s.io\"\n\t// VolumeSnapshotClassResourcePlural describes volume snapshot classes\n\tVolumeSnapshotClassResourcePlural = \"volumesnapshotclasses\"\n\t// VolumeSnapshotResourcePlural is \"volumesnapshots\"\n\tVolumeSnapshotResourcePlural = \"volumesnapshots\"\n\t// SnapshotVersion is the API version of the VolumeSnapshot resource\n\tSnapshotVersion = \"snapshot.storage.k8s.io/v1\"\n)\n"
  },
  {
    "path": "pkg/csi/csi.go",
    "content": "package csi\n\nimport (\n\t\"context\"\n\n\t\"github.com/kastenhq/kubestr/pkg/csi/types\"\n)\n\ntype CSI interface {\n\tRunSnapshotRestore(ctx context.Context, args *types.CSISnapshotRestoreArgs) (*types.CSISnapshotRestoreResults, error)\n}\n"
  },
  {
    "path": "pkg/csi/csi_ops.go",
    "content": "package csi\n\n// This file contains general Kubernetes operations, not just CSI related operations.\n\nimport (\n\t\"context\"\n\t\"fmt\"\n\t\"log\"\n\t\"net/http\"\n\t\"net/url\"\n\t\"strings\"\n\t\"time\"\n\n\t\"k8s.io/apimachinery/pkg/runtime\"\n\n\t\"github.com/kanisterio/kanister/pkg/kube\"\n\tkansnapshot \"github.com/kanisterio/kanister/pkg/kube/snapshot\"\n\t\"github.com/kanisterio/kanister/pkg/poll\"\n\t\"github.com/kastenhq/kubestr/pkg/common\"\n\t\"github.com/kastenhq/kubestr/pkg/csi/types\"\n\tsnapv1 \"github.com/kubernetes-csi/external-snapshotter/client/v4/apis/volumesnapshot/v1\"\n\t\"github.com/pkg/errors\"\n\tv1 \"k8s.io/api/core/v1\"\n\tsv1 \"k8s.io/api/storage/v1\"\n\t\"k8s.io/apimachinery/pkg/api/resource\"\n\tmetav1 \"k8s.io/apimachinery/pkg/apis/meta/v1\"\n\t\"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured\"\n\t\"k8s.io/apimachinery/pkg/fields\"\n\t\"k8s.io/apimachinery/pkg/runtime/schema\"\n\t\"k8s.io/client-go/dynamic\"\n\t\"k8s.io/client-go/kubernetes\"\n\t\"k8s.io/client-go/rest\"\n\tpf \"k8s.io/client-go/tools/portforward\"\n\t\"k8s.io/client-go/transport/spdy\"\n)\n\nconst (\n\tdefaultReadyWaitTimeout = 2 * time.Minute\n\n\tPVCKind = \"PersistentVolumeClaim\"\n\tPodKind = \"Pod\"\n\n\t// DefaultVolumeSnapshotClassAnnotation is an annotation used to denote a default VolumeSnapshotClass.\n\tDefaultVolumeSnapshotClassAnnotation = \"snapshot.storage.kubernetes.io/is-default-class\"\n)\n\n//go:generate go run github.com/golang/mock/mockgen -destination=mocks/mock_argument_validator.go -package=mocks . 
ArgumentValidator\ntype ArgumentValidator interface {\n\t//Rename\n\tValidatePVC(ctx context.Context, pvcName, namespace string) (*v1.PersistentVolumeClaim, error)\n\tFetchPV(ctx context.Context, pvName string) (*v1.PersistentVolume, error)\n\tValidateVolumeSnapshot(ctx context.Context, snapshotName, namespace string, groupVersion *metav1.GroupVersionForDiscovery) (*snapv1.VolumeSnapshot, error)\n\tValidateNamespace(ctx context.Context, namespace string) error\n\tValidateStorageClass(ctx context.Context, storageClass string) (*sv1.StorageClass, error)\n\tValidateVolumeSnapshotClass(ctx context.Context, volumeSnapshotClass string, groupVersion *metav1.GroupVersionForDiscovery) (*unstructured.Unstructured, error)\n}\n\ntype validateOperations struct {\n\tkubeCli kubernetes.Interface\n\tdynCli  dynamic.Interface\n}\n\nfunc NewArgumentValidator(kubeCli kubernetes.Interface, dynCli dynamic.Interface) ArgumentValidator {\n\treturn &validateOperations{\n\t\tkubeCli: kubeCli,\n\t\tdynCli:  dynCli,\n\t}\n}\n\nfunc (o *validateOperations) ValidatePVC(ctx context.Context, pvcName, namespace string) (*v1.PersistentVolumeClaim, error) {\n\tif o.kubeCli == nil {\n\t\treturn nil, fmt.Errorf(\"kubeCli not initialized\")\n\t}\n\tpvc, err := o.kubeCli.CoreV1().PersistentVolumeClaims(namespace).Get(ctx, pvcName, metav1.GetOptions{})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn pvc, nil\n}\n\nfunc (o *validateOperations) ValidateVolumeSnapshot(ctx context.Context, snapshotName, namespace string, groupVersion *metav1.GroupVersionForDiscovery) (*snapv1.VolumeSnapshot, error) {\n\tVolSnapGVR := schema.GroupVersionResource{Group: snapv1.GroupName, Version: groupVersion.Version, Resource: common.VolumeSnapshotResourcePlural}\n\tuVS, err := o.dynCli.Resource(VolSnapGVR).Namespace(namespace).Get(ctx, snapshotName, metav1.GetOptions{})\n\tif err != nil {\n\t\tlog.Fatalf(\"Failed to get VolumeSnapshot: %v\", err)\n\t}\n\tvolumeSnapshot := &snapv1.VolumeSnapshot{}\n\terr = 
runtime.DefaultUnstructuredConverter.FromUnstructured(uVS.UnstructuredContent(), volumeSnapshot)\n\treturn volumeSnapshot, err\n}\n\nfunc (o *validateOperations) FetchPV(ctx context.Context, pvName string) (*v1.PersistentVolume, error) {\n\tif o.kubeCli == nil {\n\t\treturn nil, fmt.Errorf(\"kubeCli not initialized\")\n\t}\n\tpv, err := o.kubeCli.CoreV1().PersistentVolumes().Get(ctx, pvName, metav1.GetOptions{})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn pv, nil\n}\n\nfunc (o *validateOperations) ValidateNamespace(ctx context.Context, namespace string) error {\n\tif o.kubeCli == nil {\n\t\treturn fmt.Errorf(\"kubeCli not initialized\")\n\t}\n\t_, err := o.kubeCli.CoreV1().Namespaces().Get(ctx, namespace, metav1.GetOptions{})\n\treturn err\n}\n\nfunc (o *validateOperations) ValidateStorageClass(ctx context.Context, storageClass string) (*sv1.StorageClass, error) {\n\tif o.kubeCli == nil {\n\t\treturn nil, fmt.Errorf(\"kubeCli not initialized\")\n\t}\n\tsc, err := o.kubeCli.StorageV1().StorageClasses().Get(ctx, storageClass, metav1.GetOptions{})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn sc, nil\n}\n\nfunc (o *validateOperations) ValidateVolumeSnapshotClass(ctx context.Context, volumeSnapshotClass string, groupVersion *metav1.GroupVersionForDiscovery) (*unstructured.Unstructured, error) {\n\tif o.dynCli == nil {\n\t\treturn nil, fmt.Errorf(\"dynCli not initialized\")\n\t}\n\tVolSnapClassGVR := schema.GroupVersionResource{Group: common.SnapGroupName, Version: groupVersion.Version, Resource: common.VolumeSnapshotClassResourcePlural}\n\treturn o.dynCli.Resource(VolSnapClassGVR).Get(ctx, volumeSnapshotClass, metav1.GetOptions{})\n}\n\n//go:generate go run github.com/golang/mock/mockgen -destination=mocks/mock_application_creator.go -package=mocks . 
ApplicationCreator\ntype ApplicationCreator interface {\n\tCreatePVC(ctx context.Context, args *types.CreatePVCArgs) (*v1.PersistentVolumeClaim, error)\n\tCreatePod(ctx context.Context, args *types.CreatePodArgs) (*v1.Pod, error)\n\tWaitForPVCReady(ctx context.Context, namespace string, pvcName string) error\n\tWaitForPodReady(ctx context.Context, namespace string, podName string) error\n}\n\ntype applicationCreate struct {\n\tkubeCli               kubernetes.Interface\n\tk8sObjectReadyTimeout time.Duration\n}\n\nfunc NewApplicationCreator(kubeCli kubernetes.Interface, k8sObjectReadyTimeout time.Duration) ApplicationCreator {\n\treturn &applicationCreate{\n\t\tkubeCli:               kubeCli,\n\t\tk8sObjectReadyTimeout: k8sObjectReadyTimeout,\n\t}\n}\n\nfunc (c *applicationCreate) CreatePVC(ctx context.Context, args *types.CreatePVCArgs) (*v1.PersistentVolumeClaim, error) {\n\tif c.kubeCli == nil {\n\t\treturn nil, fmt.Errorf(\"kubeCli not initialized\")\n\t}\n\tif err := args.Validate(); err != nil {\n\t\treturn nil, err\n\t}\n\tpvc := &v1.PersistentVolumeClaim{\n\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\tName:         args.Name,\n\t\t\tGenerateName: args.GenerateName,\n\t\t\tNamespace:    args.Namespace,\n\t\t\tLabels: map[string]string{\n\t\t\t\tcreatedByLabel: \"yes\",\n\t\t\t},\n\t\t},\n\t\tSpec: v1.PersistentVolumeClaimSpec{\n\t\t\tAccessModes:      []v1.PersistentVolumeAccessMode{v1.ReadWriteOnce},\n\t\t\tStorageClassName: &args.StorageClass,\n\t\t\tVolumeMode:       args.VolumeMode,\n\t\t\tResources: v1.VolumeResourceRequirements{\n\t\t\t\tRequests: v1.ResourceList{\n\t\t\t\t\tv1.ResourceStorage: resource.MustParse(\"1Gi\"),\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t}\n\n\tif args.DataSource != nil {\n\t\tpvc.Spec.DataSource = args.DataSource\n\t}\n\n\tif args.RestoreSize != nil && !args.RestoreSize.IsZero() {\n\t\tpvc.Spec.Resources.Requests[v1.ResourceStorage] = *args.RestoreSize\n\t}\n\n\tpvcRes, err := 
c.kubeCli.CoreV1().PersistentVolumeClaims(args.Namespace).Create(ctx, pvc, metav1.CreateOptions{})\n\tif err != nil {\n\t\treturn pvc, err\n\t}\n\n\treturn pvcRes, nil\n}\n\nfunc (c *applicationCreate) CreatePod(ctx context.Context, args *types.CreatePodArgs) (*v1.Pod, error) {\n\tif c.kubeCli == nil {\n\t\treturn nil, fmt.Errorf(\"kubeCli not initialized\")\n\t}\n\tif err := args.Validate(); err != nil {\n\t\treturn nil, err\n\t}\n\tif args.ContainerImage == \"\" {\n\t\targs.ContainerImage = common.DefaultPodImage\n\t}\n\n\tvolumeNameInPod := \"persistent-storage\"\n\tcontainerName := args.Name\n\tif containerName == \"\" {\n\t\tcontainerName = args.GenerateName\n\t}\n\tpod := &v1.Pod{\n\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\tName:         args.Name,\n\t\t\tGenerateName: args.GenerateName,\n\t\t\tNamespace:    args.Namespace,\n\t\t\tLabels: map[string]string{\n\t\t\t\tcreatedByLabel: \"yes\",\n\t\t\t},\n\t\t},\n\t\tSpec: v1.PodSpec{\n\t\t\tContainers: []v1.Container{{\n\t\t\t\tName:    containerName,\n\t\t\t\tImage:   args.ContainerImage,\n\t\t\t\tCommand: args.Command,\n\t\t\t\tArgs:    args.ContainerArgs,\n\t\t\t}},\n\t\t},\n\t}\n\tpvcCount := 1\n\tfor pvcName, path := range args.PVCMap {\n\t\tpod.Spec.Volumes = append(pod.Spec.Volumes, v1.Volume{\n\t\t\tName: fmt.Sprintf(\"%s-%d\", volumeNameInPod, pvcCount),\n\t\t\tVolumeSource: v1.VolumeSource{\n\t\t\t\tPersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{\n\t\t\t\t\tClaimName: pvcName,\n\t\t\t\t},\n\t\t\t},\n\t\t})\n\t\tif len(path.MountPath) != 0 {\n\t\t\tpod.Spec.Containers[0].VolumeMounts = append(pod.Spec.Containers[0].VolumeMounts, v1.VolumeMount{\n\t\t\t\tName:      fmt.Sprintf(\"%s-%d\", volumeNameInPod, pvcCount),\n\t\t\t\tMountPath: path.MountPath,\n\t\t\t})\n\t\t} else {\n\t\t\tpod.Spec.Containers[0].VolumeDevices = append(pod.Spec.Containers[0].VolumeDevices, v1.VolumeDevice{\n\t\t\t\tName:       fmt.Sprintf(\"%s-%d\", volumeNameInPod, pvcCount),\n\t\t\t\tDevicePath: 
path.DevicePath,\n\t\t\t})\n\t\t}\n\t\tpvcCount++\n\t}\n\n\tif args.RunAsUser > 0 {\n\t\tpod.Spec.SecurityContext = &v1.PodSecurityContext{\n\t\t\tRunAsUser: &args.RunAsUser,\n\t\t\tFSGroup:   &args.RunAsUser,\n\t\t}\n\t}\n\n\tpodRes, err := c.kubeCli.CoreV1().Pods(args.Namespace).Create(ctx, pod, metav1.CreateOptions{})\n\tif err != nil {\n\t\treturn pod, err\n\t}\n\treturn podRes, nil\n}\n\nfunc (c *applicationCreate) WaitForPVCReady(ctx context.Context, namespace, name string) error {\n\tif c.kubeCli == nil {\n\t\treturn fmt.Errorf(\"kubeCli not initialized\")\n\t}\n\n\terr := c.waitForPVCReady(ctx, namespace, name)\n\tif err != nil {\n\t\teventErr := c.getErrorFromEvents(ctx, namespace, name, PVCKind)\n\t\tif eventErr != nil {\n\t\t\treturn errors.Wrapf(eventErr, \"had issues creating PVC\")\n\t\t}\n\t}\n\treturn err\n}\n\nfunc (c *applicationCreate) waitForPVCReady(ctx context.Context, namespace string, name string) error {\n\tpvcReadyTimeout := c.k8sObjectReadyTimeout\n\tif pvcReadyTimeout == 0 {\n\t\tpvcReadyTimeout = defaultReadyWaitTimeout\n\t}\n\n\ttimeoutCtx, waitCancel := context.WithTimeout(ctx, pvcReadyTimeout)\n\tdefer waitCancel()\n\treturn poll.Wait(timeoutCtx, func(ctx context.Context) (bool, error) {\n\t\tpvc, err := c.kubeCli.CoreV1().PersistentVolumeClaims(namespace).Get(timeoutCtx, name, metav1.GetOptions{})\n\t\tif err != nil {\n\t\t\treturn false, errors.Wrapf(err, \"could not find PVC\")\n\t\t}\n\n\t\tif pvc.Status.Phase == v1.ClaimLost {\n\t\t\treturn false, fmt.Errorf(\"failed to create a PVC, ClaimLost\")\n\t\t}\n\n\t\treturn pvc.Status.Phase == v1.ClaimBound, nil\n\t})\n}\n\nfunc (c *applicationCreate) WaitForPodReady(ctx context.Context, namespace string, podName string) error {\n\tif c.kubeCli == nil {\n\t\treturn fmt.Errorf(\"kubeCli not initialized\")\n\t}\n\terr := c.waitForPodReady(ctx, namespace, podName)\n\tif err != nil {\n\t\teventErr := c.getErrorFromEvents(ctx, namespace, podName, PodKind)\n\t\tif eventErr != nil 
{\n\t\t\treturn errors.Wrapf(eventErr, \"had issues creating Pod\")\n\t\t}\n\t}\n\treturn err\n}\n\nfunc (c *applicationCreate) waitForPodReady(ctx context.Context, namespace string, podName string) error {\n\tpodReadyTimeout := c.k8sObjectReadyTimeout\n\tif podReadyTimeout == 0 {\n\t\tpodReadyTimeout = defaultReadyWaitTimeout\n\t}\n\n\ttimeoutCtx, waitCancel := context.WithTimeout(ctx, podReadyTimeout)\n\tdefer waitCancel()\n\terr := kube.WaitForPodReady(timeoutCtx, c.kubeCli, namespace, podName)\n\treturn err\n}\n\nfunc (c *applicationCreate) getErrorFromEvents(ctx context.Context, namespace, name, kind string) error {\n\tfieldSelectors := fields.Set{\n\t\t\"involvedObject.kind\": kind,\n\t\t\"involvedObject.name\": name,\n\t}.AsSelector().String()\n\tlistOptions := metav1.ListOptions{\n\t\tTypeMeta:      metav1.TypeMeta{Kind: kind},\n\t\tFieldSelector: fieldSelectors,\n\t}\n\n\tevents, eventErr := c.kubeCli.CoreV1().Events(namespace).List(ctx, listOptions)\n\tif eventErr != nil {\n\t\treturn errors.Wrapf(eventErr, \"failed to retreieve events for %s of kind: %s\", name, kind)\n\t}\n\n\tfor _, event := range events.Items {\n\t\tif event.Type == v1.EventTypeWarning {\n\t\t\treturn errors.New(event.Message)\n\t\t}\n\t}\n\treturn nil\n}\n\n//go:generate go run github.com/golang/mock/mockgen -destination=mocks/mock_snapshot_creator.go -package=mocks . 
SnapshotCreator\ntype SnapshotCreator interface {\n\tNewSnapshotter() (kansnapshot.Snapshotter, error)\n\tCreateSnapshot(ctx context.Context, snapshotter kansnapshot.Snapshotter, args *types.CreateSnapshotArgs) (*snapv1.VolumeSnapshot, error)\n\tCreateFromSourceCheck(ctx context.Context, snapshotter kansnapshot.Snapshotter, args *types.CreateFromSourceCheckArgs, SnapshotGroupVersion *metav1.GroupVersionForDiscovery) error\n}\n\ntype snapshotCreate struct {\n\tkubeCli kubernetes.Interface\n\tdynCli  dynamic.Interface\n}\n\nfunc (c *snapshotCreate) NewSnapshotter() (kansnapshot.Snapshotter, error) {\n\tif c.kubeCli == nil {\n\t\treturn nil, fmt.Errorf(\"kubeCli not initialized\")\n\t}\n\tif c.dynCli == nil {\n\t\treturn nil, fmt.Errorf(\"dynCli not initialized\")\n\t}\n\treturn kansnapshot.NewSnapshotter(c.kubeCli, c.dynCli), nil\n}\n\nfunc (c *snapshotCreate) CreateSnapshot(ctx context.Context, snapshotter kansnapshot.Snapshotter, args *types.CreateSnapshotArgs) (*snapv1.VolumeSnapshot, error) {\n\tif snapshotter == nil || args == nil {\n\t\treturn nil, fmt.Errorf(\"snapshotter or args are empty\")\n\t}\n\tif err := args.Validate(); err != nil {\n\t\treturn nil, err\n\t}\n\tsnapshotMeta := kansnapshot.ObjectMeta{\n\t\tName:      args.SnapshotName,\n\t\tNamespace: args.Namespace,\n\t}\n\terr := snapshotter.Create(ctx, args.PVCName, &args.VolumeSnapshotClass, true, snapshotMeta)\n\tif err != nil {\n\t\treturn nil, errors.Wrapf(err, \"CSI Driver failed to create snapshot for PVC (%s) in Namespace (%s)\", args.PVCName, args.Namespace)\n\t}\n\tsnap, err := snapshotter.Get(ctx, args.SnapshotName, args.Namespace)\n\tif err != nil {\n\t\treturn nil, errors.Wrapf(err, \"failed to get CSI snapshot (%s) in Namespace (%s)\", args.SnapshotName, args.Namespace)\n\t}\n\treturn snap, nil\n}\n\nfunc (c *snapshotCreate) CreateFromSourceCheck(ctx context.Context, snapshotter kansnapshot.Snapshotter, args *types.CreateFromSourceCheckArgs, SnapshotGroupVersion 
*metav1.GroupVersionForDiscovery) error {\n\tif c.dynCli == nil {\n\t\treturn fmt.Errorf(\"dynCli not initialized\")\n\t}\n\tif SnapshotGroupVersion == nil || SnapshotGroupVersion.Version == \"\" {\n\t\treturn fmt.Errorf(\"snapshot group version not provided\")\n\t}\n\tif snapshotter == nil || args == nil {\n\t\treturn fmt.Errorf(\"snapshotter or args are nil\")\n\t}\n\tif err := args.Validate(); err != nil {\n\t\treturn err\n\t}\n\ttargetSnapClassName := clonePrefix + args.VolumeSnapshotClass\n\terr := snapshotter.CloneVolumeSnapshotClass(ctx, args.VolumeSnapshotClass, targetSnapClassName, kansnapshot.DeletionPolicyRetain, []string{DefaultVolumeSnapshotClassAnnotation})\n\tif err != nil {\n\t\treturn errors.Wrapf(err, \"failed to clone a VolumeSnapshotClass to use to restore the snapshot\")\n\t}\n\tdefer func() {\n\t\tVolSnapClassGVR := schema.GroupVersionResource{Group: common.SnapGroupName, Version: SnapshotGroupVersion.Version, Resource: common.VolumeSnapshotClassResourcePlural}\n\t\terr := c.dynCli.Resource(VolSnapClassGVR).Delete(ctx, targetSnapClassName, metav1.DeleteOptions{})\n\t\tif err != nil {\n\t\t\tfmt.Printf(\"Delete VSC Error (%s) - (%v)\\n\", targetSnapClassName, err)\n\t\t}\n\t}()\n\n\tsnapSrc, err := snapshotter.GetSource(ctx, args.SnapshotName, args.Namespace)\n\tif err != nil {\n\t\treturn errors.Wrapf(err, \"failed to get source snapshot source (%s)\", args.SnapshotName)\n\t}\n\tsnapshotCFSCloneName := clonePrefix + args.SnapshotName\n\t// test the CreateFromSource API\n\tdefer func() {\n\t\t_, _ = snapshotter.Delete(context.Background(), snapshotCFSCloneName, args.Namespace)\n\t}()\n\tsrc := &kansnapshot.Source{\n\t\tHandle:                  snapSrc.Handle,\n\t\tDriver:                  snapSrc.Driver,\n\t\tVolumeSnapshotClassName: targetSnapClassName,\n\t}\n\tsnapshotMeta := kansnapshot.ObjectMeta{\n\t\tName:      snapshotCFSCloneName,\n\t\tNamespace: args.Namespace,\n\t}\n\terr = snapshotter.CreateFromSource(ctx, src, true, snapshotMeta, 
kansnapshot.ObjectMeta{})\n\tif err != nil {\n\t\treturn errors.Wrapf(err, \"failed to clone snapshot from source (%s)\", snapshotCFSCloneName)\n\t}\n\treturn nil\n}\n\n//go:generate go run github.com/golang/mock/mockgen -destination=mocks/mock_cleaner.go -package=mocks . Cleaner\ntype Cleaner interface {\n\tDeletePVC(ctx context.Context, pvcName string, namespace string) error\n\tDeletePod(ctx context.Context, podName string, namespace string) error\n\tDeleteSnapshot(ctx context.Context, snapshotName string, namespace string, SnapshotGroupVersion *metav1.GroupVersionForDiscovery) error\n}\n\ntype cleanse struct {\n\tkubeCli kubernetes.Interface\n\tdynCli  dynamic.Interface\n}\n\nfunc NewCleaner(kubeCli kubernetes.Interface, dynCli dynamic.Interface) Cleaner {\n\treturn &cleanse{\n\t\tkubeCli: kubeCli,\n\t\tdynCli:  dynCli,\n\t}\n}\n\nfunc (c *cleanse) DeletePVC(ctx context.Context, pvcName string, namespace string) error {\n\tif c.kubeCli == nil {\n\t\treturn fmt.Errorf(\"kubeCli not initialized\")\n\t}\n\treturn c.kubeCli.CoreV1().PersistentVolumeClaims(namespace).Delete(ctx, pvcName, metav1.DeleteOptions{})\n}\n\nfunc (c *cleanse) DeletePod(ctx context.Context, podName string, namespace string) error {\n\tif c.kubeCli == nil {\n\t\treturn fmt.Errorf(\"kubeCli not initialized\")\n\t}\n\treturn c.kubeCli.CoreV1().Pods(namespace).Delete(ctx, podName, metav1.DeleteOptions{})\n}\n\nfunc (c *cleanse) DeleteSnapshot(ctx context.Context, snapshotName string, namespace string, SnapshotGroupVersion *metav1.GroupVersionForDiscovery) error {\n\tif c.dynCli == nil {\n\t\treturn fmt.Errorf(\"dynCli not initialized\")\n\t}\n\tif SnapshotGroupVersion == nil || SnapshotGroupVersion.Version == \"\" {\n\t\treturn fmt.Errorf(\"snapshot group version not provided\")\n\t}\n\tVolSnapGVR := schema.GroupVersionResource{Group: common.SnapGroupName, Version: SnapshotGroupVersion.Version, Resource: common.VolumeSnapshotResourcePlural}\n\treturn 
c.dynCli.Resource(VolSnapGVR).Namespace(namespace).Delete(ctx, snapshotName, metav1.DeleteOptions{})\n}\n\n//go:generate go run github.com/golang/mock/mockgen -destination=mocks/mock_api_version_fetcher.go -package=mocks . ApiVersionFetcher\ntype ApiVersionFetcher interface {\n\tGetCSISnapshotGroupVersion() (*metav1.GroupVersionForDiscovery, error)\n}\n\ntype apiVersionFetch struct {\n\tkubeCli kubernetes.Interface\n}\n\nfunc (p *apiVersionFetch) GetCSISnapshotGroupVersion() (*metav1.GroupVersionForDiscovery, error) {\n\tif p.kubeCli == nil {\n\t\treturn nil, fmt.Errorf(\"kubeCli not initialized\")\n\t}\n\tgroups, _, err := p.kubeCli.Discovery().ServerGroupsAndResources()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tfor _, group := range groups {\n\t\tif group.Name == common.SnapGroupName {\n\t\t\treturn &group.PreferredVersion, nil\n\t\t}\n\t}\n\treturn nil, fmt.Errorf(\"snapshot API group not found\")\n}\n\n//go:generate go run github.com/golang/mock/mockgen -destination=mocks/mock_data_validator.go -package=mocks . DataValidator\ntype DataValidator interface {\n\tFetchPodData(ctx context.Context, podName string, podNamespace string) (string, error)\n}\n\ntype validateData struct {\n\tkubeCli kubernetes.Interface\n}\n\nfunc (p *validateData) FetchPodData(ctx context.Context, podName string, podNamespace string) (string, error) {\n\tif p.kubeCli == nil {\n\t\treturn \"\", fmt.Errorf(\"kubeCli not initialized\")\n\t}\n\tstdout, _, err := kube.Exec(ctx, p.kubeCli, podNamespace, podName, \"\", []string{\"sh\", \"-c\", \"cat /data/out.txt\"}, nil)\n\treturn stdout, err\n}\n\n//go:generate go run github.com/golang/mock/mockgen -destination=mocks/mock_port_forwarder.go -package=mocks . 
PortForwarder\ntype PortForwarder interface {\n\tFetchRestConfig() (*rest.Config, error)\n\tPortForwardAPod(req *types.PortForwardAPodRequest) error\n}\n\ntype portforward struct{}\n\nfunc (p *portforward) PortForwardAPod(req *types.PortForwardAPodRequest) error {\n\tpath := fmt.Sprintf(\"/api/v1/namespaces/%s/pods/%s/portforward\",\n\t\treq.Pod.Namespace, req.Pod.Name)\n\thostIP := strings.TrimPrefix(req.RestConfig.Host, \"https://\")\n\n\ttransport, upgrader, err := spdy.RoundTripperFor(req.RestConfig)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tdialer := spdy.NewDialer(upgrader, &http.Client{Transport: transport}, http.MethodPost, &url.URL{Scheme: \"https\", Path: path, Host: hostIP})\n\tfw, err := pf.New(dialer, []string{fmt.Sprintf(\"%d:%d\", req.LocalPort, req.PodPort)}, req.StopCh, req.ReadyCh, &req.OutStream, &req.ErrOutStream)\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn fw.ForwardPorts()\n}\n\nfunc (p *portforward) FetchRestConfig() (*rest.Config, error) {\n\treturn kube.LoadConfig()\n}\n\n//go:generate go run github.com/golang/mock/mockgen -destination=mocks/mock_kube_executor.go -package=mocks . KubeExecutor\ntype KubeExecutor interface {\n\tExec(ctx context.Context, namespace string, podName string, ContainerName string, command []string) (string, error)\n}\n\ntype kubeExec struct {\n\tkubeCli kubernetes.Interface\n}\n\nfunc (k *kubeExec) Exec(ctx context.Context, namespace string, podName string, ContainerName string, command []string) (string, error) {\n\tif k.kubeCli == nil {\n\t\treturn \"\", fmt.Errorf(\"kubeCli not initialized\")\n\t}\n\tstdout, _, err := kube.Exec(ctx, k.kubeCli, namespace, podName, ContainerName, command, nil)\n\treturn stdout, err\n}\n"
  },
  {
    "path": "pkg/csi/csi_ops_test.go",
    "content": "package csi\n\nimport (\n\t\"context\"\n\t\"errors\"\n\t\"fmt\"\n\t\"strings\"\n\n\tkansnapshot \"github.com/kanisterio/kanister/pkg/kube/snapshot\"\n\t\"github.com/kastenhq/kubestr/pkg/common\"\n\t\"github.com/kastenhq/kubestr/pkg/csi/types\"\n\tsnapv1 \"github.com/kubernetes-csi/external-snapshotter/client/v4/apis/volumesnapshot/v1\"\n\tpkgerrors \"github.com/pkg/errors\"\n\t. \"gopkg.in/check.v1\"\n\tv1 \"k8s.io/api/core/v1\"\n\tsv1 \"k8s.io/api/storage/v1\"\n\t\"k8s.io/apimachinery/pkg/api/resource\"\n\tmetav1 \"k8s.io/apimachinery/pkg/apis/meta/v1\"\n\t\"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured\"\n\t\"k8s.io/apimachinery/pkg/runtime\"\n\t\"k8s.io/apimachinery/pkg/runtime/schema\"\n\tdiscoveryfake \"k8s.io/client-go/discovery/fake\"\n\t\"k8s.io/client-go/dynamic\"\n\tfakedynamic \"k8s.io/client-go/dynamic/fake\"\n\t\"k8s.io/client-go/kubernetes\"\n\t\"k8s.io/client-go/kubernetes/fake\"\n\tk8stesting \"k8s.io/client-go/testing\"\n)\n\nfunc (s *CSITestSuite) TestGetDriverNameFromUVSC(c *C) {\n\n\tfor _, tc := range []struct {\n\t\tvsc     unstructured.Unstructured\n\t\tversion string\n\t\texpOut  string\n\t}{\n\t\t{\n\t\t\tvsc: unstructured.Unstructured{\n\t\t\t\tObject: map[string]interface{}{\n\t\t\t\t\tcommon.VolSnapClassDriverKey: \"p2\",\n\t\t\t\t},\n\t\t\t},\n\t\t\tversion: common.SnapshotVersion,\n\t\t\texpOut:  \"p2\",\n\t\t},\n\t\t{\n\t\t\tvsc: unstructured.Unstructured{\n\t\t\t\tObject: map[string]interface{}{},\n\t\t\t},\n\t\t\tversion: common.SnapshotVersion,\n\t\t\texpOut:  \"\",\n\t\t},\n\t\t{\n\t\t\tvsc: unstructured.Unstructured{\n\t\t\t\tObject: map[string]interface{}{\n\t\t\t\t\tcommon.VolSnapClassDriverKey: map[string]string{},\n\t\t\t\t},\n\t\t\t},\n\t\t\tversion: common.SnapshotVersion,\n\t\t\texpOut:  \"\",\n\t\t},\n\t} {\n\t\tdriverName := getDriverNameFromUVSC(tc.vsc, tc.version)\n\t\tc.Assert(driverName, Equals, tc.expOut)\n\t}\n\n}\n\nfunc (s *CSITestSuite) TestGetCSISnapshotGroupVersion(c *C) {\n\tfor _, tc := 
range []struct {\n\t\tcli        kubernetes.Interface\n\t\tresources  []*metav1.APIResourceList\n\t\terrChecker Checker\n\t\tgvChecker  Checker\n\t}{\n\t\t{\n\t\t\tcli: fake.NewSimpleClientset(),\n\t\t\tresources: []*metav1.APIResourceList{\n\t\t\t\t{\n\t\t\t\t\tGroupVersion: \"/////\",\n\t\t\t\t},\n\t\t\t},\n\t\t\terrChecker: NotNil,\n\t\t\tgvChecker:  IsNil,\n\t\t},\n\t\t{\n\t\t\tcli: fake.NewSimpleClientset(),\n\t\t\tresources: []*metav1.APIResourceList{\n\t\t\t\t{\n\t\t\t\t\tGroupVersion: \"snapshot.storage.k8s.io/v1\",\n\t\t\t\t},\n\t\t\t},\n\t\t\terrChecker: IsNil,\n\t\t\tgvChecker:  NotNil,\n\t\t},\n\t\t{\n\t\t\tcli: fake.NewSimpleClientset(),\n\t\t\tresources: []*metav1.APIResourceList{\n\t\t\t\t{\n\t\t\t\t\tGroupVersion: \"notrbac.authorization.k8s.io/v1\",\n\t\t\t\t},\n\t\t\t},\n\t\t\terrChecker: NotNil,\n\t\t\tgvChecker:  IsNil,\n\t\t},\n\t\t{\n\t\t\tcli:        nil,\n\t\t\tresources:  nil,\n\t\t\terrChecker: NotNil,\n\t\t\tgvChecker:  IsNil,\n\t\t},\n\t} {\n\t\tcli := tc.cli\n\t\tif cli != nil {\n\t\t\tcli.Discovery().(*discoveryfake.FakeDiscovery).Resources = tc.resources\n\t\t}\n\t\tp := &apiVersionFetch{kubeCli: cli}\n\t\tgv, err := p.GetCSISnapshotGroupVersion()\n\t\tc.Check(err, tc.errChecker)\n\t\tc.Check(gv, tc.gvChecker)\n\t}\n}\n\nfunc (s *CSITestSuite) TestValidatePVC(c *C) {\n\tctx := context.Background()\n\tops := NewArgumentValidator(fake.NewSimpleClientset(), nil)\n\tpvc, err := ops.ValidatePVC(ctx, \"pvc\", \"ns\")\n\tc.Check(err, NotNil)\n\tc.Check(pvc, IsNil)\n\n\tops = NewArgumentValidator(fake.NewSimpleClientset(&v1.PersistentVolumeClaim{\n\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\tNamespace: \"ns\",\n\t\t\tName:      \"pvc\",\n\t\t},\n\t}), nil)\n\tpvc, err = ops.ValidatePVC(ctx, \"pvc\", \"ns\")\n\tc.Check(err, IsNil)\n\tc.Check(pvc, NotNil)\n\n\tops = NewArgumentValidator(nil, nil)\n\tpvc, err = ops.ValidatePVC(ctx, \"pvc\", \"ns\")\n\tc.Check(err, NotNil)\n\tc.Check(pvc, IsNil)\n}\n\nfunc (s *CSITestSuite) TestFetchPV(c *C) 
{\n\tctx := context.Background()\n\tops := NewArgumentValidator(fake.NewSimpleClientset(), nil)\n\tpv, err := ops.FetchPV(ctx, \"pv\")\n\tc.Check(err, NotNil)\n\tc.Check(pv, IsNil)\n\n\tops = NewArgumentValidator(fake.NewSimpleClientset(&v1.PersistentVolume{\n\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\tName: \"pv\",\n\t\t},\n\t}), nil)\n\tpv, err = ops.FetchPV(ctx, \"pv\")\n\tc.Check(err, IsNil)\n\tc.Check(pv, NotNil)\n\n\tops = NewArgumentValidator(nil, nil)\n\tpv, err = ops.FetchPV(ctx, \"pv\")\n\tc.Check(err, NotNil)\n\tc.Check(pv, IsNil)\n}\n\nfunc (s *CSITestSuite) TestValidateNamespace(c *C) {\n\tctx := context.Background()\n\tops := NewArgumentValidator(fake.NewSimpleClientset(), nil)\n\terr := ops.ValidateNamespace(ctx, \"ns\")\n\tc.Check(err, NotNil)\n\n\tops = NewArgumentValidator(fake.NewSimpleClientset(&v1.Namespace{\n\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\tName: \"ns\",\n\t\t},\n\t}), nil)\n\terr = ops.ValidateNamespace(ctx, \"ns\")\n\tc.Check(err, IsNil)\n\n\tops = NewArgumentValidator(nil, nil)\n\terr = ops.ValidateNamespace(ctx, \"ns\")\n\tc.Check(err, NotNil)\n}\n\nfunc (s *CSITestSuite) TestValidateStorageClass(c *C) {\n\tctx := context.Background()\n\tops := &validateOperations{\n\t\tkubeCli: fake.NewSimpleClientset(),\n\t}\n\tsc, err := ops.ValidateStorageClass(ctx, \"sc\")\n\tc.Check(err, NotNil)\n\tc.Check(sc, IsNil)\n\n\tops = &validateOperations{\n\t\tkubeCli: fake.NewSimpleClientset(&sv1.StorageClass{\n\t\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\t\tName: \"sc\",\n\t\t\t},\n\t\t}),\n\t}\n\tsc, err = ops.ValidateStorageClass(ctx, \"sc\")\n\tc.Check(err, IsNil)\n\tc.Check(sc, NotNil)\n\n\tops = &validateOperations{\n\t\tkubeCli: nil,\n\t}\n\tsc, err = ops.ValidateStorageClass(ctx, \"sc\")\n\tc.Check(err, NotNil)\n\tc.Check(sc, IsNil)\n}\n\nfunc (s *CSITestSuite) TestValidateVolumeSnapshotClass(c *C) {\n\tctx := context.Background()\n\tfor _, tc := range []struct {\n\t\tops          *validateOperations\n\t\tgroupVersion string\n\t\tversion      
string\n\t\terrChecker   Checker\n\t\tuVCSChecker  Checker\n\t}{\n\t\t{\n\t\t\tops: &validateOperations{\n\t\t\t\tdynCli: fakedynamic.NewSimpleDynamicClient(runtime.NewScheme()),\n\t\t\t},\n\t\t\tgroupVersion: common.SnapshotVersion,\n\t\t\terrChecker:   NotNil,\n\t\t\tuVCSChecker:  IsNil,\n\t\t},\n\t\t{\n\t\t\tops: &validateOperations{\n\t\t\t\tdynCli: fakedynamic.NewSimpleDynamicClient(\n\t\t\t\t\truntime.NewScheme(),\n\t\t\t\t\t&unstructured.Unstructured{\n\t\t\t\t\t\tObject: map[string]interface{}{\n\t\t\t\t\t\t\t\"apiVersion\": fmt.Sprintf(\"%s/%s\", kansnapshot.GroupName, kansnapshot.Version),\n\t\t\t\t\t\t\t\"kind\":       \"VolumeSnapshotClass\",\n\t\t\t\t\t\t\t\"metadata\": map[string]interface{}{\n\t\t\t\t\t\t\t\t\"name\": \"vsc\",\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\"driver\":         \"somesnapshotter\",\n\t\t\t\t\t\t\t\"deletionPolicy\": \"Delete\",\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t),\n\t\t\t},\n\t\t\tgroupVersion: common.SnapshotVersion,\n\t\t\tversion:      kansnapshot.Version,\n\t\t\terrChecker:   IsNil,\n\t\t\tuVCSChecker:  NotNil,\n\t\t},\n\t} {\n\t\tuVSC, err := tc.ops.ValidateVolumeSnapshotClass(ctx, \"vsc\", &metav1.GroupVersionForDiscovery{GroupVersion: tc.groupVersion, Version: tc.version})\n\t\tc.Check(err, tc.errChecker)\n\t\tc.Check(uVSC, tc.uVCSChecker)\n\t}\n}\n\nfunc (s *CSITestSuite) TestCreatePVC(c *C) {\n\tctx := context.Background()\n\tresourceQuantity := resource.MustParse(\"1Gi\")\n\tfor _, tc := range []struct {\n\t\tcli         kubernetes.Interface\n\t\targs        *types.CreatePVCArgs\n\t\tfailCreates bool\n\t\terrChecker  Checker\n\t\tpvcChecker  Checker\n\t}{\n\t\t{\n\t\t\tcli: fake.NewSimpleClientset(),\n\t\t\targs: &types.CreatePVCArgs{\n\t\t\t\tGenerateName: \"genName\",\n\t\t\t\tStorageClass: \"sc\",\n\t\t\t\tNamespace:    \"ns\",\n\t\t\t\tDataSource: &v1.TypedLocalObjectReference{\n\t\t\t\t\tName: \"ds\",\n\t\t\t\t},\n\t\t\t\tRestoreSize: &resourceQuantity,\n\t\t\t},\n\t\t\terrChecker: IsNil,\n\t\t\tpvcChecker: 
NotNil,\n\t\t},\n\t\t{\n\t\t\tcli: fake.NewSimpleClientset(),\n\t\t\targs: &types.CreatePVCArgs{\n\t\t\t\tGenerateName: \"genName\",\n\t\t\t\tStorageClass: \"sc\",\n\t\t\t\tNamespace:    \"ns\",\n\t\t\t\tDataSource: &v1.TypedLocalObjectReference{\n\t\t\t\t\tName: \"ds\",\n\t\t\t\t},\n\t\t\t},\n\t\t\terrChecker: IsNil,\n\t\t\tpvcChecker: NotNil,\n\t\t},\n\t\t{\n\t\t\tcli: fake.NewSimpleClientset(),\n\t\t\targs: &types.CreatePVCArgs{\n\t\t\t\tGenerateName: \"genName\",\n\t\t\t\tStorageClass: \"sc\",\n\t\t\t\tNamespace:    \"ns\",\n\t\t\t},\n\t\t\terrChecker: IsNil,\n\t\t\tpvcChecker: NotNil,\n\t\t},\n\t\t{\n\t\t\tcli: fake.NewSimpleClientset(),\n\t\t\targs: &types.CreatePVCArgs{\n\t\t\t\tGenerateName: \"genName\",\n\t\t\t\tStorageClass: \"sc\",\n\t\t\t\tNamespace:    \"ns\",\n\t\t\t},\n\t\t\tfailCreates: true,\n\t\t\terrChecker:  NotNil,\n\t\t\tpvcChecker:  NotNil,\n\t\t},\n\t\t{\n\t\t\tcli: fake.NewSimpleClientset(),\n\t\t\targs: &types.CreatePVCArgs{\n\t\t\t\tGenerateName: \"\",\n\t\t\t\tStorageClass: \"sc\",\n\t\t\t\tNamespace:    \"ns\",\n\t\t\t},\n\t\t\terrChecker: NotNil,\n\t\t\tpvcChecker: IsNil,\n\t\t},\n\t\t{\n\t\t\tcli: fake.NewSimpleClientset(),\n\t\t\targs: &types.CreatePVCArgs{\n\t\t\t\tGenerateName: \"something\",\n\t\t\t\tStorageClass: \"\",\n\t\t\t\tNamespace:    \"ns\",\n\t\t\t},\n\t\t\terrChecker: NotNil,\n\t\t\tpvcChecker: IsNil,\n\t\t},\n\t\t{\n\t\t\tcli: fake.NewSimpleClientset(),\n\t\t\targs: &types.CreatePVCArgs{\n\t\t\t\tGenerateName: \"Something\",\n\t\t\t\tStorageClass: \"sc\",\n\t\t\t\tNamespace:    \"\",\n\t\t\t},\n\t\t\terrChecker: NotNil,\n\t\t\tpvcChecker: IsNil,\n\t\t},\n\t\t{\n\t\t\tcli:        nil,\n\t\t\targs:       &types.CreatePVCArgs{},\n\t\t\terrChecker: NotNil,\n\t\t\tpvcChecker: IsNil,\n\t\t},\n\t} {\n\t\tappCreator := NewApplicationCreator(tc.cli, 0)\n\t\tcreator := appCreator.(*applicationCreate)\n\t\tif tc.failCreates {\n\t\t\tcreator.kubeCli.(*fake.Clientset).PrependReactor(\"create\", \"persistentvolumeclaims\", 
func(action k8stesting.Action) (handled bool, ret runtime.Object, err error) {\n\t\t\t\treturn true, nil, errors.New(\"Error creating object\")\n\t\t\t})\n\t\t}\n\t\tpvc, err := creator.CreatePVC(ctx, tc.args)\n\t\tc.Check(pvc, tc.pvcChecker)\n\t\tc.Check(err, tc.errChecker)\n\t\tif pvc != nil && err == nil {\n\t\t\t_, ok := pvc.Labels[createdByLabel]\n\t\t\tc.Assert(ok, Equals, true)\n\t\t\tc.Assert(pvc.GenerateName, Equals, tc.args.GenerateName)\n\t\t\tc.Assert(pvc.Namespace, Equals, tc.args.Namespace)\n\t\t\tc.Assert(pvc.Spec.AccessModes, DeepEquals, []v1.PersistentVolumeAccessMode{v1.ReadWriteOnce})\n\t\t\tc.Assert(*pvc.Spec.StorageClassName, Equals, tc.args.StorageClass)\n\t\t\tc.Assert(pvc.Spec.DataSource, DeepEquals, tc.args.DataSource)\n\t\t\tif tc.args.RestoreSize != nil {\n\t\t\t\tc.Assert(pvc.Spec.Resources, DeepEquals, v1.VolumeResourceRequirements{\n\t\t\t\t\tRequests: v1.ResourceList{\n\t\t\t\t\t\tv1.ResourceStorage: *tc.args.RestoreSize,\n\t\t\t\t\t},\n\t\t\t\t})\n\t\t\t} else {\n\t\t\t\tc.Assert(pvc.Spec.Resources, DeepEquals, v1.VolumeResourceRequirements{\n\t\t\t\t\tRequests: v1.ResourceList{\n\t\t\t\t\t\tv1.ResourceStorage: resource.MustParse(\"1Gi\"),\n\t\t\t\t\t},\n\t\t\t\t})\n\t\t\t}\n\t\t}\n\t}\n}\n\nfunc (s *CSITestSuite) TestCreatePod(c *C) {\n\tctx := context.Background()\n\tfor _, tc := range []struct {\n\t\tdescription string\n\t\tcli         kubernetes.Interface\n\t\targs        *types.CreatePodArgs\n\t\tfailCreates bool\n\t\terrChecker  Checker\n\t\tpodChecker  Checker\n\t}{\n\t\t{\n\t\t\tdescription: \"pod with container image and runAsUser 1000 created\",\n\t\t\tcli:         fake.NewSimpleClientset(),\n\t\t\targs: &types.CreatePodArgs{\n\t\t\t\tGenerateName:   \"name\",\n\t\t\t\tNamespace:      \"ns\",\n\t\t\t\tCommand:        []string{\"somecommand\"},\n\t\t\t\tRunAsUser:      1000,\n\t\t\t\tContainerImage: \"containerimage\",\n\t\t\t\tPVCMap: map[string]types.VolumePath{\n\t\t\t\t\t\"pvcname\": {\n\t\t\t\t\t\tMountPath: 
\"/mnt/fs\",\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\terrChecker: IsNil,\n\t\t\tpodChecker: NotNil,\n\t\t},\n\t\t{\n\t\t\tdescription: \"Pod creation error on kubeCli\",\n\t\t\tcli:         fake.NewSimpleClientset(),\n\t\t\targs: &types.CreatePodArgs{\n\t\t\t\tGenerateName: \"name\",\n\t\t\t\tNamespace:    \"ns\",\n\t\t\t\tCommand:      []string{\"somecommand\"},\n\t\t\t\tPVCMap: map[string]types.VolumePath{\n\t\t\t\t\t\"pvcname\": {\n\t\t\t\t\t\tMountPath: \"/mnt/fs\",\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\tfailCreates: true,\n\t\t\terrChecker:  NotNil,\n\t\t\tpodChecker:  NotNil,\n\t\t},\n\t\t{\n\t\t\tdescription: \"Neither Name nor GenerateName set\",\n\t\t\tcli:         fake.NewSimpleClientset(),\n\t\t\targs: &types.CreatePodArgs{\n\t\t\t\tGenerateName: \"\",\n\t\t\t\tNamespace:    \"ns\",\n\t\t\t\tCommand:      []string{\"somecommand\"},\n\t\t\t\tPVCMap: map[string]types.VolumePath{\n\t\t\t\t\t\"pvcname\": {\n\t\t\t\t\t\tMountPath: \"/mnt/fs\",\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\terrChecker: NotNil,\n\t\t\tpodChecker: IsNil,\n\t\t},\n\t\t{\n\t\t\tdescription: \"Both Name and GenerateName set\",\n\t\t\tcli:         fake.NewSimpleClientset(),\n\t\t\targs: &types.CreatePodArgs{\n\t\t\t\tGenerateName: \"name\",\n\t\t\t\tName:         \"name\",\n\t\t\t\tNamespace:    \"ns\",\n\t\t\t\tCommand:      []string{\"somecommand\"},\n\t\t\t\tPVCMap: map[string]types.VolumePath{\n\t\t\t\t\t\"pvcname\": {\n\t\t\t\t\t\tMountPath: \"/mnt/fs\",\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\terrChecker: NotNil,\n\t\t\tpodChecker: IsNil,\n\t\t},\n\t\t{\n\t\t\tdescription: \"Neither MountPath nor DevicePath set error\",\n\t\t\tcli:         fake.NewSimpleClientset(),\n\t\t\targs: &types.CreatePodArgs{\n\t\t\t\tGenerateName: \"name\",\n\t\t\t\tNamespace:    \"ns\",\n\t\t\t\tCommand:      []string{\"somecommand\"},\n\t\t\t\tPVCMap:       map[string]types.VolumePath{\"pvcname\": {}},\n\t\t\t},\n\t\t\terrChecker: NotNil,\n\t\t\tpodChecker: 
IsNil,\n\t\t},\n\t\t{\n\t\t\tdescription: \"Both MountPath and DevicePath set error\",\n\t\t\tcli:         fake.NewSimpleClientset(),\n\t\t\targs: &types.CreatePodArgs{\n\t\t\t\tGenerateName: \"name\",\n\t\t\t\tNamespace:    \"ns\",\n\t\t\t\tCommand:      []string{\"somecommand\"},\n\t\t\t\tPVCMap: map[string]types.VolumePath{\n\t\t\t\t\t\"pvcname\": {\n\t\t\t\t\t\tMountPath:  \"/mnt/fs\",\n\t\t\t\t\t\tDevicePath: \"/mnt/dev\",\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\terrChecker: NotNil,\n\t\t\tpodChecker: IsNil,\n\t\t},\n\t\t{\n\t\t\tdescription: \"PVC name not set error\",\n\t\t\tcli:         fake.NewSimpleClientset(),\n\t\t\targs: &types.CreatePodArgs{\n\t\t\t\tGenerateName: \"name\",\n\t\t\t\tNamespace:    \"ns\",\n\t\t\t\tCommand:      []string{\"somecommand\"},\n\t\t\t\tPVCMap:       map[string]types.VolumePath{\"\": {MountPath: \"/mnt/fs\"}},\n\t\t\t},\n\t\t\terrChecker: NotNil,\n\t\t\tpodChecker: IsNil,\n\t\t},\n\t\t{\n\t\t\tdescription: \"default namespace pod is created\",\n\t\t\tcli:         fake.NewSimpleClientset(),\n\t\t\targs: &types.CreatePodArgs{\n\t\t\t\tGenerateName: \"name\",\n\t\t\t\tNamespace:    \"\",\n\t\t\t\tCommand:      []string{\"somecommand\"},\n\t\t\t\tPVCMap: map[string]types.VolumePath{\n\t\t\t\t\t\"pvcname\": {\n\t\t\t\t\t\tMountPath: \"/mnt/fs\",\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\terrChecker: NotNil,\n\t\t\tpodChecker: IsNil,\n\t\t},\n\t\t{\n\t\t\tdescription: \"ns namespace pod is created (GenerateName/MountPath)\",\n\t\t\tcli:         fake.NewSimpleClientset(),\n\t\t\targs: &types.CreatePodArgs{\n\t\t\t\tGenerateName: \"name\",\n\t\t\t\tNamespace:    \"ns\",\n\t\t\t\tCommand:      []string{\"somecommand\"},\n\t\t\t\tPVCMap: map[string]types.VolumePath{\n\t\t\t\t\t\"pvcname\": {\n\t\t\t\t\t\tMountPath: \"/mnt/fs\",\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\terrChecker: IsNil,\n\t\t\tpodChecker: NotNil,\n\t\t},\n\t\t{\n\t\t\tdescription: \"ns namespace pod is created (Name/DevicePath)\",\n\t\t\tcli:         
fake.NewSimpleClientset(),\n\t\t\targs: &types.CreatePodArgs{\n\t\t\t\tName:      \"name\",\n\t\t\t\tNamespace: \"ns\",\n\t\t\t\tCommand:   []string{\"somecommand\"},\n\t\t\t\tPVCMap: map[string]types.VolumePath{\n\t\t\t\t\t\"pvcname\": {\n\t\t\t\t\t\tDevicePath: \"/mnt/dev\",\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\terrChecker: IsNil,\n\t\t\tpodChecker: NotNil,\n\t\t},\n\t\t{\n\t\t\tdescription: \"kubeCli not initialized\",\n\t\t\tcli:         nil,\n\t\t\targs:        &types.CreatePodArgs{},\n\t\t\terrChecker:  NotNil,\n\t\t\tpodChecker:  IsNil,\n\t\t},\n\t} {\n\t\tfmt.Println(\"test:\", tc.description)\n\t\tcreator := &applicationCreate{kubeCli: tc.cli}\n\t\tif tc.failCreates {\n\t\t\tcreator.kubeCli.(*fake.Clientset).PrependReactor(\"create\", \"pods\", func(action k8stesting.Action) (handled bool, ret runtime.Object, err error) {\n\t\t\t\treturn true, nil, errors.New(\"Error creating object\")\n\t\t\t})\n\t\t}\n\t\tpod, err := creator.CreatePod(ctx, tc.args)\n\t\tc.Check(pod, tc.podChecker)\n\t\tc.Check(err, tc.errChecker)\n\t\tif pod != nil && err == nil {\n\t\t\t_, ok := pod.Labels[createdByLabel]\n\t\t\tc.Assert(ok, Equals, true)\n\t\t\tif tc.args.GenerateName != \"\" {\n\t\t\t\tc.Assert(pod.GenerateName, Equals, tc.args.GenerateName)\n\t\t\t\tc.Assert(pod.Spec.Containers[0].Name, Equals, tc.args.GenerateName)\n\t\t\t} else {\n\t\t\t\tc.Assert(pod.Name, Equals, tc.args.Name)\n\t\t\t\tc.Assert(pod.Spec.Containers[0].Name, Equals, tc.args.Name)\n\t\t\t}\n\t\t\tc.Assert(pod.Namespace, Equals, tc.args.Namespace)\n\t\t\tc.Assert(len(pod.Spec.Containers), Equals, 1)\n\t\t\tc.Assert(pod.Spec.Containers[0].Command, DeepEquals, tc.args.Command)\n\t\t\tc.Assert(pod.Spec.Containers[0].Args, DeepEquals, tc.args.ContainerArgs)\n\t\t\tindex := 0\n\t\t\tpvcCount := 1\n\t\t\tfor pvcName, path := range tc.args.PVCMap {\n\t\t\t\tif len(path.MountPath) != 0 {\n\t\t\t\t\tc.Assert(pod.Spec.Containers[0].VolumeMounts[index], DeepEquals, v1.VolumeMount{\n\t\t\t\t\t\tName:      
fmt.Sprintf(\"persistent-storage-%d\", pvcCount),\n\t\t\t\t\t\tMountPath: path.MountPath,\n\t\t\t\t\t})\n\t\t\t\t\tc.Assert(pod.Spec.Containers[0].VolumeDevices, IsNil)\n\t\t\t\t} else {\n\t\t\t\t\tc.Assert(pod.Spec.Containers[0].VolumeDevices[index], DeepEquals, v1.VolumeDevice{\n\t\t\t\t\t\tName:       fmt.Sprintf(\"persistent-storage-%d\", pvcCount),\n\t\t\t\t\t\tDevicePath: path.DevicePath,\n\t\t\t\t\t})\n\t\t\t\t\tc.Assert(pod.Spec.Containers[0].VolumeMounts, IsNil)\n\t\t\t\t}\n\t\t\t\tc.Assert(pod.Spec.Volumes[index], DeepEquals, v1.Volume{\n\t\t\t\t\tName: fmt.Sprintf(\"persistent-storage-%d\", pvcCount),\n\t\t\t\t\tVolumeSource: v1.VolumeSource{\n\t\t\t\t\t\tPersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{\n\t\t\t\t\t\t\tClaimName: pvcName,\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t})\n\t\t\t\tindex++\n\t\t\t\tpvcCount++\n\t\t\t}\n\t\t\tif tc.args.ContainerImage == \"\" {\n\t\t\t\tc.Assert(pod.Spec.Containers[0].Image, Equals, common.DefaultPodImage)\n\t\t\t} else {\n\t\t\t\tc.Assert(pod.Spec.Containers[0].Image, Equals, tc.args.ContainerImage)\n\t\t\t}\n\t\t\tif tc.args.RunAsUser > 0 {\n\t\t\t\tc.Assert(pod.Spec.SecurityContext, DeepEquals, &v1.PodSecurityContext{\n\t\t\t\t\tRunAsUser: &tc.args.RunAsUser,\n\t\t\t\t\tFSGroup:   &tc.args.RunAsUser,\n\t\t\t\t})\n\t\t\t} else {\n\t\t\t\tc.Check(pod.Spec.SecurityContext, IsNil)\n\t\t\t}\n\t\t}\n\t}\n}\n\nfunc (s *CSITestSuite) TestCreateSnapshot(c *C) {\n\tctx := context.Background()\n\tfor _, tc := range []struct {\n\t\tsnapshotter kansnapshot.Snapshotter\n\t\targs        *types.CreateSnapshotArgs\n\t\tsnapChecker Checker\n\t\terrChecker  Checker\n\t}{\n\t\t{\n\t\t\tsnapshotter: &fakeSnapshotter{\n\t\t\t\tgetSnap: &snapv1.VolumeSnapshot{\n\t\t\t\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\t\t\t\tName: \"createdName\",\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\targs: &types.CreateSnapshotArgs{\n\t\t\t\tNamespace:           \"ns\",\n\t\t\t\tPVCName:             \"pvc\",\n\t\t\t\tVolumeSnapshotClass: 
\"vsc\",\n\t\t\t\tSnapshotName:        \"snap1\",\n\t\t\t},\n\t\t\tsnapChecker: NotNil,\n\t\t\terrChecker:  IsNil,\n\t\t},\n\t\t{\n\t\t\tsnapshotter: &fakeSnapshotter{\n\t\t\t\tgetErr: fmt.Errorf(\"get Error\"),\n\t\t\t},\n\t\t\targs: &types.CreateSnapshotArgs{\n\t\t\t\tNamespace:           \"ns\",\n\t\t\t\tPVCName:             \"pvc\",\n\t\t\t\tVolumeSnapshotClass: \"vsc\",\n\t\t\t\tSnapshotName:        \"snap1\",\n\t\t\t},\n\t\t\tsnapChecker: IsNil,\n\t\t\terrChecker:  NotNil,\n\t\t},\n\t\t{\n\t\t\tsnapshotter: &fakeSnapshotter{\n\t\t\t\tcreateErr: fmt.Errorf(\"create Error\"),\n\t\t\t},\n\t\t\targs: &types.CreateSnapshotArgs{\n\t\t\t\tNamespace:           \"ns\",\n\t\t\t\tPVCName:             \"pvc\",\n\t\t\t\tVolumeSnapshotClass: \"vsc\",\n\t\t\t\tSnapshotName:        \"snap1\",\n\t\t\t},\n\t\t\tsnapChecker: IsNil,\n\t\t\terrChecker:  NotNil,\n\t\t},\n\t\t{\n\t\t\tsnapshotter: &fakeSnapshotter{\n\t\t\t\tcreateErr: fmt.Errorf(\"create Error\"),\n\t\t\t},\n\t\t\targs: &types.CreateSnapshotArgs{\n\t\t\t\tNamespace:           \"\",\n\t\t\t\tPVCName:             \"pvc\",\n\t\t\t\tVolumeSnapshotClass: \"vsc\",\n\t\t\t\tSnapshotName:        \"snap1\",\n\t\t\t},\n\t\t\tsnapChecker: IsNil,\n\t\t\terrChecker:  NotNil,\n\t\t},\n\t\t{\n\t\t\tsnapshotter: &fakeSnapshotter{\n\t\t\t\tcreateErr: fmt.Errorf(\"create Error\"),\n\t\t\t},\n\t\t\targs: &types.CreateSnapshotArgs{\n\t\t\t\tNamespace:           \"ns\",\n\t\t\t\tPVCName:             \"\",\n\t\t\t\tVolumeSnapshotClass: \"vsc\",\n\t\t\t\tSnapshotName:        \"snap1\",\n\t\t\t},\n\t\t\tsnapChecker: IsNil,\n\t\t\terrChecker:  NotNil,\n\t\t},\n\t\t{\n\t\t\tsnapshotter: &fakeSnapshotter{\n\t\t\t\tcreateErr: fmt.Errorf(\"create Error\"),\n\t\t\t},\n\t\t\targs: &types.CreateSnapshotArgs{\n\t\t\t\tNamespace:           \"ns\",\n\t\t\t\tPVCName:             \"pvc\",\n\t\t\t\tVolumeSnapshotClass: \"\",\n\t\t\t\tSnapshotName:        \"snap1\",\n\t\t\t},\n\t\t\tsnapChecker: IsNil,\n\t\t\terrChecker:  
NotNil,\n\t\t},\n\t\t{\n\t\t\tsnapshotter: &fakeSnapshotter{\n\t\t\t\tcreateErr: fmt.Errorf(\"create Error\"),\n\t\t\t},\n\t\t\targs: &types.CreateSnapshotArgs{\n\t\t\t\tNamespace:           \"ns\",\n\t\t\t\tPVCName:             \"pvc\",\n\t\t\t\tVolumeSnapshotClass: \"vsc\",\n\t\t\t\tSnapshotName:        \"\",\n\t\t\t},\n\t\t\tsnapChecker: IsNil,\n\t\t\terrChecker:  NotNil,\n\t\t},\n\t\t{\n\t\t\tsnapshotter: &fakeSnapshotter{},\n\t\t\tsnapChecker: IsNil,\n\t\t\terrChecker:  NotNil,\n\t\t},\n\t\t{\n\t\t\tsnapChecker: IsNil,\n\t\t\terrChecker:  NotNil,\n\t\t},\n\t} {\n\t\tsnapCreator := &snapshotCreate{}\n\t\tsnapshot, err := snapCreator.CreateSnapshot(ctx, tc.snapshotter, tc.args)\n\t\tc.Check(snapshot, tc.snapChecker)\n\t\tc.Check(err, tc.errChecker)\n\t}\n}\n\nfunc (s *CSITestSuite) TestCreateFromSourceCheck(c *C) {\n\tctx := context.Background()\n\tgv := &metav1.GroupVersionForDiscovery{Version: kansnapshot.Version}\n\tfor _, tc := range []struct {\n\t\tdyncli       dynamic.Interface\n\t\tsnapshotter  kansnapshot.Snapshotter\n\t\targs         *types.CreateFromSourceCheckArgs\n\t\tgroupVersion *metav1.GroupVersionForDiscovery\n\t\terrChecker   Checker\n\t}{\n\t\t{\n\t\t\tdyncli: fakedynamic.NewSimpleDynamicClient(runtime.NewScheme()),\n\t\t\tsnapshotter: &fakeSnapshotter{\n\t\t\t\tgsSrc: &kansnapshot.Source{\n\t\t\t\t\tHandle: \"handle\",\n\t\t\t\t\tDriver: \"driver\",\n\t\t\t\t},\n\t\t\t},\n\t\t\targs: &types.CreateFromSourceCheckArgs{\n\t\t\t\tVolumeSnapshotClass: \"vsc\",\n\t\t\t\tSnapshotName:        \"snapshot\",\n\t\t\t\tNamespace:           \"ns\",\n\t\t\t},\n\t\t\tgroupVersion: gv,\n\t\t\terrChecker:   IsNil,\n\t\t},\n\t\t{\n\t\t\tdyncli: fakedynamic.NewSimpleDynamicClient(runtime.NewScheme()),\n\t\t\tsnapshotter: &fakeSnapshotter{\n\t\t\t\tgsSrc: &kansnapshot.Source{\n\t\t\t\t\tHandle: \"handle\",\n\t\t\t\t\tDriver: \"driver\",\n\t\t\t\t},\n\t\t\t\tcfsErr: fmt.Errorf(\"cfs error\"),\n\t\t\t},\n\t\t\targs: 
&types.CreateFromSourceCheckArgs{\n\t\t\t\tVolumeSnapshotClass: \"vsc\",\n\t\t\t\tSnapshotName:        \"snapshot\",\n\t\t\t\tNamespace:           \"ns\",\n\t\t\t},\n\t\t\tgroupVersion: gv,\n\t\t\terrChecker:   NotNil,\n\t\t},\n\t\t{\n\t\t\tdyncli: fakedynamic.NewSimpleDynamicClient(runtime.NewScheme()),\n\t\t\tsnapshotter: &fakeSnapshotter{\n\t\t\t\tgsErr: fmt.Errorf(\"gs error\"),\n\t\t\t},\n\t\t\targs: &types.CreateFromSourceCheckArgs{\n\t\t\t\tVolumeSnapshotClass: \"vsc\",\n\t\t\t\tSnapshotName:        \"snapshot\",\n\t\t\t\tNamespace:           \"ns\",\n\t\t\t},\n\t\t\tgroupVersion: gv,\n\t\t\terrChecker:   NotNil,\n\t\t},\n\t\t{\n\t\t\tdyncli: fakedynamic.NewSimpleDynamicClient(runtime.NewScheme()),\n\t\t\tsnapshotter: &fakeSnapshotter{\n\t\t\t\tcvsErr: fmt.Errorf(\"cvs error\"),\n\t\t\t},\n\t\t\targs: &types.CreateFromSourceCheckArgs{\n\t\t\t\tVolumeSnapshotClass: \"vsc\",\n\t\t\t\tSnapshotName:        \"snapshot\",\n\t\t\t\tNamespace:           \"ns\",\n\t\t\t},\n\t\t\tgroupVersion: gv,\n\t\t\terrChecker:   NotNil,\n\t\t},\n\t\t{\n\t\t\tdyncli:      fakedynamic.NewSimpleDynamicClient(runtime.NewScheme()),\n\t\t\tsnapshotter: &fakeSnapshotter{},\n\t\t\targs: &types.CreateFromSourceCheckArgs{\n\t\t\t\tVolumeSnapshotClass: \"\",\n\t\t\t\tSnapshotName:        \"snapshot\",\n\t\t\t\tNamespace:           \"ns\",\n\t\t\t},\n\t\t\tgroupVersion: gv,\n\t\t\terrChecker:   NotNil,\n\t\t},\n\t\t{\n\t\t\tdyncli:      fakedynamic.NewSimpleDynamicClient(runtime.NewScheme()),\n\t\t\tsnapshotter: &fakeSnapshotter{},\n\t\t\targs: &types.CreateFromSourceCheckArgs{\n\t\t\t\tVolumeSnapshotClass: \"vsc\",\n\t\t\t\tSnapshotName:        \"\",\n\t\t\t\tNamespace:           \"ns\",\n\t\t\t},\n\t\t\tgroupVersion: gv,\n\t\t\terrChecker:   NotNil,\n\t\t},\n\t\t{\n\t\t\tdyncli:      fakedynamic.NewSimpleDynamicClient(runtime.NewScheme()),\n\t\t\tsnapshotter: &fakeSnapshotter{},\n\t\t\targs: &types.CreateFromSourceCheckArgs{\n\t\t\t\tVolumeSnapshotClass: \"vsc\",\n\t\t\t\tSnapshotName:    
    \"snapshot\",\n\t\t\t\tNamespace:           \"\",\n\t\t\t},\n\t\t\tgroupVersion: gv,\n\t\t\terrChecker:   NotNil,\n\t\t},\n\t\t{\n\t\t\tdyncli:       fakedynamic.NewSimpleDynamicClient(runtime.NewScheme()),\n\t\t\tsnapshotter:  &fakeSnapshotter{},\n\t\t\tgroupVersion: gv,\n\t\t\terrChecker:   NotNil,\n\t\t},\n\t\t{\n\t\t\tdyncli:       fakedynamic.NewSimpleDynamicClient(runtime.NewScheme()),\n\t\t\tgroupVersion: gv,\n\t\t\terrChecker:   NotNil,\n\t\t},\n\t\t{\n\t\t\tdyncli:       fakedynamic.NewSimpleDynamicClient(runtime.NewScheme()),\n\t\t\tgroupVersion: nil,\n\t\t\terrChecker:   NotNil,\n\t\t},\n\t\t{\n\t\t\tdyncli:     nil,\n\t\t\terrChecker: NotNil,\n\t\t},\n\t} {\n\t\tsnapCreator := &snapshotCreate{\n\t\t\tdynCli: tc.dyncli,\n\t\t}\n\t\terr := snapCreator.CreateFromSourceCheck(ctx, tc.snapshotter, tc.args, tc.groupVersion)\n\t\tc.Check(err, tc.errChecker)\n\t}\n}\n\ntype fakeSnapshotter struct {\n\tname string\n\n\tcreateErr error\n\n\tgetSnap *snapv1.VolumeSnapshot\n\tgetErr  error\n\n\tcvsErr error\n\n\tgsSrc *kansnapshot.Source\n\tgsErr error\n\n\tcfsErr error\n}\n\nfunc (f *fakeSnapshotter) GroupVersion(ctx context.Context) schema.GroupVersion {\n\treturn schema.GroupVersion{\n\t\tGroup:   common.SnapGroupName,\n\t\tVersion: \"v1\",\n\t}\n}\n\nfunc (f *fakeSnapshotter) GetVolumeSnapshotClass(ctx context.Context, annotationKey, annotationValue, storageClassName string) (string, error) {\n\treturn \"\", nil\n}\nfunc (f *fakeSnapshotter) CloneVolumeSnapshotClass(ctx context.Context, sourceClassName, targetClassName, newDeletionPolicy string, excludeAnnotations []string) error {\n\treturn f.cvsErr\n}\nfunc (f *fakeSnapshotter) Create(ctx context.Context, pvcName string, snapshotClass *string, waitForReady bool, snapshotMeta kansnapshot.ObjectMeta) error {\n\treturn f.createErr\n}\nfunc (f *fakeSnapshotter) Get(ctx context.Context, name, namespace string) (*snapv1.VolumeSnapshot, error) {\n\treturn f.getSnap, f.getErr\n}\nfunc (f *fakeSnapshotter) 
Delete(ctx context.Context, name, namespace string) (*snapv1.VolumeSnapshot, error) {\n\treturn nil, nil\n}\nfunc (f *fakeSnapshotter) DeleteContent(ctx context.Context, name string) error { return nil }\nfunc (f *fakeSnapshotter) Clone(ctx context.Context, name, namespace string, waitForReady bool, snapshotMeta, contentMeta kansnapshot.ObjectMeta) error {\n\treturn nil\n}\nfunc (f *fakeSnapshotter) GetSource(ctx context.Context, snapshotName, namespace string) (*kansnapshot.Source, error) {\n\treturn f.gsSrc, f.gsErr\n}\nfunc (f *fakeSnapshotter) CreateFromSource(ctx context.Context, source *kansnapshot.Source, waitForReady bool, snapshotMeta, contentMeta kansnapshot.ObjectMeta) error {\n\treturn f.cfsErr\n}\nfunc (f *fakeSnapshotter) CreateContentFromSource(ctx context.Context, source *kansnapshot.Source, snapshotName, snapshotNs, deletionPolicy string, contentMeta kansnapshot.ObjectMeta) error {\n\treturn nil\n}\nfunc (f *fakeSnapshotter) WaitOnReadyToUse(ctx context.Context, snapshotName, namespace string) error {\n\treturn nil\n}\n\nfunc (f *fakeSnapshotter) List(ctx context.Context, namespace string, labels map[string]string) (*snapv1.VolumeSnapshotList, error) {\n\treturn nil, nil\n}\n\nfunc (s *CSITestSuite) TestDeletePVC(c *C) {\n\tctx := context.Background()\n\tfor _, tc := range []struct {\n\t\tcli        kubernetes.Interface\n\t\tpvcName    string\n\t\tnamespace  string\n\t\terrChecker Checker\n\t}{\n\t\t{\n\t\t\tcli:        fake.NewSimpleClientset(),\n\t\t\tpvcName:    \"pvc\",\n\t\t\tnamespace:  \"ns\",\n\t\t\terrChecker: NotNil,\n\t\t},\n\t\t{\n\t\t\tcli: fake.NewSimpleClientset(&v1.PersistentVolumeClaim{\n\t\t\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\t\t\tName:      \"pvc\",\n\t\t\t\t\tNamespace: \"notns\",\n\t\t\t\t},\n\t\t\t}),\n\t\t\tpvcName:    \"pvc\",\n\t\t\tnamespace:  \"ns\",\n\t\t\terrChecker: NotNil,\n\t\t},\n\t\t{\n\t\t\tcli: fake.NewSimpleClientset(&v1.PersistentVolumeClaim{\n\t\t\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\t\t\tName:      
\"pvc\",\n\t\t\t\t\tNamespace: \"ns\",\n\t\t\t\t},\n\t\t\t}),\n\t\t\tpvcName:    \"pvc\",\n\t\t\tnamespace:  \"ns\",\n\t\t\terrChecker: IsNil,\n\t\t},\n\t\t{\n\t\t\tcli:        nil,\n\t\t\tpvcName:    \"pvc\",\n\t\t\tnamespace:  \"ns\",\n\t\t\terrChecker: NotNil,\n\t\t},\n\t} {\n\t\tcleaner := NewCleaner(tc.cli, nil)\n\t\terr := cleaner.DeletePVC(ctx, tc.pvcName, tc.namespace)\n\t\tc.Check(err, tc.errChecker)\n\t}\n}\n\nfunc (s *CSITestSuite) TestDeletePod(c *C) {\n\tctx := context.Background()\n\tfor _, tc := range []struct {\n\t\tcli        kubernetes.Interface\n\t\tpodName    string\n\t\tnamespace  string\n\t\terrChecker Checker\n\t}{\n\t\t{\n\t\t\tcli:        fake.NewSimpleClientset(),\n\t\t\tpodName:    \"pod\",\n\t\t\tnamespace:  \"ns\",\n\t\t\terrChecker: NotNil,\n\t\t},\n\t\t{\n\t\t\tcli: fake.NewSimpleClientset(&v1.Pod{\n\t\t\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\t\t\tName:      \"pod\",\n\t\t\t\t\tNamespace: \"notns\",\n\t\t\t\t},\n\t\t\t}),\n\t\t\tpodName:    \"pod\",\n\t\t\tnamespace:  \"ns\",\n\t\t\terrChecker: NotNil,\n\t\t},\n\t\t{\n\t\t\tcli: fake.NewSimpleClientset(&v1.Pod{\n\t\t\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\t\t\tName:      \"pod\",\n\t\t\t\t\tNamespace: \"ns\",\n\t\t\t\t},\n\t\t\t}),\n\t\t\tpodName:    \"pod\",\n\t\t\tnamespace:  \"ns\",\n\t\t\terrChecker: IsNil,\n\t\t},\n\t\t{\n\t\t\tcli:        nil,\n\t\t\tpodName:    \"pod\",\n\t\t\tnamespace:  \"ns\",\n\t\t\terrChecker: NotNil,\n\t\t},\n\t} {\n\t\tcleaner := &cleanse{\n\t\t\tkubeCli: tc.cli,\n\t\t}\n\t\terr := cleaner.DeletePod(ctx, tc.podName, tc.namespace)\n\t\tc.Check(err, tc.errChecker)\n\t}\n}\n\nfunc (s *CSITestSuite) TestDeleteSnapshot(c *C) {\n\tctx := context.Background()\n\tfor _, tc := range []struct {\n\t\tcli          dynamic.Interface\n\t\tsnapshotName string\n\t\tnamespace    string\n\t\tgroupVersion *metav1.GroupVersionForDiscovery\n\t\terrChecker   Checker\n\t}{\n\t\t{\n\t\t\tcli:          
fakedynamic.NewSimpleDynamicClient(runtime.NewScheme()),\n\t\t\tsnapshotName: \"snap1\",\n\t\t\tnamespace:    \"ns\",\n\t\t\tgroupVersion: &metav1.GroupVersionForDiscovery{\n\t\t\t\tVersion: kansnapshot.Version,\n\t\t\t},\n\t\t\terrChecker: NotNil,\n\t\t},\n\t\t{\n\t\t\tcli: fakedynamic.NewSimpleDynamicClient(runtime.NewScheme(),\n\t\t\t\t&unstructured.Unstructured{\n\t\t\t\t\tObject: map[string]interface{}{\n\t\t\t\t\t\t\"apiVersion\": fmt.Sprintf(\"%s/%s\", kansnapshot.GroupName, \"v1beta1\"),\n\t\t\t\t\t\t\"kind\":       \"VolumeSnapshot\",\n\t\t\t\t\t\t\"metadata\": map[string]interface{}{\n\t\t\t\t\t\t\t\"name\":      \"snap1\",\n\t\t\t\t\t\t\t\"namespace\": \"ns\",\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t}),\n\t\t\tsnapshotName: \"snap1\",\n\t\t\tnamespace:    \"ns\",\n\t\t\terrChecker:   NotNil,\n\t\t\tgroupVersion: &metav1.GroupVersionForDiscovery{\n\t\t\t\tVersion: kansnapshot.Version,\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tcli: fakedynamic.NewSimpleDynamicClient(runtime.NewScheme(),\n\t\t\t\t&unstructured.Unstructured{\n\t\t\t\t\tObject: map[string]interface{}{\n\t\t\t\t\t\t\"apiVersion\": fmt.Sprintf(\"%s/%s\", kansnapshot.GroupName, kansnapshot.Version),\n\t\t\t\t\t\t\"kind\":       \"VolumeSnapshot\",\n\t\t\t\t\t\t\"metadata\": map[string]interface{}{\n\t\t\t\t\t\t\t\"name\":      \"snap1\",\n\t\t\t\t\t\t\t\"namespace\": \"ns\",\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t}),\n\t\t\tsnapshotName: \"snap1\",\n\t\t\tnamespace:    \"ns\",\n\t\t\terrChecker:   IsNil,\n\t\t\tgroupVersion: &metav1.GroupVersionForDiscovery{\n\t\t\t\tVersion: kansnapshot.Version,\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tcli:          fakedynamic.NewSimpleDynamicClient(runtime.NewScheme()),\n\t\t\tsnapshotName: \"pod\",\n\t\t\tnamespace:    \"ns\",\n\t\t\terrChecker:   NotNil,\n\t\t},\n\t\t{\n\t\t\tcli:          nil,\n\t\t\tsnapshotName: \"pod\",\n\t\t\tnamespace:    \"ns\",\n\t\t\terrChecker:   NotNil,\n\t\t},\n\t} {\n\t\tcleaner := NewCleaner(nil, tc.cli)\n\t\terr := cleaner.DeleteSnapshot(ctx, 
tc.snapshotName, tc.namespace, tc.groupVersion)\n\t\tc.Check(err, tc.errChecker)\n\t}\n}\n\nfunc (s *CSITestSuite) TestWaitForPVCReady(c *C) {\n\tctx := context.Background()\n\tconst ns = \"ns\"\n\tconst pvc = \"pvc\"\n\tboundPVC := s.getPVC(ns, pvc, v1.ClaimBound)\n\tclaimLostPVC := s.getPVC(ns, pvc, v1.ClaimLost)\n\tstuckPVC := s.getPVC(ns, pvc, \"\")\n\tnormalGetFunc := func(action k8stesting.Action) (handled bool, ret runtime.Object, err error) {\n\t\treturn\n\t}\n\tdeadlineExceededGetFunc := func(action k8stesting.Action) (handled bool, ret runtime.Object, err error) {\n\t\treturn true, nil, pkgerrors.Wrapf(context.DeadlineExceeded, \"some wrapped error\")\n\t}\n\n\twarningEvent := v1.Event{\n\t\tType:    v1.EventTypeWarning,\n\t\tMessage: \"waiting for a volume to be created, either by external provisioner \\\"ceph.com/rbd\\\" or manually created by system administrator\",\n\t}\n\tfor _, tc := range []struct {\n\t\tdescription string\n\t\tcli         kubernetes.Interface\n\t\tpvcGetFunc  func(action k8stesting.Action) (handled bool, ret runtime.Object, err error)\n\t\teventsList  []v1.Event\n\t\terrChecker  Checker\n\t\terrString   string\n\t}{\n\t\t{\n\t\t\tdescription: \"Happy path\",\n\t\t\tcli:         fake.NewSimpleClientset(boundPVC),\n\t\t\tpvcGetFunc:  normalGetFunc,\n\t\t\terrChecker:  IsNil,\n\t\t},\n\t\t{\n\t\t\tdescription: \"Missing PVC\",\n\t\t\tcli:         fake.NewSimpleClientset(),\n\t\t\tpvcGetFunc:  normalGetFunc,\n\t\t\terrChecker:  NotNil,\n\t\t\terrString:   \"could not find PVC\",\n\t\t},\n\t\t{\n\t\t\tdescription: \"PVC ClaimLost\",\n\t\t\tcli:         fake.NewSimpleClientset(claimLostPVC),\n\t\t\tpvcGetFunc:  normalGetFunc,\n\t\t\terrChecker:  NotNil,\n\t\t\terrString:   \"ClaimLost\",\n\t\t},\n\t\t{\n\t\t\tdescription: \"context.DeadlineExceeded but no event warnings\",\n\t\t\tcli:         fake.NewSimpleClientset(stuckPVC),\n\t\t\tpvcGetFunc:  deadlineExceededGetFunc,\n\t\t\terrChecker:  NotNil,\n\t\t\terrString:   
context.DeadlineExceeded.Error(),\n\t\t},\n\t\t{\n\t\t\tdescription: \"context.DeadlineExceeded, unable to provision PVC\",\n\t\t\tcli:         fake.NewSimpleClientset(stuckPVC),\n\t\t\tpvcGetFunc:  deadlineExceededGetFunc,\n\t\t\teventsList:  []v1.Event{warningEvent},\n\t\t\terrChecker:  NotNil,\n\t\t\terrString:   warningEvent.Message,\n\t\t},\n\t} {\n\t\tfmt.Println(\"test:\", tc.description)\n\t\tcreator := &applicationCreate{kubeCli: tc.cli}\n\t\tcreator.kubeCli.(*fake.Clientset).PrependReactor(\"get\", \"persistentvolumeclaims\", tc.pvcGetFunc)\n\t\tcreator.kubeCli.(*fake.Clientset).PrependReactor(\"list\", \"events\", func(action k8stesting.Action) (handled bool, ret runtime.Object, err error) {\n\t\t\treturn true, &v1.EventList{Items: tc.eventsList}, nil\n\t\t})\n\t\terr := creator.WaitForPVCReady(ctx, ns, pvc)\n\t\tc.Check(err, tc.errChecker)\n\t\tif err != nil {\n\t\t\tc.Assert(strings.Contains(err.Error(), tc.errString), Equals, true)\n\t\t}\n\t}\n}\n\nfunc (s *CSITestSuite) getPVC(ns, pvc string, phase v1.PersistentVolumeClaimPhase) *v1.PersistentVolumeClaim {\n\treturn &v1.PersistentVolumeClaim{\n\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\tName:      pvc,\n\t\t\tNamespace: ns,\n\t\t},\n\t\tStatus: v1.PersistentVolumeClaimStatus{\n\t\t\tPhase: phase,\n\t\t},\n\t}\n}\n\nfunc (s *CSITestSuite) TestWaitForPodReady(c *C) {\n\tctx := context.Background()\n\tconst ns = \"ns\"\n\tconst podName = \"pod\"\n\treadyPod := &v1.Pod{\n\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\tNamespace: ns,\n\t\t\tName:      podName,\n\t\t},\n\t\tSpec: v1.PodSpec{\n\t\t\tContainers: []v1.Container{\n\t\t\t\t{Name: \"container-0\"},\n\t\t\t},\n\t\t},\n\t\tStatus: v1.PodStatus{\n\t\t\tPhase: v1.PodRunning,\n\t\t},\n\t}\n\twarningEvent := v1.Event{\n\t\tType:    v1.EventTypeWarning,\n\t\tMessage: \"warning event\",\n\t}\n\n\tfor _, tc := range []struct {\n\t\tdescription string\n\t\tcli         kubernetes.Interface\n\t\teventsList  []v1.Event\n\t\terrChecker  Checker\n\t\terrString   
string\n\t}{\n\t\t{\n\t\t\tdescription: \"Happy path\",\n\t\t\tcli:         fake.NewSimpleClientset(readyPod),\n\t\t\terrChecker:  IsNil,\n\t\t},\n\t\t{\n\t\t\tdescription: \"Not found\",\n\t\t\tcli:         fake.NewSimpleClientset(),\n\t\t\terrChecker:  NotNil,\n\t\t\terrString:   \"not found\",\n\t\t},\n\t\t{\n\t\t\tdescription: \"Pod events\",\n\t\t\tcli:         fake.NewSimpleClientset(),\n\t\t\terrChecker:  NotNil,\n\t\t\terrString:   \"had issues creating Pod\",\n\t\t\teventsList:  []v1.Event{warningEvent},\n\t\t},\n\t\t{\n\t\t\tdescription: \"No CLI\",\n\t\t\terrChecker:  NotNil,\n\t\t\terrString:   \"kubeCli not initialized\",\n\t\t},\n\t} {\n\t\tfmt.Println(\"TestWaitForPodReady:\", tc.description)\n\t\tcreator := &applicationCreate{kubeCli: tc.cli}\n\t\tif len(tc.eventsList) > 0 {\n\t\t\tcreator.kubeCli.(*fake.Clientset).PrependReactor(\"list\", \"events\", func(action k8stesting.Action) (handled bool, ret runtime.Object, err error) {\n\t\t\t\treturn true, &v1.EventList{Items: tc.eventsList}, nil\n\t\t\t})\n\t\t}\n\t\terr := creator.WaitForPodReady(ctx, ns, podName)\n\t\tc.Check(err, tc.errChecker)\n\t\tif err != nil {\n\t\t\tc.Assert(strings.Contains(err.Error(), tc.errString), Equals, true)\n\t\t}\n\t}\n}\n"
  },
  {
    "path": "pkg/csi/file_restore_inspector.go",
    "content": "package csi\n\nimport (\n\t\"bytes\"\n\t\"context\"\n\t\"fmt\"\n\t\"os\"\n\t\"os/signal\"\n\t\"sync\"\n\t\"syscall\"\n\n\t\"github.com/kastenhq/kubestr/pkg/csi/types\"\n\tsnapv1 \"github.com/kubernetes-csi/external-snapshotter/client/v4/apis/volumesnapshot/v1\"\n\t\"github.com/pkg/errors\"\n\tv1 \"k8s.io/api/core/v1\"\n\tsv1 \"k8s.io/api/storage/v1\"\n\tmetav1 \"k8s.io/apimachinery/pkg/apis/meta/v1\"\n\t\"k8s.io/client-go/dynamic\"\n\t\"k8s.io/client-go/kubernetes\"\n)\n\ntype FileRestoreRunner struct {\n\tKubeCli      kubernetes.Interface\n\tDynCli       dynamic.Interface\n\trestoreSteps FileRestoreStepper\n\trestorePVC   *v1.PersistentVolumeClaim\n\tpod          *v1.Pod\n\tsnapshot     *snapv1.VolumeSnapshot\n}\n\nfunc (f *FileRestoreRunner) RunFileRestore(ctx context.Context, args *types.FileRestoreArgs) error {\n\tf.restoreSteps = &fileRestoreSteps{\n\t\tvalidateOps: &validateOperations{\n\t\t\tkubeCli: f.KubeCli,\n\t\t\tdynCli:  f.DynCli,\n\t\t},\n\t\tversionFetchOps: &apiVersionFetch{\n\t\t\tkubeCli: f.KubeCli,\n\t\t},\n\t\tcreateAppOps: &applicationCreate{\n\t\t\tkubeCli: f.KubeCli,\n\t\t},\n\t\tportForwardOps: &portforward{},\n\t\tkubeExecutor: &kubeExec{\n\t\t\tkubeCli: f.KubeCli,\n\t\t},\n\t\tcleanerOps: &cleanse{\n\t\t\tkubeCli: f.KubeCli,\n\t\t\tdynCli:  f.DynCli,\n\t\t},\n\t}\n\treturn f.RunFileRestoreHelper(ctx, args)\n}\n\nfunc (f *FileRestoreRunner) RunFileRestoreHelper(ctx context.Context, args *types.FileRestoreArgs) error {\n\tdefer func() {\n\t\tf.restoreSteps.Cleanup(ctx, args, f.restorePVC, f.pod)\n\t}()\n\n\tif f.KubeCli == nil || f.DynCli == nil {\n\t\treturn fmt.Errorf(\"cli uninitialized\")\n\t}\n\n\tfmt.Println(\"Fetching the snapshot or PVC.\")\n\tvs, restorePVC, sourcePVC, sc, err := f.restoreSteps.ValidateArgs(ctx, args)\n\tif err != nil {\n\t\treturn errors.Wrap(err, \"failed to validate arguments.\")\n\t}\n\tf.snapshot = vs\n\n\tfmt.Println(\"Creating the browser pod & mounting the PVCs.\")\n\tvar restoreMountPath 
string\n\tf.pod, f.restorePVC, restoreMountPath, err = f.restoreSteps.CreateInspectorApplication(ctx, args, f.snapshot, restorePVC, sourcePVC, sc)\n\tif err != nil {\n\t\treturn errors.Wrap(err, \"failed to create inspector application.\")\n\t}\n\n\tif args.Path != \"\" {\n\t\tfmt.Printf(\"Restoring the file %s\\n\", args.Path)\n\t\t_, err := f.restoreSteps.ExecuteCopyCommand(ctx, args, f.pod, restoreMountPath)\n\t\tif err != nil {\n\t\t\treturn errors.Wrap(err, \"failed to execute cp command in pod.\")\n\t\t}\n\t\tif args.FromSnapshotName != \"\" {\n\t\t\tfmt.Printf(\"File restored from VolumeSnapshot %s to Source PVC %s.\\n\", f.snapshot.Name, sourcePVC.Name)\n\t\t} else {\n\t\t\tfmt.Printf(\"File restored from PVC %s to Source PVC %s.\\n\", f.restorePVC.Name, sourcePVC.Name)\n\t\t}\n\t\treturn nil\n\t}\n\n\tfmt.Println(\"Forwarding the port.\")\n\terr = f.restoreSteps.PortForwardAPod(f.pod, args.LocalPort)\n\tif err != nil {\n\t\treturn errors.Wrap(err, \"failed to port forward Pod.\")\n\t}\n\n\treturn nil\n}\n\n//go:generate go run github.com/golang/mock/mockgen -destination=mocks/mock_file_restore_stepper.go -package=mocks . 
FileRestoreStepper\ntype FileRestoreStepper interface {\n\tValidateArgs(ctx context.Context, args *types.FileRestoreArgs) (*snapv1.VolumeSnapshot, *v1.PersistentVolumeClaim, *v1.PersistentVolumeClaim, *sv1.StorageClass, error)\n\tCreateInspectorApplication(ctx context.Context, args *types.FileRestoreArgs, snapshot *snapv1.VolumeSnapshot, restorePVC *v1.PersistentVolumeClaim, sourcePVC *v1.PersistentVolumeClaim, storageClass *sv1.StorageClass) (*v1.Pod, *v1.PersistentVolumeClaim, string, error)\n\tExecuteCopyCommand(ctx context.Context, args *types.FileRestoreArgs, pod *v1.Pod, restoreMountPath string) (string, error)\n\tPortForwardAPod(pod *v1.Pod, localPort int) error\n\tCleanup(ctx context.Context, args *types.FileRestoreArgs, restorePVC *v1.PersistentVolumeClaim, pod *v1.Pod)\n}\n\ntype fileRestoreSteps struct {\n\tvalidateOps          ArgumentValidator\n\tversionFetchOps      ApiVersionFetcher\n\tcreateAppOps         ApplicationCreator\n\tportForwardOps       PortForwarder\n\tcleanerOps           Cleaner\n\tkubeExecutor         KubeExecutor\n\tSnapshotGroupVersion *metav1.GroupVersionForDiscovery\n}\n\nfunc (f *fileRestoreSteps) ValidateArgs(ctx context.Context, args *types.FileRestoreArgs) (*snapv1.VolumeSnapshot, *v1.PersistentVolumeClaim, *v1.PersistentVolumeClaim, *sv1.StorageClass, error) {\n\tif err := args.Validate(); err != nil {\n\t\treturn nil, nil, nil, nil, errors.Wrap(err, \"failed to validate input arguments\")\n\t}\n\tif err := f.validateOps.ValidateNamespace(ctx, args.Namespace); err != nil {\n\t\treturn nil, nil, nil, nil, errors.Wrap(err, \"failed to validate Namespace\")\n\t}\n\tgroupVersion, err := f.versionFetchOps.GetCSISnapshotGroupVersion()\n\tif err != nil {\n\t\treturn nil, nil, nil, nil, errors.Wrap(err, \"failed to fetch groupVersion\")\n\t}\n\tf.SnapshotGroupVersion = groupVersion\n\tvar snapshot *snapv1.VolumeSnapshot\n\tvar restorePVC, sourcePVC *v1.PersistentVolumeClaim\n\tvar sc *sv1.StorageClass\n\tif args.FromSnapshotName != 
\"\" {\n\t\tfmt.Println(\"Fetching the snapshot.\")\n\t\tsnapshot, err := f.validateOps.ValidateVolumeSnapshot(ctx, args.FromSnapshotName, args.Namespace, groupVersion)\n\t\tif err != nil {\n\t\t\treturn nil, nil, nil, nil, errors.Wrap(err, \"failed to validate VolumeSnapshot\")\n\t\t}\n\t\tif args.ToPVCName == \"\" {\n\t\t\tfmt.Println(\"Fetching the source PVC from snapshot.\")\n\t\t\tif *snapshot.Spec.Source.PersistentVolumeClaimName == \"\" {\n\t\t\t\treturn nil, nil, nil, nil, errors.Wrap(err, \"failed to fetch source PVC. VolumeSnapshot does not have a PVC as it's source\")\n\t\t\t}\n\t\t\tsourcePVC, err = f.validateOps.ValidatePVC(ctx, *snapshot.Spec.Source.PersistentVolumeClaimName, args.Namespace)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, nil, nil, nil, errors.Wrap(err, \"failed to validate source PVC\")\n\t\t\t}\n\t\t} else {\n\t\t\tfmt.Println(\"Fetching the source PVC.\")\n\t\t\tsourcePVC, err = f.validateOps.ValidatePVC(ctx, args.ToPVCName, args.Namespace)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, nil, nil, nil, errors.Wrap(err, \"failed to validate source PVC\")\n\t\t\t}\n\t\t}\n\t\tsc, err = f.validateOps.ValidateStorageClass(ctx, *sourcePVC.Spec.StorageClassName)\n\t\tif err != nil {\n\t\t\treturn nil, nil, nil, nil, errors.Wrap(err, \"failed to validate StorageClass for source PVC\")\n\t\t}\n\t\tuVSC, err := f.validateOps.ValidateVolumeSnapshotClass(ctx, *snapshot.Spec.VolumeSnapshotClassName, groupVersion)\n\t\tif err != nil {\n\t\t\treturn nil, nil, nil, nil, errors.Wrap(err, \"failed to validate VolumeSnapshotClass\")\n\t\t}\n\t\tvscDriver := getDriverNameFromUVSC(*uVSC, groupVersion.GroupVersion)\n\t\tif sc.Provisioner != vscDriver {\n\t\t\treturn nil, nil, nil, nil, fmt.Errorf(\"provisioner for StorageClass (%s) and VolumeSnapshotClass driver (%s) are different\", sc.Provisioner, vscDriver)\n\t\t}\n\t} else {\n\t\tfmt.Println(\"Fetching the restore PVC.\")\n\t\trestorePVC, err = f.validateOps.ValidatePVC(ctx, args.FromPVCName, 
args.Namespace)\n\t\tif err != nil {\n\t\t\treturn nil, nil, nil, nil, errors.Wrap(err, \"failed to validate restore PVC\")\n\t\t}\n\t\tfmt.Println(\"Fetching the source PVC.\")\n\t\tsourcePVC, err = f.validateOps.ValidatePVC(ctx, args.ToPVCName, args.Namespace)\n\t\tif err != nil {\n\t\t\treturn nil, nil, nil, nil, errors.Wrap(err, \"failed to validate source PVC\")\n\t\t}\n\t\t_, err = f.validateOps.ValidateStorageClass(ctx, *restorePVC.Spec.StorageClassName)\n\t\tif err != nil {\n\t\t\treturn nil, nil, nil, nil, errors.Wrap(err, \"failed to validate StorageClass for restore PVC\")\n\t\t}\n\t\tsc, err = f.validateOps.ValidateStorageClass(ctx, *sourcePVC.Spec.StorageClassName)\n\t\tif err != nil {\n\t\t\treturn nil, nil, nil, nil, errors.Wrap(err, \"failed to validate StorageClass for source PVC\")\n\t\t}\n\t}\n\tfor _, sourceAccessMode := range sourcePVC.Spec.AccessModes {\n\t\tif sourceAccessMode == v1.ReadWriteOncePod {\n\t\t\treturn nil, nil, nil, nil, fmt.Errorf(\"unsupported %s AccessMode found in source PVC. 
Supported AccessModes are ReadOnlyMany & ReadWriteMany\", sourceAccessMode)\n\t\t}\n\t}\n\n\treturn snapshot, restorePVC, sourcePVC, sc, nil\n}\n\nfunc (f *fileRestoreSteps) CreateInspectorApplication(ctx context.Context, args *types.FileRestoreArgs, snapshot *snapv1.VolumeSnapshot, restorePVC *v1.PersistentVolumeClaim, sourcePVC *v1.PersistentVolumeClaim, storageClass *sv1.StorageClass) (*v1.Pod, *v1.PersistentVolumeClaim, string, error) {\n\trestoreMountPath := \"/restore-pvc-data\"\n\tif args.FromSnapshotName != \"\" {\n\t\tsnapshotAPIGroup := \"snapshot.storage.k8s.io\"\n\t\tsnapshotKind := \"VolumeSnapshot\"\n\t\tdataSource := &v1.TypedLocalObjectReference{\n\t\t\tAPIGroup: &snapshotAPIGroup,\n\t\t\tKind:     snapshotKind,\n\t\t\tName:     snapshot.Name,\n\t\t}\n\t\tpvcArgs := &types.CreatePVCArgs{\n\t\t\tGenerateName: clonedPVCGenerateName,\n\t\t\tStorageClass: storageClass.Name,\n\t\t\tNamespace:    args.Namespace,\n\t\t\tDataSource:   dataSource,\n\t\t\tRestoreSize:  snapshot.Status.RestoreSize,\n\t\t}\n\t\tvar err error\n\t\trestorePVC, err = f.createAppOps.CreatePVC(ctx, pvcArgs)\n\t\tif err != nil {\n\t\t\treturn nil, nil, \"\", errors.Wrap(err, \"failed to restore PVC\")\n\t\t}\n\t\trestoreMountPath = \"/snapshot-data\"\n\t}\n\tpodArgs := &types.CreatePodArgs{\n\t\tGenerateName:   clonedPodGenerateName,\n\t\tNamespace:      args.Namespace,\n\t\tRunAsUser:      args.RunAsUser,\n\t\tContainerImage: \"filebrowser/filebrowser:v2\",\n\t\tContainerArgs:  []string{\"--noauth\"},\n\t\tPVCMap: map[string]types.VolumePath{\n\t\t\trestorePVC.Name: {\n\t\t\t\tMountPath: fmt.Sprintf(\"/srv%s\", restoreMountPath),\n\t\t\t},\n\t\t\tsourcePVC.Name: {\n\t\t\t\tMountPath: \"/srv/source-data\",\n\t\t\t},\n\t\t},\n\t}\n\tif args.Path != \"\" {\n\t\tpodArgs = &types.CreatePodArgs{\n\t\t\tGenerateName:   clonedPodGenerateName,\n\t\t\tNamespace:      args.Namespace,\n\t\t\tRunAsUser:      args.RunAsUser,\n\t\t\tContainerImage: \"alpine:3.19\",\n\t\t\tCommand:        
[]string{\"/bin/sh\"},\n\t\t\tContainerArgs:  []string{\"-c\", \"while true; do sleep 3600; done\"},\n\t\t\tPVCMap: map[string]types.VolumePath{\n\t\t\t\trestorePVC.Name: {\n\t\t\t\t\tMountPath: restoreMountPath,\n\t\t\t\t},\n\t\t\t\tsourcePVC.Name: {\n\t\t\t\t\tMountPath: \"/source-data\",\n\t\t\t\t},\n\t\t\t},\n\t\t}\n\t}\n\tpod, err := f.createAppOps.CreatePod(ctx, podArgs)\n\tif err != nil {\n\t\treturn nil, restorePVC, \"\", errors.Wrap(err, \"failed to create browse Pod\")\n\t}\n\tif err = f.createAppOps.WaitForPodReady(ctx, args.Namespace, pod.Name); err != nil {\n\t\treturn pod, restorePVC, \"\", errors.Wrap(err, \"pod failed to become ready\")\n\t}\n\treturn pod, restorePVC, restoreMountPath, nil\n}\n\nfunc (f *fileRestoreSteps) ExecuteCopyCommand(ctx context.Context, args *types.FileRestoreArgs, pod *v1.Pod, restoreMountPath string) (string, error) {\n\tcommand := []string{\"cp\", \"-rf\", fmt.Sprintf(\"%s%s\", restoreMountPath, args.Path), fmt.Sprintf(\"/source-data%s\", args.Path)}\n\tstdout, err := f.kubeExecutor.Exec(ctx, args.Namespace, pod.Name, pod.Spec.Containers[0].Name, command)\n\tif err != nil {\n\t\treturn \"\", errors.Wrapf(err, \"error running command:(%v)\", command)\n\t}\n\treturn stdout, nil\n}\n\nfunc (f *fileRestoreSteps) PortForwardAPod(pod *v1.Pod, localPort int) error {\n\tvar wg sync.WaitGroup\n\twg.Add(1)\n\tstopChan, readyChan, errChan := make(chan struct{}, 1), make(chan struct{}, 1), make(chan string)\n\tout, errOut := new(bytes.Buffer), new(bytes.Buffer)\n\tcfg, err := f.portForwardOps.FetchRestConfig()\n\tif err != nil {\n\t\treturn errors.New(\"failed to fetch rest config\")\n\t}\n\tsigs := make(chan os.Signal, 1)\n\tsignal.Notify(sigs, syscall.SIGINT, syscall.SIGTERM)\n\tgo func() {\n\t\t<-sigs\n\t\tfmt.Println(\"\\nStopping port forward.\")\n\t\tclose(stopChan)\n\t\twg.Done()\n\t}()\n\n\tgo func() {\n\t\tpfArgs := &types.PortForwardAPodRequest{\n\t\t\tRestConfig:   cfg,\n\t\t\tPod:          pod,\n\t\t\tLocalPort:    
localPort,\n\t\t\tPodPort:      80,\n\t\t\tOutStream:    bytes.Buffer(*out),\n\t\t\tErrOutStream: bytes.Buffer(*errOut),\n\t\t\tStopCh:       stopChan,\n\t\t\tReadyCh:      readyChan,\n\t\t}\n\t\terr = f.portForwardOps.PortForwardAPod(pfArgs)\n\t\tif err != nil {\n\t\t\terrChan <- fmt.Sprintf(\"Failed to port forward (%s)\", err.Error())\n\t\t}\n\t}()\n\n\tselect {\n\tcase <-readyChan:\n\t\turl := fmt.Sprintf(\"http://localhost:%d/\", localPort)\n\t\tfmt.Printf(\"Port forwarding is ready to get traffic. visit %s\\n\", url)\n\t\topenbrowser(url)\n\t\twg.Wait()\n\tcase msg := <-errChan:\n\t\treturn errors.New(msg)\n\t}\n\n\treturn nil\n}\n\nfunc (f *fileRestoreSteps) Cleanup(ctx context.Context, args *types.FileRestoreArgs, restorePVC *v1.PersistentVolumeClaim, pod *v1.Pod) {\n\tif args.FromSnapshotName != \"\" {\n\t\tfmt.Println(\"Cleaning up restore PVC.\")\n\t\tif restorePVC != nil {\n\t\t\terr := f.cleanerOps.DeletePVC(ctx, restorePVC.Name, restorePVC.Namespace)\n\t\t\tif err != nil {\n\t\t\t\tfmt.Println(\"Failed to delete restore PVC\", restorePVC)\n\t\t\t}\n\t\t}\n\t}\n\tfmt.Println(\"Cleaning up browser pod.\")\n\tif pod != nil {\n\t\terr := f.cleanerOps.DeletePod(ctx, pod.Name, pod.Namespace)\n\t\tif err != nil {\n\t\t\tfmt.Println(\"Failed to delete Pod\", pod)\n\t\t}\n\t}\n}\n"
  },
  {
    "path": "pkg/csi/file_restore_inspector_steps_test.go",
    "content": "package csi\n\nimport (\n\t\"context\"\n\t\"fmt\"\n\n\t\"k8s.io/apimachinery/pkg/api/resource\"\n\n\t\"github.com/golang/mock/gomock\"\n\t\"github.com/kastenhq/kubestr/pkg/common\"\n\t\"github.com/kastenhq/kubestr/pkg/csi/mocks\"\n\t\"github.com/kastenhq/kubestr/pkg/csi/types\"\n\tsnapv1 \"github.com/kubernetes-csi/external-snapshotter/client/v4/apis/volumesnapshot/v1\"\n\t. \"gopkg.in/check.v1\"\n\tv1 \"k8s.io/api/core/v1\"\n\tsv1 \"k8s.io/api/storage/v1\"\n\tmetav1 \"k8s.io/apimachinery/pkg/apis/meta/v1\"\n\t\"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured\"\n)\n\nfunc (s *CSITestSuite) TestFileRestoreValidateArgs(c *C) {\n\tctx := context.Background()\n\tscName := \"sc\"\n\tvscName := \"vsc\"\n\tpvcName := \"pvc\"\n\ttype fields struct {\n\t\tvalidateOps *mocks.MockArgumentValidator\n\t\tversionOps  *mocks.MockApiVersionFetcher\n\t}\n\tfor _, tc := range []struct {\n\t\targs       *types.FileRestoreArgs\n\t\tprepare    func(f *fields)\n\t\terrChecker Checker\n\t}{\n\t\t{ // valid args\n\t\t\targs: &types.FileRestoreArgs{\n\t\t\t\tFromSnapshotName: \"vs\",\n\t\t\t\tNamespace:        \"ns\",\n\t\t\t},\n\t\t\tprepare: func(f *fields) {\n\t\t\t\tgomock.InOrder(\n\t\t\t\t\tf.validateOps.EXPECT().ValidateNamespace(gomock.Any(), \"ns\").Return(nil),\n\t\t\t\t\tf.versionOps.EXPECT().GetCSISnapshotGroupVersion().Return(\n\t\t\t\t\t\t&metav1.GroupVersionForDiscovery{\n\t\t\t\t\t\t\tGroupVersion: common.SnapshotVersion,\n\t\t\t\t\t\t}, nil),\n\t\t\t\t\tf.validateOps.EXPECT().ValidateVolumeSnapshot(gomock.Any(), \"vs\", \"ns\", gomock.Any()).Return(\n\t\t\t\t\t\t&snapv1.VolumeSnapshot{\n\t\t\t\t\t\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\t\t\t\t\t\tName:      \"vs\",\n\t\t\t\t\t\t\t\tNamespace: \"ns\",\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\tSpec: snapv1.VolumeSnapshotSpec{\n\t\t\t\t\t\t\t\tSource: snapv1.VolumeSnapshotSource{\n\t\t\t\t\t\t\t\t\tPersistentVolumeClaimName: &pvcName,\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\tVolumeSnapshotClassName: 
&vscName,\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t}, nil,\n\t\t\t\t\t),\n\t\t\t\t\tf.validateOps.EXPECT().ValidatePVC(gomock.Any(), \"pvc\", \"ns\").Return(\n\t\t\t\t\t\t&v1.PersistentVolumeClaim{\n\t\t\t\t\t\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\t\t\t\t\t\tName:      \"pvc\",\n\t\t\t\t\t\t\t\tNamespace: \"ns\",\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\tSpec: v1.PersistentVolumeClaimSpec{\n\t\t\t\t\t\t\t\tVolumeName:       \"vol\",\n\t\t\t\t\t\t\t\tStorageClassName: &scName,\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t}, nil,\n\t\t\t\t\t),\n\t\t\t\t\tf.validateOps.EXPECT().ValidateStorageClass(gomock.Any(), scName).Return(\n\t\t\t\t\t\t&sv1.StorageClass{\n\t\t\t\t\t\t\tProvisioner: \"p1\",\n\t\t\t\t\t\t}, nil),\n\t\t\t\t\tf.validateOps.EXPECT().ValidateVolumeSnapshotClass(gomock.Any(), \"vsc\", &metav1.GroupVersionForDiscovery{\n\t\t\t\t\t\tGroupVersion: common.SnapshotVersion,\n\t\t\t\t\t}).Return(&unstructured.Unstructured{\n\t\t\t\t\t\tObject: map[string]interface{}{\n\t\t\t\t\t\t\tcommon.VolSnapClassDriverKey: \"p1\",\n\t\t\t\t\t\t},\n\t\t\t\t\t}, nil),\n\t\t\t\t)\n\t\t\t},\n\t\t\terrChecker: IsNil,\n\t\t},\n\t\t{ // valid args\n\t\t\targs: &types.FileRestoreArgs{\n\t\t\t\tFromPVCName: \"restorePVC\",\n\t\t\t\tToPVCName:   \"sourcePVC\",\n\t\t\t\tNamespace:   \"ns\",\n\t\t\t},\n\t\t\tprepare: func(f *fields) {\n\t\t\t\tgomock.InOrder(\n\t\t\t\t\tf.validateOps.EXPECT().ValidateNamespace(gomock.Any(), \"ns\").Return(nil),\n\t\t\t\t\tf.versionOps.EXPECT().GetCSISnapshotGroupVersion().Return(\n\t\t\t\t\t\t&metav1.GroupVersionForDiscovery{\n\t\t\t\t\t\t\tGroupVersion: common.SnapshotVersion,\n\t\t\t\t\t\t}, nil),\n\t\t\t\t\tf.validateOps.EXPECT().ValidatePVC(gomock.Any(), \"restorePVC\", \"ns\").Return(\n\t\t\t\t\t\t&v1.PersistentVolumeClaim{\n\t\t\t\t\t\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\t\t\t\t\t\tName:      \"restorePVC\",\n\t\t\t\t\t\t\t\tNamespace: \"ns\",\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\tSpec: v1.PersistentVolumeClaimSpec{\n\t\t\t\t\t\t\t\tVolumeName:       
\"vol\",\n\t\t\t\t\t\t\t\tStorageClassName: &scName,\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t}, nil,\n\t\t\t\t\t),\n\t\t\t\t\tf.validateOps.EXPECT().ValidatePVC(gomock.Any(), \"sourcePVC\", \"ns\").Return(\n\t\t\t\t\t\t&v1.PersistentVolumeClaim{\n\t\t\t\t\t\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\t\t\t\t\t\tName:      \"sourcePVC\",\n\t\t\t\t\t\t\t\tNamespace: \"ns\",\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\tSpec: v1.PersistentVolumeClaimSpec{\n\t\t\t\t\t\t\t\tVolumeName:       \"vol\",\n\t\t\t\t\t\t\t\tStorageClassName: &scName,\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t}, nil,\n\t\t\t\t\t),\n\t\t\t\t\tf.validateOps.EXPECT().ValidateStorageClass(gomock.Any(), scName).Return(\n\t\t\t\t\t\t&sv1.StorageClass{\n\t\t\t\t\t\t\tProvisioner: \"p1\",\n\t\t\t\t\t\t}, nil),\n\t\t\t\t\tf.validateOps.EXPECT().ValidateStorageClass(gomock.Any(), scName).Return(\n\t\t\t\t\t\t&sv1.StorageClass{\n\t\t\t\t\t\t\tProvisioner: \"p1\",\n\t\t\t\t\t\t}, nil),\n\t\t\t\t)\n\t\t\t},\n\t\t\terrChecker: IsNil,\n\t\t},\n\t\t{ // driver mismatch\n\t\t\targs: &types.FileRestoreArgs{\n\t\t\t\tFromSnapshotName: \"vs\",\n\t\t\t\tNamespace:        \"ns\",\n\t\t\t},\n\t\t\tprepare: func(f *fields) {\n\t\t\t\tgomock.InOrder(\n\t\t\t\t\tf.validateOps.EXPECT().ValidateNamespace(gomock.Any(), \"ns\").Return(nil),\n\t\t\t\t\tf.versionOps.EXPECT().GetCSISnapshotGroupVersion().Return(\n\t\t\t\t\t\t&metav1.GroupVersionForDiscovery{\n\t\t\t\t\t\t\tGroupVersion: common.SnapshotVersion,\n\t\t\t\t\t\t}, nil),\n\t\t\t\t\tf.validateOps.EXPECT().ValidateVolumeSnapshot(gomock.Any(), \"vs\", \"ns\", gomock.Any()).Return(\n\t\t\t\t\t\t&snapv1.VolumeSnapshot{\n\t\t\t\t\t\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\t\t\t\t\t\tName:      \"vs\",\n\t\t\t\t\t\t\t\tNamespace: \"ns\",\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\tSpec: snapv1.VolumeSnapshotSpec{\n\t\t\t\t\t\t\t\tSource: snapv1.VolumeSnapshotSource{\n\t\t\t\t\t\t\t\t\tPersistentVolumeClaimName: &pvcName,\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\tVolumeSnapshotClassName: 
&vscName,\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t}, nil,\n\t\t\t\t\t),\n\t\t\t\t\tf.validateOps.EXPECT().ValidatePVC(gomock.Any(), \"pvc\", \"ns\").Return(\n\t\t\t\t\t\t&v1.PersistentVolumeClaim{\n\t\t\t\t\t\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\t\t\t\t\t\tName:      \"pvc\",\n\t\t\t\t\t\t\t\tNamespace: \"ns\",\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\tSpec: v1.PersistentVolumeClaimSpec{\n\t\t\t\t\t\t\t\tVolumeName:       \"vol\",\n\t\t\t\t\t\t\t\tStorageClassName: &scName,\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t}, nil,\n\t\t\t\t\t),\n\t\t\t\t\tf.validateOps.EXPECT().ValidateStorageClass(gomock.Any(), gomock.Any()).Return(\n\t\t\t\t\t\t&sv1.StorageClass{\n\t\t\t\t\t\t\tProvisioner: \"p1\",\n\t\t\t\t\t\t}, nil),\n\t\t\t\t\tf.validateOps.EXPECT().ValidateVolumeSnapshotClass(gomock.Any(), \"vsc\", &metav1.GroupVersionForDiscovery{\n\t\t\t\t\t\tGroupVersion: common.SnapshotVersion,\n\t\t\t\t\t}).Return(&unstructured.Unstructured{\n\t\t\t\t\t\tObject: map[string]interface{}{\n\t\t\t\t\t\t\tcommon.VolSnapClassDriverKey: \"p2\",\n\t\t\t\t\t\t},\n\t\t\t\t\t}, nil),\n\t\t\t\t)\n\t\t\t},\n\t\t\terrChecker: NotNil,\n\t\t},\n\t\t{ // vsc error\n\t\t\targs: &types.FileRestoreArgs{\n\t\t\t\tFromSnapshotName: \"vs\",\n\t\t\t\tNamespace:        \"ns\",\n\t\t\t},\n\t\t\tprepare: func(f *fields) {\n\t\t\t\tgomock.InOrder(\n\t\t\t\t\tf.validateOps.EXPECT().ValidateNamespace(gomock.Any(), \"ns\").Return(nil),\n\t\t\t\t\tf.versionOps.EXPECT().GetCSISnapshotGroupVersion().Return(nil, nil),\n\t\t\t\t\tf.validateOps.EXPECT().ValidateVolumeSnapshot(gomock.Any(), \"vs\", \"ns\", gomock.Any()).Return(\n\t\t\t\t\t\t&snapv1.VolumeSnapshot{\n\t\t\t\t\t\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\t\t\t\t\t\tName:      \"vs\",\n\t\t\t\t\t\t\t\tNamespace: \"ns\",\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\tSpec: snapv1.VolumeSnapshotSpec{\n\t\t\t\t\t\t\t\tSource: snapv1.VolumeSnapshotSource{\n\t\t\t\t\t\t\t\t\tPersistentVolumeClaimName: &pvcName,\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\tVolumeSnapshotClassName: 
&vscName,\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t}, nil,\n\t\t\t\t\t),\n\t\t\t\t\tf.validateOps.EXPECT().ValidatePVC(gomock.Any(), \"pvc\", \"ns\").Return(\n\t\t\t\t\t\t&v1.PersistentVolumeClaim{\n\t\t\t\t\t\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\t\t\t\t\t\tName:      \"pvc\",\n\t\t\t\t\t\t\t\tNamespace: \"ns\",\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\tSpec: v1.PersistentVolumeClaimSpec{\n\t\t\t\t\t\t\t\tVolumeName:       \"vol\",\n\t\t\t\t\t\t\t\tStorageClassName: &scName,\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t}, nil,\n\t\t\t\t\t),\n\t\t\t\t\tf.validateOps.EXPECT().ValidateStorageClass(gomock.Any(), gomock.Any()).Return(nil, nil),\n\t\t\t\t\tf.validateOps.EXPECT().ValidateVolumeSnapshotClass(gomock.Any(), gomock.Any(), gomock.Any()).Return(nil, fmt.Errorf(\"vsc error\")),\n\t\t\t\t)\n\t\t\t},\n\t\t\terrChecker: NotNil,\n\t\t},\n\t\t{ // get driver version error\n\t\t\targs: &types.FileRestoreArgs{\n\t\t\t\tFromSnapshotName: \"vs\",\n\t\t\t\tNamespace:        \"ns\",\n\t\t\t},\n\t\t\tprepare: func(f *fields) {\n\t\t\t\tgomock.InOrder(\n\t\t\t\t\tf.validateOps.EXPECT().ValidateNamespace(gomock.Any(), \"ns\").Return(nil),\n\t\t\t\t\tf.versionOps.EXPECT().GetCSISnapshotGroupVersion().Return(nil, fmt.Errorf(\"driver version error\")),\n\t\t\t\t)\n\t\t\t},\n\t\t\terrChecker: NotNil,\n\t\t},\n\t\t{ // sc error\n\t\t\targs: &types.FileRestoreArgs{\n\t\t\t\tFromSnapshotName: \"vs\",\n\t\t\t\tNamespace:        \"ns\",\n\t\t\t},\n\t\t\tprepare: func(f *fields) {\n\t\t\t\tgomock.InOrder(\n\t\t\t\t\tf.validateOps.EXPECT().ValidateNamespace(gomock.Any(), \"ns\").Return(nil),\n\t\t\t\t\tf.versionOps.EXPECT().GetCSISnapshotGroupVersion().Return(nil, nil),\n\t\t\t\t\tf.validateOps.EXPECT().ValidateVolumeSnapshot(gomock.Any(), \"vs\", \"ns\", gomock.Any()).Return(\n\t\t\t\t\t\t&snapv1.VolumeSnapshot{\n\t\t\t\t\t\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\t\t\t\t\t\tName:      \"vs\",\n\t\t\t\t\t\t\t\tNamespace: \"ns\",\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\tSpec: 
snapv1.VolumeSnapshotSpec{\n\t\t\t\t\t\t\t\tSource: snapv1.VolumeSnapshotSource{\n\t\t\t\t\t\t\t\t\tPersistentVolumeClaimName: &pvcName,\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\tVolumeSnapshotClassName: &vscName,\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t}, nil,\n\t\t\t\t\t),\n\t\t\t\t\tf.validateOps.EXPECT().ValidatePVC(gomock.Any(), \"pvc\", \"ns\").Return(\n\t\t\t\t\t\t&v1.PersistentVolumeClaim{\n\t\t\t\t\t\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\t\t\t\t\t\tName:      \"pvc\",\n\t\t\t\t\t\t\t\tNamespace: \"ns\",\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\tSpec: v1.PersistentVolumeClaimSpec{\n\t\t\t\t\t\t\t\tVolumeName:       \"vol\",\n\t\t\t\t\t\t\t\tStorageClassName: &scName,\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t}, nil,\n\t\t\t\t\t),\n\t\t\t\t\tf.validateOps.EXPECT().ValidateStorageClass(gomock.Any(), gomock.Any()).Return(nil, fmt.Errorf(\"sc error\")),\n\t\t\t\t)\n\t\t\t},\n\t\t\terrChecker: NotNil,\n\t\t},\n\t\t{ // validate vs error\n\t\t\targs: &types.FileRestoreArgs{\n\t\t\t\tFromSnapshotName: \"vs\",\n\t\t\t\tNamespace:        \"ns\",\n\t\t\t},\n\t\t\tprepare: func(f *fields) {\n\t\t\t\tgomock.InOrder(\n\t\t\t\t\tf.validateOps.EXPECT().ValidateNamespace(gomock.Any(), \"ns\").Return(nil),\n\t\t\t\t\tf.versionOps.EXPECT().GetCSISnapshotGroupVersion().Return(nil, nil),\n\t\t\t\t\tf.validateOps.EXPECT().ValidateVolumeSnapshot(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).Return(nil, fmt.Errorf(\"validate vs error\")),\n\t\t\t\t)\n\t\t\t},\n\t\t\terrChecker: NotNil,\n\t\t},\n\t\t{ // validate ns error\n\t\t\targs: &types.FileRestoreArgs{\n\t\t\t\tFromSnapshotName: \"vs\",\n\t\t\t\tNamespace:        \"ns\",\n\t\t\t},\n\t\t\tprepare: func(f *fields) {\n\t\t\t\tgomock.InOrder(\n\t\t\t\t\tf.validateOps.EXPECT().ValidateNamespace(gomock.Any(), \"ns\").Return(fmt.Errorf(\"validate ns error\")),\n\t\t\t\t)\n\t\t\t},\n\t\t\terrChecker: NotNil,\n\t\t},\n\t\t{ // validate vs error\n\t\t\targs: &types.FileRestoreArgs{\n\t\t\t\tFromSnapshotName: \"\",\n\t\t\t\tNamespace:        
\"ns\",\n\t\t\t},\n\t\t\terrChecker: NotNil,\n\t\t},\n\t\t{ // validate ns error\n\t\t\targs: &types.FileRestoreArgs{\n\t\t\t\tFromSnapshotName: \"dfd\",\n\t\t\t\tNamespace:        \"\",\n\t\t\t},\n\t\t\terrChecker: NotNil,\n\t\t},\n\t} {\n\t\tctrl := gomock.NewController(c)\n\t\tdefer ctrl.Finish()\n\t\tf := fields{\n\t\t\tvalidateOps: mocks.NewMockArgumentValidator(ctrl),\n\t\t\tversionOps:  mocks.NewMockApiVersionFetcher(ctrl),\n\t\t}\n\t\tif tc.prepare != nil {\n\t\t\ttc.prepare(&f)\n\t\t}\n\t\tstepper := &fileRestoreSteps{\n\t\t\tvalidateOps:     f.validateOps,\n\t\t\tversionFetchOps: f.versionOps,\n\t\t}\n\t\t_, _, _, _, err := stepper.ValidateArgs(ctx, tc.args)\n\t\tc.Check(err, tc.errChecker)\n\t}\n}\n\nfunc (s *CSITestSuite) TestCreateInspectorApplicationForFileRestore(c *C) {\n\tctx := context.Background()\n\tresourceQuantity := resource.MustParse(\"1Gi\")\n\tsnapshotAPIGroup := \"snapshot.storage.k8s.io\"\n\ttype fields struct {\n\t\tcreateAppOps *mocks.MockApplicationCreator\n\t}\n\tfor _, tc := range []struct {\n\t\targs         *types.FileRestoreArgs\n\t\tfromSnapshot *snapv1.VolumeSnapshot\n\t\tfromPVC      *v1.PersistentVolumeClaim\n\t\tsc           *sv1.StorageClass\n\t\tprepare      func(f *fields)\n\t\terrChecker   Checker\n\t\tpodChecker   Checker\n\t\tpvcChecker   Checker\n\t}{\n\t\t{\n\t\t\targs: &types.FileRestoreArgs{\n\t\t\t\tNamespace:        \"ns\",\n\t\t\t\tRunAsUser:        100,\n\t\t\t\tFromSnapshotName: \"vs\",\n\t\t\t},\n\t\t\tsc: &sv1.StorageClass{\n\t\t\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\t\t\tName: \"sc\",\n\t\t\t\t},\n\t\t\t},\n\t\t\tfromSnapshot: &snapv1.VolumeSnapshot{\n\t\t\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\t\t\tName: \"vs\",\n\t\t\t\t},\n\t\t\t\tStatus: &snapv1.VolumeSnapshotStatus{\n\t\t\t\t\tRestoreSize: &resourceQuantity,\n\t\t\t\t},\n\t\t\t},\n\t\t\tfromPVC: &v1.PersistentVolumeClaim{},\n\t\t\tprepare: func(f *fields) {\n\t\t\t\tgomock.InOrder(\n\t\t\t\t\tf.createAppOps.EXPECT().CreatePVC(gomock.Any(), 
&types.CreatePVCArgs{\n\t\t\t\t\t\tGenerateName: clonedPVCGenerateName,\n\t\t\t\t\t\tStorageClass: \"sc\",\n\t\t\t\t\t\tNamespace:    \"ns\",\n\t\t\t\t\t\tDataSource: &v1.TypedLocalObjectReference{\n\t\t\t\t\t\t\tAPIGroup: &snapshotAPIGroup,\n\t\t\t\t\t\t\tKind:     \"VolumeSnapshot\",\n\t\t\t\t\t\t\tName:     \"vs\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\tRestoreSize: &resourceQuantity,\n\t\t\t\t\t}).Return(&v1.PersistentVolumeClaim{\n\t\t\t\t\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\t\t\t\t\tName: \"restorePVC\",\n\t\t\t\t\t\t},\n\t\t\t\t\t}, nil),\n\t\t\t\t\tf.createAppOps.EXPECT().CreatePod(gomock.Any(), &types.CreatePodArgs{\n\t\t\t\t\t\tGenerateName:   clonedPodGenerateName,\n\t\t\t\t\t\tNamespace:      \"ns\",\n\t\t\t\t\t\tContainerArgs:  []string{\"--noauth\"},\n\t\t\t\t\t\tRunAsUser:      100,\n\t\t\t\t\t\tContainerImage: \"filebrowser/filebrowser:v2\",\n\t\t\t\t\t\tPVCMap: map[string]types.VolumePath{\n\t\t\t\t\t\t\t\"restorePVC\": {\n\t\t\t\t\t\t\t\tMountPath: \"/srv/snapshot-data\",\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\"sourcePVC\": {\n\t\t\t\t\t\t\t\tMountPath: \"/srv/source-data\",\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t}).Return(&v1.Pod{\n\t\t\t\t\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\t\t\t\t\tName: \"pod\",\n\t\t\t\t\t\t},\n\t\t\t\t\t}, nil),\n\t\t\t\t\tf.createAppOps.EXPECT().WaitForPodReady(gomock.Any(), \"ns\", \"pod\").Return(nil),\n\t\t\t\t)\n\t\t\t},\n\t\t\terrChecker: IsNil,\n\t\t\tpodChecker: NotNil,\n\t\t\tpvcChecker: NotNil,\n\t\t},\n\t\t{\n\t\t\targs: &types.FileRestoreArgs{\n\t\t\t\tNamespace:   \"ns\",\n\t\t\t\tRunAsUser:   100,\n\t\t\t\tFromPVCName: \"restorePVC\",\n\t\t\t},\n\t\t\tsc: &sv1.StorageClass{\n\t\t\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\t\t\tName: \"sc\",\n\t\t\t\t},\n\t\t\t},\n\t\t\tfromSnapshot: &snapv1.VolumeSnapshot{},\n\t\t\tfromPVC: &v1.PersistentVolumeClaim{\n\t\t\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\t\t\tName: \"restorePVC\",\n\t\t\t\t},\n\t\t\t},\n\t\t\tprepare: func(f *fields) 
{\n\t\t\t\tgomock.InOrder(\n\t\t\t\t\tf.createAppOps.EXPECT().CreatePod(gomock.Any(), &types.CreatePodArgs{\n\t\t\t\t\t\tGenerateName:   clonedPodGenerateName,\n\t\t\t\t\t\tNamespace:      \"ns\",\n\t\t\t\t\t\tContainerArgs:  []string{\"--noauth\"},\n\t\t\t\t\t\tRunAsUser:      100,\n\t\t\t\t\t\tContainerImage: \"filebrowser/filebrowser:v2\",\n\t\t\t\t\t\tPVCMap: map[string]types.VolumePath{\n\t\t\t\t\t\t\t\"restorePVC\": {\n\t\t\t\t\t\t\t\tMountPath: \"/srv/restore-pvc-data\",\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\"sourcePVC\": {\n\t\t\t\t\t\t\t\tMountPath: \"/srv/source-data\",\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t}).Return(&v1.Pod{\n\t\t\t\t\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\t\t\t\t\tName: \"pod\",\n\t\t\t\t\t\t},\n\t\t\t\t\t}, nil),\n\t\t\t\t\tf.createAppOps.EXPECT().WaitForPodReady(gomock.Any(), \"ns\", \"pod\").Return(nil),\n\t\t\t\t)\n\t\t\t},\n\t\t\terrChecker: IsNil,\n\t\t\tpodChecker: NotNil,\n\t\t\tpvcChecker: NotNil,\n\t\t},\n\t\t{\n\t\t\targs: &types.FileRestoreArgs{\n\t\t\t\tNamespace:        \"ns\",\n\t\t\t\tRunAsUser:        100,\n\t\t\t\tFromSnapshotName: \"vs\",\n\t\t\t},\n\t\t\tsc: &sv1.StorageClass{\n\t\t\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\t\t\tName: \"sc\",\n\t\t\t\t},\n\t\t\t},\n\t\t\tfromSnapshot: &snapv1.VolumeSnapshot{\n\t\t\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\t\t\tName: \"vs\",\n\t\t\t\t},\n\t\t\t\tStatus: &snapv1.VolumeSnapshotStatus{\n\t\t\t\t\tRestoreSize: &resourceQuantity,\n\t\t\t\t},\n\t\t\t},\n\t\t\tfromPVC: &v1.PersistentVolumeClaim{},\n\t\t\tprepare: func(f *fields) {\n\t\t\t\tgomock.InOrder(\n\t\t\t\t\tf.createAppOps.EXPECT().CreatePVC(gomock.Any(), &types.CreatePVCArgs{\n\t\t\t\t\t\tGenerateName: clonedPVCGenerateName,\n\t\t\t\t\t\tStorageClass: \"sc\",\n\t\t\t\t\t\tNamespace:    \"ns\",\n\t\t\t\t\t\tDataSource: &v1.TypedLocalObjectReference{\n\t\t\t\t\t\t\tAPIGroup: &snapshotAPIGroup,\n\t\t\t\t\t\t\tKind:     \"VolumeSnapshot\",\n\t\t\t\t\t\t\tName:     \"vs\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\tRestoreSize: 
&resourceQuantity,\n\t\t\t\t\t}).Return(&v1.PersistentVolumeClaim{\n\t\t\t\t\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\t\t\t\t\tName: \"restorePVC\",\n\t\t\t\t\t\t},\n\t\t\t\t\t}, nil),\n\t\t\t\t\tf.createAppOps.EXPECT().CreatePod(gomock.Any(), &types.CreatePodArgs{\n\t\t\t\t\t\tGenerateName:   clonedPodGenerateName,\n\t\t\t\t\t\tNamespace:      \"ns\",\n\t\t\t\t\t\tContainerArgs:  []string{\"--noauth\"},\n\t\t\t\t\t\tRunAsUser:      100,\n\t\t\t\t\t\tContainerImage: \"filebrowser/filebrowser:v2\",\n\t\t\t\t\t\tPVCMap: map[string]types.VolumePath{\n\t\t\t\t\t\t\t\"restorePVC\": {\n\t\t\t\t\t\t\t\tMountPath: \"/srv/snapshot-data\",\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\"sourcePVC\": {\n\t\t\t\t\t\t\t\tMountPath: \"/srv/source-data\",\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t}).Return(&v1.Pod{\n\t\t\t\t\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\t\t\t\t\tName: \"pod\",\n\t\t\t\t\t\t},\n\t\t\t\t\t}, nil),\n\t\t\t\t\tf.createAppOps.EXPECT().WaitForPodReady(gomock.Any(), \"ns\", \"pod\").Return(fmt.Errorf(\"pod ready error\")),\n\t\t\t\t)\n\t\t\t},\n\t\t\terrChecker: NotNil,\n\t\t\tpodChecker: NotNil,\n\t\t\tpvcChecker: NotNil,\n\t\t},\n\t\t{\n\t\t\targs: &types.FileRestoreArgs{\n\t\t\t\tNamespace:        \"ns\",\n\t\t\t\tRunAsUser:        100,\n\t\t\t\tFromSnapshotName: \"vs\",\n\t\t\t},\n\t\t\tsc: &sv1.StorageClass{\n\t\t\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\t\t\tName: \"sc\",\n\t\t\t\t},\n\t\t\t},\n\t\t\tfromSnapshot: &snapv1.VolumeSnapshot{\n\t\t\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\t\t\tName: \"vs\",\n\t\t\t\t},\n\t\t\t\tStatus: &snapv1.VolumeSnapshotStatus{\n\t\t\t\t\tRestoreSize: &resourceQuantity,\n\t\t\t\t},\n\t\t\t},\n\t\t\tfromPVC: &v1.PersistentVolumeClaim{},\n\t\t\tprepare: func(f *fields) {\n\t\t\t\tgomock.InOrder(\n\t\t\t\t\tf.createAppOps.EXPECT().CreatePVC(gomock.Any(), gomock.Any()).Return(&v1.PersistentVolumeClaim{\n\t\t\t\t\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\t\t\t\t\tName: \"restorePVC\",\n\t\t\t\t\t\t},\n\t\t\t\t\t}, 
nil),\n\t\t\t\t\tf.createAppOps.EXPECT().CreatePod(gomock.Any(), gomock.Any()).Return(nil, fmt.Errorf(\"pod  error\")),\n\t\t\t\t)\n\t\t\t},\n\t\t\terrChecker: NotNil,\n\t\t\tpodChecker: IsNil,\n\t\t\tpvcChecker: NotNil,\n\t\t},\n\t\t{\n\t\t\targs: &types.FileRestoreArgs{\n\t\t\t\tNamespace:        \"ns\",\n\t\t\t\tRunAsUser:        100,\n\t\t\t\tFromSnapshotName: \"vs\",\n\t\t\t},\n\t\t\tsc: &sv1.StorageClass{\n\t\t\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\t\t\tName: \"sc\",\n\t\t\t\t},\n\t\t\t},\n\t\t\tfromSnapshot: &snapv1.VolumeSnapshot{\n\t\t\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\t\t\tName: \"vs\",\n\t\t\t\t},\n\t\t\t\tStatus: &snapv1.VolumeSnapshotStatus{\n\t\t\t\t\tRestoreSize: &resourceQuantity,\n\t\t\t\t},\n\t\t\t},\n\t\t\tfromPVC: &v1.PersistentVolumeClaim{},\n\t\t\tprepare: func(f *fields) {\n\t\t\t\tgomock.InOrder(\n\t\t\t\t\tf.createAppOps.EXPECT().CreatePVC(gomock.Any(), gomock.Any()).Return(nil, fmt.Errorf(\"error\")),\n\t\t\t\t)\n\t\t\t},\n\t\t\terrChecker: NotNil,\n\t\t\tpodChecker: IsNil,\n\t\t\tpvcChecker: IsNil,\n\t\t},\n\t} {\n\t\tctrl := gomock.NewController(c)\n\t\tdefer ctrl.Finish()\n\t\tf := fields{\n\t\t\tcreateAppOps: mocks.NewMockApplicationCreator(ctrl),\n\t\t}\n\t\tif tc.prepare != nil {\n\t\t\ttc.prepare(&f)\n\t\t}\n\t\tstepper := &fileRestoreSteps{\n\t\t\tcreateAppOps: f.createAppOps,\n\t\t}\n\t\tsourcePVC := v1.PersistentVolumeClaim{\n\t\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\t\tName:      \"sourcePVC\",\n\t\t\t\tNamespace: tc.args.Namespace,\n\t\t\t},\n\t\t\tSpec: v1.PersistentVolumeClaimSpec{\n\t\t\t\tAccessModes: []v1.PersistentVolumeAccessMode{\n\t\t\t\t\tv1.ReadWriteOnce,\n\t\t\t\t},\n\t\t\t\tResources: v1.VolumeResourceRequirements{\n\t\t\t\t\tRequests: v1.ResourceList{\n\t\t\t\t\t\tv1.ResourceStorage: resource.MustParse(\"1Gi\"),\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t}\n\t\tpod, pvc, _, err := stepper.CreateInspectorApplication(ctx, tc.args, tc.fromSnapshot, tc.fromPVC, &sourcePVC, tc.sc)\n\t\tc.Check(err, 
tc.errChecker)\n\t\tc.Check(pod, tc.podChecker)\n\t\tc.Check(pvc, tc.pvcChecker)\n\t}\n}\n\nfunc (s *CSITestSuite) TestFileRestoreCleanup(c *C) {\n\tctx := context.Background()\n\tgroupversion := &metav1.GroupVersionForDiscovery{\n\t\tGroupVersion: \"gv\",\n\t\tVersion:      \"v\",\n\t}\n\ttype fields struct {\n\t\tcleanerOps *mocks.MockCleaner\n\t}\n\tfor _, tc := range []struct {\n\t\targs       *types.FileRestoreArgs\n\t\trestorePVC *v1.PersistentVolumeClaim\n\t\tpod        *v1.Pod\n\t\tprepare    func(f *fields)\n\t}{\n\t\t{\n\t\t\targs: &types.FileRestoreArgs{\n\t\t\t\tNamespace:        \"ns\",\n\t\t\t\tRunAsUser:        100,\n\t\t\t\tFromSnapshotName: \"vs\",\n\t\t\t},\n\t\t\trestorePVC: &v1.PersistentVolumeClaim{\n\t\t\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\t\t\tName:      \"restorePVC\",\n\t\t\t\t\tNamespace: \"ns\",\n\t\t\t\t},\n\t\t\t},\n\t\t\tpod: &v1.Pod{\n\t\t\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\t\t\tName:      \"pod\",\n\t\t\t\t\tNamespace: \"ns\",\n\t\t\t\t},\n\t\t\t},\n\t\t\tprepare: func(f *fields) {\n\t\t\t\tgomock.InOrder(\n\t\t\t\t\tf.cleanerOps.EXPECT().DeletePVC(ctx, \"restorePVC\", \"ns\").Return(nil),\n\t\t\t\t\tf.cleanerOps.EXPECT().DeletePod(ctx, \"pod\", \"ns\").Return(nil),\n\t\t\t\t)\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\targs: &types.FileRestoreArgs{\n\t\t\t\tNamespace:        \"ns\",\n\t\t\t\tRunAsUser:        100,\n\t\t\t\tFromSnapshotName: \"\",\n\t\t\t\tFromPVCName:      \"restorePVC\",\n\t\t\t\tToPVCName:        \"sourcePVC\",\n\t\t\t},\n\t\t\trestorePVC: &v1.PersistentVolumeClaim{\n\t\t\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\t\t\tName:      \"restorePVC\",\n\t\t\t\t\tNamespace: \"ns\",\n\t\t\t\t},\n\t\t\t},\n\t\t\tpod: &v1.Pod{\n\t\t\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\t\t\tName:      \"pod\",\n\t\t\t\t\tNamespace: \"ns\",\n\t\t\t\t},\n\t\t\t},\n\t\t\tprepare: func(f *fields) {\n\t\t\t\tgomock.InOrder(\n\t\t\t\t\tf.cleanerOps.EXPECT().DeletePod(ctx, \"pod\", \"ns\").Return(nil),\n\t\t\t\t)\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\targs: 
&types.FileRestoreArgs{\n\t\t\t\tNamespace:        \"ns\",\n\t\t\t\tRunAsUser:        100,\n\t\t\t\tFromSnapshotName: \"vs\",\n\t\t\t},\n\t\t\trestorePVC: &v1.PersistentVolumeClaim{\n\t\t\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\t\t\tName:      \"restorePVC\",\n\t\t\t\t\tNamespace: \"ns\",\n\t\t\t\t},\n\t\t\t},\n\t\t\tpod: &v1.Pod{\n\t\t\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\t\t\tName:      \"pod\",\n\t\t\t\t\tNamespace: \"ns\",\n\t\t\t\t},\n\t\t\t},\n\t\t\tprepare: func(f *fields) {\n\t\t\t\tgomock.InOrder(\n\t\t\t\t\tf.cleanerOps.EXPECT().DeletePVC(ctx, \"restorePVC\", \"ns\").Return(fmt.Errorf(\"err\")),\n\t\t\t\t\tf.cleanerOps.EXPECT().DeletePod(ctx, \"pod\", \"ns\").Return(fmt.Errorf(\"err\")),\n\t\t\t\t)\n\t\t\t},\n\t\t},\n\t} {\n\t\tctrl := gomock.NewController(c)\n\t\tdefer ctrl.Finish()\n\t\tf := fields{\n\t\t\tcleanerOps: mocks.NewMockCleaner(ctrl),\n\t\t}\n\t\tif tc.prepare != nil {\n\t\t\ttc.prepare(&f)\n\t\t}\n\t\tstepper := &fileRestoreSteps{\n\t\t\tcleanerOps:           f.cleanerOps,\n\t\t\tSnapshotGroupVersion: groupversion,\n\t\t}\n\t\tstepper.Cleanup(ctx, tc.args, tc.restorePVC, tc.pod)\n\t}\n}\n"
  },
  {
    "path": "pkg/csi/file_restore_inspector_test.go",
    "content": "package csi\n\nimport (\n\t\"context\"\n\t\"fmt\"\n\n\t\"github.com/golang/mock/gomock\"\n\t\"github.com/kastenhq/kubestr/pkg/csi/mocks\"\n\t\"github.com/kastenhq/kubestr/pkg/csi/types\"\n\tsnapv1 \"github.com/kubernetes-csi/external-snapshotter/client/v4/apis/volumesnapshot/v1\"\n\t. \"gopkg.in/check.v1\"\n\tv1 \"k8s.io/api/core/v1\"\n\tsv1 \"k8s.io/api/storage/v1\"\n\tmetav1 \"k8s.io/apimachinery/pkg/apis/meta/v1\"\n\t\"k8s.io/apimachinery/pkg/runtime\"\n\t\"k8s.io/client-go/dynamic\"\n\tfakedynamic \"k8s.io/client-go/dynamic/fake\"\n\t\"k8s.io/client-go/kubernetes\"\n\t\"k8s.io/client-go/kubernetes/fake\"\n)\n\nfunc (s *CSITestSuite) TestRunFileRestoreHelper(c *C) {\n\tctx := context.Background()\n\ttype fields struct {\n\t\tstepperOps *mocks.MockFileRestoreStepper\n\t}\n\tfor _, tc := range []struct {\n\t\tkubeCli    kubernetes.Interface\n\t\tdynCli     dynamic.Interface\n\t\targs       *types.FileRestoreArgs\n\t\tprepare    func(f *fields)\n\t\terrChecker Checker\n\t}{\n\t\t{\n\t\t\t// success\n\t\t\tkubeCli: fake.NewSimpleClientset(),\n\t\t\tdynCli:  fakedynamic.NewSimpleDynamicClient(runtime.NewScheme()),\n\t\t\targs:    &types.FileRestoreArgs{},\n\t\t\tprepare: func(f *fields) {\n\t\t\t\tgomock.InOrder(\n\t\t\t\t\tf.stepperOps.EXPECT().ValidateArgs(gomock.Any(), gomock.Any()).Return(\n\t\t\t\t\t\t&snapv1.VolumeSnapshot{}, &v1.PersistentVolumeClaim{}, &v1.PersistentVolumeClaim{}, &sv1.StorageClass{}, nil,\n\t\t\t\t\t),\n\t\t\t\t\tf.stepperOps.EXPECT().CreateInspectorApplication(gomock.Any(), gomock.Any(),\n\t\t\t\t\t\t&snapv1.VolumeSnapshot{}, &v1.PersistentVolumeClaim{}, &v1.PersistentVolumeClaim{}, &sv1.StorageClass{},\n\t\t\t\t\t).Return(\n\t\t\t\t\t\t&v1.Pod{\n\t\t\t\t\t\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\t\t\t\t\t\tName:      \"pod1\",\n\t\t\t\t\t\t\t\tNamespace: \"ns\",\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t\t&v1.PersistentVolumeClaim{\n\t\t\t\t\t\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\t\t\t\t\t\tName:      
\"pvc1\",\n\t\t\t\t\t\t\t\tNamespace: \"ns\",\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t\t\"\",\n\t\t\t\t\t\tnil,\n\t\t\t\t\t),\n\t\t\t\t\tf.stepperOps.EXPECT().PortForwardAPod(\n\t\t\t\t\t\t&v1.Pod{\n\t\t\t\t\t\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\t\t\t\t\t\tName:      \"pod1\",\n\t\t\t\t\t\t\t\tNamespace: \"ns\",\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t}, gomock.Any(),\n\t\t\t\t\t).Return(nil),\n\t\t\t\t\tf.stepperOps.EXPECT().Cleanup(gomock.Any(), gomock.Any(),\n\t\t\t\t\t\t&v1.PersistentVolumeClaim{\n\t\t\t\t\t\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\t\t\t\t\t\tName:      \"pvc1\",\n\t\t\t\t\t\t\t\tNamespace: \"ns\",\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t\t&v1.Pod{\n\t\t\t\t\t\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\t\t\t\t\t\tName:      \"pod1\",\n\t\t\t\t\t\t\t\tNamespace: \"ns\",\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t),\n\t\t\t\t)\n\t\t\t},\n\t\t\terrChecker: IsNil,\n\t\t},\n\t\t{\n\t\t\t// portforward failure\n\t\t\tkubeCli: fake.NewSimpleClientset(),\n\t\t\tdynCli:  fakedynamic.NewSimpleDynamicClient(runtime.NewScheme()),\n\t\t\targs:    &types.FileRestoreArgs{},\n\t\t\tprepare: func(f *fields) {\n\t\t\t\tgomock.InOrder(\n\t\t\t\t\tf.stepperOps.EXPECT().ValidateArgs(gomock.Any(), gomock.Any()).Return(nil, nil, nil, nil, nil),\n\t\t\t\t\tf.stepperOps.EXPECT().CreateInspectorApplication(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).Return(nil, nil, \"\", nil),\n\t\t\t\t\tf.stepperOps.EXPECT().PortForwardAPod(gomock.Any(), gomock.Any()).Return(fmt.Errorf(\"portforward error\")),\n\t\t\t\t\tf.stepperOps.EXPECT().Cleanup(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()),\n\t\t\t\t)\n\t\t\t},\n\t\t\terrChecker: NotNil,\n\t\t},\n\t\t{\n\t\t\t// createapp failure\n\t\t\tkubeCli: fake.NewSimpleClientset(),\n\t\t\tdynCli:  fakedynamic.NewSimpleDynamicClient(runtime.NewScheme()),\n\t\t\targs:    &types.FileRestoreArgs{},\n\t\t\tprepare: func(f *fields) 
{\n\t\t\t\tgomock.InOrder(\n\t\t\t\t\tf.stepperOps.EXPECT().ValidateArgs(gomock.Any(), gomock.Any()).Return(nil, nil, nil, nil, nil),\n\t\t\t\t\tf.stepperOps.EXPECT().CreateInspectorApplication(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).Return(nil, nil, \"\", fmt.Errorf(\"createapp error\")),\n\t\t\t\t\tf.stepperOps.EXPECT().Cleanup(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()),\n\t\t\t\t)\n\t\t\t},\n\t\t\terrChecker: NotNil,\n\t\t},\n\t\t{\n\t\t\t// fetch snapshot failure\n\t\t\tkubeCli: fake.NewSimpleClientset(),\n\t\t\tdynCli:  fakedynamic.NewSimpleDynamicClient(runtime.NewScheme()),\n\t\t\targs:    &types.FileRestoreArgs{},\n\t\t\tprepare: func(f *fields) {\n\t\t\t\tgomock.InOrder(\n\t\t\t\t\tf.stepperOps.EXPECT().ValidateArgs(gomock.Any(), gomock.Any()).Return(nil, nil, nil, nil, fmt.Errorf(\"snapshot error\")),\n\t\t\t\t\tf.stepperOps.EXPECT().Cleanup(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()),\n\t\t\t\t)\n\t\t\t},\n\t\t\terrChecker: NotNil,\n\t\t},\n\t\t{\n\t\t\t// validate failure\n\t\t\tkubeCli: fake.NewSimpleClientset(),\n\t\t\tdynCli:  fakedynamic.NewSimpleDynamicClient(runtime.NewScheme()),\n\t\t\targs:    &types.FileRestoreArgs{},\n\t\t\tprepare: func(f *fields) {\n\t\t\t\tgomock.InOrder(\n\t\t\t\t\tf.stepperOps.EXPECT().ValidateArgs(gomock.Any(), gomock.Any()).Return(nil, nil, nil, nil, fmt.Errorf(\"validate error\")),\n\t\t\t\t\tf.stepperOps.EXPECT().Cleanup(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()),\n\t\t\t\t)\n\t\t\t},\n\t\t\terrChecker: NotNil,\n\t\t},\n\t\t{\n\t\t\t// emptycli failure\n\t\t\tkubeCli: nil,\n\t\t\tdynCli:  fakedynamic.NewSimpleDynamicClient(runtime.NewScheme()),\n\t\t\targs:    &types.FileRestoreArgs{},\n\t\t\tprepare: func(f *fields) {\n\t\t\t\tgomock.InOrder(\n\t\t\t\t\tf.stepperOps.EXPECT().Cleanup(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()),\n\t\t\t\t)\n\t\t\t},\n\t\t\terrChecker: NotNil,\n\t\t},\n\t\t{\n\t\t\t// emptydyncli 
failure\n\t\t\tkubeCli: fake.NewSimpleClientset(),\n\t\t\tdynCli:  nil,\n\t\t\targs:    &types.FileRestoreArgs{},\n\t\t\tprepare: func(f *fields) {\n\t\t\t\tgomock.InOrder(\n\t\t\t\t\tf.stepperOps.EXPECT().Cleanup(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()),\n\t\t\t\t)\n\t\t\t},\n\t\t\terrChecker: NotNil,\n\t\t},\n\t} {\n\t\tctrl := gomock.NewController(c)\n\t\tdefer ctrl.Finish()\n\t\tf := fields{\n\t\t\tstepperOps: mocks.NewMockFileRestoreStepper(ctrl),\n\t\t}\n\t\tif tc.prepare != nil {\n\t\t\ttc.prepare(&f)\n\t\t}\n\t\trunner := &FileRestoreRunner{\n\t\t\tKubeCli:      tc.kubeCli,\n\t\t\tDynCli:       tc.dynCli,\n\t\t\trestoreSteps: f.stepperOps,\n\t\t}\n\t\terr := runner.RunFileRestoreHelper(ctx, tc.args)\n\t\tc.Check(err, tc.errChecker)\n\t}\n}\n\nfunc (s *CSITestSuite) TestFileRestoreRunner(c *C) {\n\tctx := context.Background()\n\tr := &FileRestoreRunner{\n\t\trestoreSteps: &fileRestoreSteps{},\n\t}\n\targs := types.FileRestoreArgs{}\n\terr := r.RunFileRestoreHelper(ctx, &args)\n\tc.Check(err, NotNil)\n}\n"
  },
  {
    "path": "pkg/csi/mocks/mock_api_version_fetcher.go",
    "content": "// Code generated by MockGen. DO NOT EDIT.\n// Source: github.com/kastenhq/kubestr/pkg/csi (interfaces: ApiVersionFetcher)\n\n// Package mocks is a generated GoMock package.\npackage mocks\n\nimport (\n\treflect \"reflect\"\n\n\tgomock \"github.com/golang/mock/gomock\"\n\tv1 \"k8s.io/apimachinery/pkg/apis/meta/v1\"\n)\n\n// MockApiVersionFetcher is a mock of ApiVersionFetcher interface.\ntype MockApiVersionFetcher struct {\n\tctrl     *gomock.Controller\n\trecorder *MockApiVersionFetcherMockRecorder\n}\n\n// MockApiVersionFetcherMockRecorder is the mock recorder for MockApiVersionFetcher.\ntype MockApiVersionFetcherMockRecorder struct {\n\tmock *MockApiVersionFetcher\n}\n\n// NewMockApiVersionFetcher creates a new mock instance.\nfunc NewMockApiVersionFetcher(ctrl *gomock.Controller) *MockApiVersionFetcher {\n\tmock := &MockApiVersionFetcher{ctrl: ctrl}\n\tmock.recorder = &MockApiVersionFetcherMockRecorder{mock}\n\treturn mock\n}\n\n// EXPECT returns an object that allows the caller to indicate expected use.\nfunc (m *MockApiVersionFetcher) EXPECT() *MockApiVersionFetcherMockRecorder {\n\treturn m.recorder\n}\n\n// GetCSISnapshotGroupVersion mocks base method.\nfunc (m *MockApiVersionFetcher) GetCSISnapshotGroupVersion() (*v1.GroupVersionForDiscovery, error) {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"GetCSISnapshotGroupVersion\")\n\tret0, _ := ret[0].(*v1.GroupVersionForDiscovery)\n\tret1, _ := ret[1].(error)\n\treturn ret0, ret1\n}\n\n// GetCSISnapshotGroupVersion indicates an expected call of GetCSISnapshotGroupVersion.\nfunc (mr *MockApiVersionFetcherMockRecorder) GetCSISnapshotGroupVersion() *gomock.Call {\n\tmr.mock.ctrl.T.Helper()\n\treturn mr.mock.ctrl.RecordCallWithMethodType(mr.mock, \"GetCSISnapshotGroupVersion\", reflect.TypeOf((*MockApiVersionFetcher)(nil).GetCSISnapshotGroupVersion))\n}\n"
  },
  {
    "path": "pkg/csi/mocks/mock_application_creator.go",
    "content": "// Code generated by MockGen. DO NOT EDIT.\n// Source: github.com/kastenhq/kubestr/pkg/csi (interfaces: ApplicationCreator)\n\n// Package mocks is a generated GoMock package.\npackage mocks\n\nimport (\n\tcontext \"context\"\n\treflect \"reflect\"\n\n\tgomock \"github.com/golang/mock/gomock\"\n\ttypes \"github.com/kastenhq/kubestr/pkg/csi/types\"\n\tv1 \"k8s.io/api/core/v1\"\n)\n\n// MockApplicationCreator is a mock of ApplicationCreator interface.\ntype MockApplicationCreator struct {\n\tctrl     *gomock.Controller\n\trecorder *MockApplicationCreatorMockRecorder\n}\n\n// MockApplicationCreatorMockRecorder is the mock recorder for MockApplicationCreator.\ntype MockApplicationCreatorMockRecorder struct {\n\tmock *MockApplicationCreator\n}\n\n// NewMockApplicationCreator creates a new mock instance.\nfunc NewMockApplicationCreator(ctrl *gomock.Controller) *MockApplicationCreator {\n\tmock := &MockApplicationCreator{ctrl: ctrl}\n\tmock.recorder = &MockApplicationCreatorMockRecorder{mock}\n\treturn mock\n}\n\n// EXPECT returns an object that allows the caller to indicate expected use.\nfunc (m *MockApplicationCreator) EXPECT() *MockApplicationCreatorMockRecorder {\n\treturn m.recorder\n}\n\n// CreatePVC mocks base method.\nfunc (m *MockApplicationCreator) CreatePVC(arg0 context.Context, arg1 *types.CreatePVCArgs) (*v1.PersistentVolumeClaim, error) {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"CreatePVC\", arg0, arg1)\n\tret0, _ := ret[0].(*v1.PersistentVolumeClaim)\n\tret1, _ := ret[1].(error)\n\treturn ret0, ret1\n}\n\n// CreatePVC indicates an expected call of CreatePVC.\nfunc (mr *MockApplicationCreatorMockRecorder) CreatePVC(arg0, arg1 interface{}) *gomock.Call {\n\tmr.mock.ctrl.T.Helper()\n\treturn mr.mock.ctrl.RecordCallWithMethodType(mr.mock, \"CreatePVC\", reflect.TypeOf((*MockApplicationCreator)(nil).CreatePVC), arg0, arg1)\n}\n\n// CreatePod mocks base method.\nfunc (m *MockApplicationCreator) CreatePod(arg0 context.Context, arg1 
*types.CreatePodArgs) (*v1.Pod, error) {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"CreatePod\", arg0, arg1)\n\tret0, _ := ret[0].(*v1.Pod)\n\tret1, _ := ret[1].(error)\n\treturn ret0, ret1\n}\n\n// CreatePod indicates an expected call of CreatePod.\nfunc (mr *MockApplicationCreatorMockRecorder) CreatePod(arg0, arg1 interface{}) *gomock.Call {\n\tmr.mock.ctrl.T.Helper()\n\treturn mr.mock.ctrl.RecordCallWithMethodType(mr.mock, \"CreatePod\", reflect.TypeOf((*MockApplicationCreator)(nil).CreatePod), arg0, arg1)\n}\n\n// WaitForPodReady mocks base method.\nfunc (m *MockApplicationCreator) WaitForPodReady(arg0 context.Context, arg1, arg2 string) error {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"WaitForPodReady\", arg0, arg1, arg2)\n\tret0, _ := ret[0].(error)\n\treturn ret0\n}\n\n// WaitForPodReady indicates an expected call of WaitForPodReady.\nfunc (mr *MockApplicationCreatorMockRecorder) WaitForPodReady(arg0, arg1, arg2 interface{}) *gomock.Call {\n\tmr.mock.ctrl.T.Helper()\n\treturn mr.mock.ctrl.RecordCallWithMethodType(mr.mock, \"WaitForPodReady\", reflect.TypeOf((*MockApplicationCreator)(nil).WaitForPodReady), arg0, arg1, arg2)\n}\n\nfunc (m *MockApplicationCreator) WaitForPVCReady(ctx context.Context, namespace string, pvcName string) error {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"WaitForPVCReady\", ctx, namespace, pvcName)\n\terr, _ := ret[0].(error)\n\treturn err\n}\n\n// WaitForPodReady indicates an expected call of WaitForPVCReady.\nfunc (mr *MockApplicationCreatorMockRecorder) WaitForPVCReady(ctx, namespace, pvcName interface{}) *gomock.Call {\n\tmr.mock.ctrl.T.Helper()\n\treturn mr.mock.ctrl.RecordCallWithMethodType(mr.mock, \"WaitForPVCReady\", reflect.TypeOf((*MockApplicationCreator)(nil).WaitForPVCReady), ctx, namespace, pvcName)\n}"
  },
  {
    "path": "pkg/csi/mocks/mock_argument_validator.go",
    "content": "// Code generated by MockGen. DO NOT EDIT.\n// Source: github.com/kastenhq/kubestr/pkg/csi (interfaces: ArgumentValidator)\n\n// Package mocks is a generated GoMock package.\npackage mocks\n\nimport (\n\tcontext \"context\"\n\treflect \"reflect\"\n\n\tgomock \"github.com/golang/mock/gomock\"\n\tv1 \"github.com/kubernetes-csi/external-snapshotter/client/v4/apis/volumesnapshot/v1\"\n\tv10 \"k8s.io/api/core/v1\"\n\tv11 \"k8s.io/api/storage/v1\"\n\tv12 \"k8s.io/apimachinery/pkg/apis/meta/v1\"\n\tunstructured \"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured\"\n)\n\n// MockArgumentValidator is a mock of ArgumentValidator interface.\ntype MockArgumentValidator struct {\n\tctrl     *gomock.Controller\n\trecorder *MockArgumentValidatorMockRecorder\n}\n\n// MockArgumentValidatorMockRecorder is the mock recorder for MockArgumentValidator.\ntype MockArgumentValidatorMockRecorder struct {\n\tmock *MockArgumentValidator\n}\n\n// NewMockArgumentValidator creates a new mock instance.\nfunc NewMockArgumentValidator(ctrl *gomock.Controller) *MockArgumentValidator {\n\tmock := &MockArgumentValidator{ctrl: ctrl}\n\tmock.recorder = &MockArgumentValidatorMockRecorder{mock}\n\treturn mock\n}\n\n// EXPECT returns an object that allows the caller to indicate expected use.\nfunc (m *MockArgumentValidator) EXPECT() *MockArgumentValidatorMockRecorder {\n\treturn m.recorder\n}\n\n// FetchPV mocks base method.\nfunc (m *MockArgumentValidator) FetchPV(arg0 context.Context, arg1 string) (*v10.PersistentVolume, error) {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"FetchPV\", arg0, arg1)\n\tret0, _ := ret[0].(*v10.PersistentVolume)\n\tret1, _ := ret[1].(error)\n\treturn ret0, ret1\n}\n\n// FetchPV indicates an expected call of FetchPV.\nfunc (mr *MockArgumentValidatorMockRecorder) FetchPV(arg0, arg1 interface{}) *gomock.Call {\n\tmr.mock.ctrl.T.Helper()\n\treturn mr.mock.ctrl.RecordCallWithMethodType(mr.mock, \"FetchPV\", reflect.TypeOf((*MockArgumentValidator)(nil).FetchPV), 
arg0, arg1)\n}\n\n// ValidateNamespace mocks base method.\nfunc (m *MockArgumentValidator) ValidateNamespace(arg0 context.Context, arg1 string) error {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"ValidateNamespace\", arg0, arg1)\n\tret0, _ := ret[0].(error)\n\treturn ret0\n}\n\n// ValidateNamespace indicates an expected call of ValidateNamespace.\nfunc (mr *MockArgumentValidatorMockRecorder) ValidateNamespace(arg0, arg1 interface{}) *gomock.Call {\n\tmr.mock.ctrl.T.Helper()\n\treturn mr.mock.ctrl.RecordCallWithMethodType(mr.mock, \"ValidateNamespace\", reflect.TypeOf((*MockArgumentValidator)(nil).ValidateNamespace), arg0, arg1)\n}\n\n// ValidatePVC mocks base method.\nfunc (m *MockArgumentValidator) ValidatePVC(arg0 context.Context, arg1, arg2 string) (*v10.PersistentVolumeClaim, error) {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"ValidatePVC\", arg0, arg1, arg2)\n\tret0, _ := ret[0].(*v10.PersistentVolumeClaim)\n\tret1, _ := ret[1].(error)\n\treturn ret0, ret1\n}\n\n// ValidatePVC indicates an expected call of ValidatePVC.\nfunc (mr *MockArgumentValidatorMockRecorder) ValidatePVC(arg0, arg1, arg2 interface{}) *gomock.Call {\n\tmr.mock.ctrl.T.Helper()\n\treturn mr.mock.ctrl.RecordCallWithMethodType(mr.mock, \"ValidatePVC\", reflect.TypeOf((*MockArgumentValidator)(nil).ValidatePVC), arg0, arg1, arg2)\n}\n\n// ValidateStorageClass mocks base method.\nfunc (m *MockArgumentValidator) ValidateStorageClass(arg0 context.Context, arg1 string) (*v11.StorageClass, error) {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"ValidateStorageClass\", arg0, arg1)\n\tret0, _ := ret[0].(*v11.StorageClass)\n\tret1, _ := ret[1].(error)\n\treturn ret0, ret1\n}\n\n// ValidateStorageClass indicates an expected call of ValidateStorageClass.\nfunc (mr *MockArgumentValidatorMockRecorder) ValidateStorageClass(arg0, arg1 interface{}) *gomock.Call {\n\tmr.mock.ctrl.T.Helper()\n\treturn mr.mock.ctrl.RecordCallWithMethodType(mr.mock, \"ValidateStorageClass\", 
reflect.TypeOf((*MockArgumentValidator)(nil).ValidateStorageClass), arg0, arg1)\n}\n\n// ValidateVolumeSnapshot mocks base method.\nfunc (m *MockArgumentValidator) ValidateVolumeSnapshot(arg0 context.Context, arg1, arg2 string, arg3 *v12.GroupVersionForDiscovery) (*v1.VolumeSnapshot, error) {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"ValidateVolumeSnapshot\", arg0, arg1, arg2, arg3)\n\tret0, _ := ret[0].(*v1.VolumeSnapshot)\n\tret1, _ := ret[1].(error)\n\treturn ret0, ret1\n}\n\n// ValidateVolumeSnapshot indicates an expected call of ValidateVolumeSnapshot.\nfunc (mr *MockArgumentValidatorMockRecorder) ValidateVolumeSnapshot(arg0, arg1, arg2, arg3 interface{}) *gomock.Call {\n\tmr.mock.ctrl.T.Helper()\n\treturn mr.mock.ctrl.RecordCallWithMethodType(mr.mock, \"ValidateVolumeSnapshot\", reflect.TypeOf((*MockArgumentValidator)(nil).ValidateVolumeSnapshot), arg0, arg1, arg2, arg3)\n}\n\n// ValidateVolumeSnapshotClass mocks base method.\nfunc (m *MockArgumentValidator) ValidateVolumeSnapshotClass(arg0 context.Context, arg1 string, arg2 *v12.GroupVersionForDiscovery) (*unstructured.Unstructured, error) {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"ValidateVolumeSnapshotClass\", arg0, arg1, arg2)\n\tret0, _ := ret[0].(*unstructured.Unstructured)\n\tret1, _ := ret[1].(error)\n\treturn ret0, ret1\n}\n\n// ValidateVolumeSnapshotClass indicates an expected call of ValidateVolumeSnapshotClass.\nfunc (mr *MockArgumentValidatorMockRecorder) ValidateVolumeSnapshotClass(arg0, arg1, arg2 interface{}) *gomock.Call {\n\tmr.mock.ctrl.T.Helper()\n\treturn mr.mock.ctrl.RecordCallWithMethodType(mr.mock, \"ValidateVolumeSnapshotClass\", reflect.TypeOf((*MockArgumentValidator)(nil).ValidateVolumeSnapshotClass), arg0, arg1, arg2)\n}\n"
  },
  {
    "path": "pkg/csi/mocks/mock_cleaner.go",
    "content": "// Code generated by MockGen. DO NOT EDIT.\n// Source: github.com/kastenhq/kubestr/pkg/csi (interfaces: Cleaner)\n\n// Package mocks is a generated GoMock package.\npackage mocks\n\nimport (\n\tcontext \"context\"\n\treflect \"reflect\"\n\n\tgomock \"github.com/golang/mock/gomock\"\n\tv1 \"k8s.io/apimachinery/pkg/apis/meta/v1\"\n)\n\n// MockCleaner is a mock of Cleaner interface.\ntype MockCleaner struct {\n\tctrl     *gomock.Controller\n\trecorder *MockCleanerMockRecorder\n}\n\n// MockCleanerMockRecorder is the mock recorder for MockCleaner.\ntype MockCleanerMockRecorder struct {\n\tmock *MockCleaner\n}\n\n// NewMockCleaner creates a new mock instance.\nfunc NewMockCleaner(ctrl *gomock.Controller) *MockCleaner {\n\tmock := &MockCleaner{ctrl: ctrl}\n\tmock.recorder = &MockCleanerMockRecorder{mock}\n\treturn mock\n}\n\n// EXPECT returns an object that allows the caller to indicate expected use.\nfunc (m *MockCleaner) EXPECT() *MockCleanerMockRecorder {\n\treturn m.recorder\n}\n\n// DeletePVC mocks base method.\nfunc (m *MockCleaner) DeletePVC(arg0 context.Context, arg1, arg2 string) error {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"DeletePVC\", arg0, arg1, arg2)\n\tret0, _ := ret[0].(error)\n\treturn ret0\n}\n\n// DeletePVC indicates an expected call of DeletePVC.\nfunc (mr *MockCleanerMockRecorder) DeletePVC(arg0, arg1, arg2 interface{}) *gomock.Call {\n\tmr.mock.ctrl.T.Helper()\n\treturn mr.mock.ctrl.RecordCallWithMethodType(mr.mock, \"DeletePVC\", reflect.TypeOf((*MockCleaner)(nil).DeletePVC), arg0, arg1, arg2)\n}\n\n// DeletePod mocks base method.\nfunc (m *MockCleaner) DeletePod(arg0 context.Context, arg1, arg2 string) error {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"DeletePod\", arg0, arg1, arg2)\n\tret0, _ := ret[0].(error)\n\treturn ret0\n}\n\n// DeletePod indicates an expected call of DeletePod.\nfunc (mr *MockCleanerMockRecorder) DeletePod(arg0, arg1, arg2 interface{}) *gomock.Call {\n\tmr.mock.ctrl.T.Helper()\n\treturn 
mr.mock.ctrl.RecordCallWithMethodType(mr.mock, \"DeletePod\", reflect.TypeOf((*MockCleaner)(nil).DeletePod), arg0, arg1, arg2)\n}\n\n// DeleteSnapshot mocks base method.\nfunc (m *MockCleaner) DeleteSnapshot(arg0 context.Context, arg1, arg2 string, arg3 *v1.GroupVersionForDiscovery) error {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"DeleteSnapshot\", arg0, arg1, arg2, arg3)\n\tret0, _ := ret[0].(error)\n\treturn ret0\n}\n\n// DeleteSnapshot indicates an expected call of DeleteSnapshot.\nfunc (mr *MockCleanerMockRecorder) DeleteSnapshot(arg0, arg1, arg2, arg3 interface{}) *gomock.Call {\n\tmr.mock.ctrl.T.Helper()\n\treturn mr.mock.ctrl.RecordCallWithMethodType(mr.mock, \"DeleteSnapshot\", reflect.TypeOf((*MockCleaner)(nil).DeleteSnapshot), arg0, arg1, arg2, arg3)\n}\n"
  },
  {
    "path": "pkg/csi/mocks/mock_data_validator.go",
    "content": "// Code generated by MockGen. DO NOT EDIT.\n// Source: github.com/kastenhq/kubestr/pkg/csi (interfaces: DataValidator)\n\n// Package mocks is a generated GoMock package.\npackage mocks\n\nimport (\n\tcontext \"context\"\n\treflect \"reflect\"\n\n\tgomock \"github.com/golang/mock/gomock\"\n)\n\n// MockDataValidator is a mock of DataValidator interface.\ntype MockDataValidator struct {\n\tctrl     *gomock.Controller\n\trecorder *MockDataValidatorMockRecorder\n}\n\n// MockDataValidatorMockRecorder is the mock recorder for MockDataValidator.\ntype MockDataValidatorMockRecorder struct {\n\tmock *MockDataValidator\n}\n\n// NewMockDataValidator creates a new mock instance.\nfunc NewMockDataValidator(ctrl *gomock.Controller) *MockDataValidator {\n\tmock := &MockDataValidator{ctrl: ctrl}\n\tmock.recorder = &MockDataValidatorMockRecorder{mock}\n\treturn mock\n}\n\n// EXPECT returns an object that allows the caller to indicate expected use.\nfunc (m *MockDataValidator) EXPECT() *MockDataValidatorMockRecorder {\n\treturn m.recorder\n}\n\n// FetchPodData mocks base method.\nfunc (m *MockDataValidator) FetchPodData(arg0 context.Context, arg1, arg2 string) (string, error) {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"FetchPodData\", arg0, arg1, arg2)\n\tret0, _ := ret[0].(string)\n\tret1, _ := ret[1].(error)\n\treturn ret0, ret1\n}\n\n// FetchPodData indicates an expected call of FetchPodData.\nfunc (mr *MockDataValidatorMockRecorder) FetchPodData(arg0, arg1, arg2 interface{}) *gomock.Call {\n\tmr.mock.ctrl.T.Helper()\n\treturn mr.mock.ctrl.RecordCallWithMethodType(mr.mock, \"FetchPodData\", reflect.TypeOf((*MockDataValidator)(nil).FetchPodData), arg0, arg1, arg2)\n}\n"
  },
  {
    "path": "pkg/csi/mocks/mock_file_restore_stepper.go",
    "content": "// Code generated by MockGen. DO NOT EDIT.\n// Source: github.com/kastenhq/kubestr/pkg/csi (interfaces: FileRestoreStepper)\n\n// Package mocks is a generated GoMock package.\npackage mocks\n\nimport (\n\tcontext \"context\"\n\treflect \"reflect\"\n\n\tgomock \"github.com/golang/mock/gomock\"\n\ttypes \"github.com/kastenhq/kubestr/pkg/csi/types\"\n\tv1 \"github.com/kubernetes-csi/external-snapshotter/client/v4/apis/volumesnapshot/v1\"\n\tv10 \"k8s.io/api/core/v1\"\n\tv11 \"k8s.io/api/storage/v1\"\n)\n\n// MockFileRestoreStepper is a mock of FileRestoreStepper interface.\ntype MockFileRestoreStepper struct {\n\tctrl     *gomock.Controller\n\trecorder *MockFileRestoreStepperMockRecorder\n}\n\n// MockFileRestoreStepperMockRecorder is the mock recorder for MockFileRestoreStepper.\ntype MockFileRestoreStepperMockRecorder struct {\n\tmock *MockFileRestoreStepper\n}\n\n// NewMockFileRestoreStepper creates a new mock instance.\nfunc NewMockFileRestoreStepper(ctrl *gomock.Controller) *MockFileRestoreStepper {\n\tmock := &MockFileRestoreStepper{ctrl: ctrl}\n\tmock.recorder = &MockFileRestoreStepperMockRecorder{mock}\n\treturn mock\n}\n\n// EXPECT returns an object that allows the caller to indicate expected use.\nfunc (m *MockFileRestoreStepper) EXPECT() *MockFileRestoreStepperMockRecorder {\n\treturn m.recorder\n}\n\n// Cleanup mocks base method.\nfunc (m *MockFileRestoreStepper) Cleanup(arg0 context.Context, arg1 *types.FileRestoreArgs, arg2 *v10.PersistentVolumeClaim, arg3 *v10.Pod) {\n\tm.ctrl.T.Helper()\n\tm.ctrl.Call(m, \"Cleanup\", arg0, arg1, arg2, arg3)\n}\n\n// Cleanup indicates an expected call of Cleanup.\nfunc (mr *MockFileRestoreStepperMockRecorder) Cleanup(arg0, arg1, arg2, arg3 interface{}) *gomock.Call {\n\tmr.mock.ctrl.T.Helper()\n\treturn mr.mock.ctrl.RecordCallWithMethodType(mr.mock, \"Cleanup\", reflect.TypeOf((*MockFileRestoreStepper)(nil).Cleanup), arg0, arg1, arg2, arg3)\n}\n\n// CreateInspectorApplication mocks base method.\nfunc (m 
*MockFileRestoreStepper) CreateInspectorApplication(arg0 context.Context, arg1 *types.FileRestoreArgs, arg2 *v1.VolumeSnapshot, arg3, arg4 *v10.PersistentVolumeClaim, arg5 *v11.StorageClass) (*v10.Pod, *v10.PersistentVolumeClaim, string, error) {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"CreateInspectorApplication\", arg0, arg1, arg2, arg3, arg4, arg5)\n\tret0, _ := ret[0].(*v10.Pod)\n\tret1, _ := ret[1].(*v10.PersistentVolumeClaim)\n\tret2, _ := ret[2].(string)\n\tret3, _ := ret[3].(error)\n\treturn ret0, ret1, ret2, ret3\n}\n\n// CreateInspectorApplication indicates an expected call of CreateInspectorApplication.\nfunc (mr *MockFileRestoreStepperMockRecorder) CreateInspectorApplication(arg0, arg1, arg2, arg3, arg4, arg5 interface{}) *gomock.Call {\n\tmr.mock.ctrl.T.Helper()\n\treturn mr.mock.ctrl.RecordCallWithMethodType(mr.mock, \"CreateInspectorApplication\", reflect.TypeOf((*MockFileRestoreStepper)(nil).CreateInspectorApplication), arg0, arg1, arg2, arg3, arg4, arg5)\n}\n\n// ExecuteCopyCommand mocks base method.\nfunc (m *MockFileRestoreStepper) ExecuteCopyCommand(arg0 context.Context, arg1 *types.FileRestoreArgs, arg2 *v10.Pod, arg3 string) (string, error) {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"ExecuteCopyCommand\", arg0, arg1, arg2, arg3)\n\tret0, _ := ret[0].(string)\n\tret1, _ := ret[1].(error)\n\treturn ret0, ret1\n}\n\n// ExecuteCopyCommand indicates an expected call of ExecuteCopyCommand.\nfunc (mr *MockFileRestoreStepperMockRecorder) ExecuteCopyCommand(arg0, arg1, arg2, arg3 interface{}) *gomock.Call {\n\tmr.mock.ctrl.T.Helper()\n\treturn mr.mock.ctrl.RecordCallWithMethodType(mr.mock, \"ExecuteCopyCommand\", reflect.TypeOf((*MockFileRestoreStepper)(nil).ExecuteCopyCommand), arg0, arg1, arg2, arg3)\n}\n\n// PortForwardAPod mocks base method.\nfunc (m *MockFileRestoreStepper) PortForwardAPod(arg0 *v10.Pod, arg1 int) error {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"PortForwardAPod\", arg0, arg1)\n\tret0, _ := 
ret[0].(error)\n\treturn ret0\n}\n\n// PortForwardAPod indicates an expected call of PortForwardAPod.\nfunc (mr *MockFileRestoreStepperMockRecorder) PortForwardAPod(arg0, arg1 interface{}) *gomock.Call {\n\tmr.mock.ctrl.T.Helper()\n\treturn mr.mock.ctrl.RecordCallWithMethodType(mr.mock, \"PortForwardAPod\", reflect.TypeOf((*MockFileRestoreStepper)(nil).PortForwardAPod), arg0, arg1)\n}\n\n// ValidateArgs mocks base method.\nfunc (m *MockFileRestoreStepper) ValidateArgs(arg0 context.Context, arg1 *types.FileRestoreArgs) (*v1.VolumeSnapshot, *v10.PersistentVolumeClaim, *v10.PersistentVolumeClaim, *v11.StorageClass, error) {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"ValidateArgs\", arg0, arg1)\n\tret0, _ := ret[0].(*v1.VolumeSnapshot)\n\tret1, _ := ret[1].(*v10.PersistentVolumeClaim)\n\tret2, _ := ret[2].(*v10.PersistentVolumeClaim)\n\tret3, _ := ret[3].(*v11.StorageClass)\n\tret4, _ := ret[4].(error)\n\treturn ret0, ret1, ret2, ret3, ret4\n}\n\n// ValidateArgs indicates an expected call of ValidateArgs.\nfunc (mr *MockFileRestoreStepperMockRecorder) ValidateArgs(arg0, arg1 interface{}) *gomock.Call {\n\tmr.mock.ctrl.T.Helper()\n\treturn mr.mock.ctrl.RecordCallWithMethodType(mr.mock, \"ValidateArgs\", reflect.TypeOf((*MockFileRestoreStepper)(nil).ValidateArgs), arg0, arg1)\n}\n"
  },
  {
    "path": "pkg/csi/mocks/mock_kube_executor.go",
    "content": "// Code generated by MockGen. DO NOT EDIT.\n// Source: github.com/kastenhq/kubestr/pkg/csi (interfaces: KubeExecutor)\n\n// Package mocks is a generated GoMock package.\npackage mocks\n\nimport (\n\tcontext \"context\"\n\treflect \"reflect\"\n\n\tgomock \"github.com/golang/mock/gomock\"\n)\n\n// MockKubeExecutor is a mock of KubeExecutor interface.\ntype MockKubeExecutor struct {\n\tctrl     *gomock.Controller\n\trecorder *MockKubeExecutorMockRecorder\n}\n\n// MockKubeExecutorMockRecorder is the mock recorder for MockKubeExecutor.\ntype MockKubeExecutorMockRecorder struct {\n\tmock *MockKubeExecutor\n}\n\n// NewMockKubeExecutor creates a new mock instance.\nfunc NewMockKubeExecutor(ctrl *gomock.Controller) *MockKubeExecutor {\n\tmock := &MockKubeExecutor{ctrl: ctrl}\n\tmock.recorder = &MockKubeExecutorMockRecorder{mock}\n\treturn mock\n}\n\n// EXPECT returns an object that allows the caller to indicate expected use.\nfunc (m *MockKubeExecutor) EXPECT() *MockKubeExecutorMockRecorder {\n\treturn m.recorder\n}\n\n// Exec mocks base method.\nfunc (m *MockKubeExecutor) Exec(arg0 context.Context, arg1, arg2, arg3 string, arg4 []string) (string, error) {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"Exec\", arg0, arg1, arg2, arg3, arg4)\n\tret0, _ := ret[0].(string)\n\tret1, _ := ret[1].(error)\n\treturn ret0, ret1\n}\n\n// Exec indicates an expected call of Exec.\nfunc (mr *MockKubeExecutorMockRecorder) Exec(arg0, arg1, arg2, arg3, arg4 interface{}) *gomock.Call {\n\tmr.mock.ctrl.T.Helper()\n\treturn mr.mock.ctrl.RecordCallWithMethodType(mr.mock, \"Exec\", reflect.TypeOf((*MockKubeExecutor)(nil).Exec), arg0, arg1, arg2, arg3, arg4)\n}\n"
  },
  {
    "path": "pkg/csi/mocks/mock_port_forwarder.go",
    "content": "// Code generated by MockGen. DO NOT EDIT.\n// Source: github.com/kastenhq/kubestr/pkg/csi (interfaces: PortForwarder)\n\n// Package mocks is a generated GoMock package.\npackage mocks\n\nimport (\n\treflect \"reflect\"\n\n\tgomock \"github.com/golang/mock/gomock\"\n\ttypes \"github.com/kastenhq/kubestr/pkg/csi/types\"\n\trest \"k8s.io/client-go/rest\"\n)\n\n// MockPortForwarder is a mock of PortForwarder interface.\ntype MockPortForwarder struct {\n\tctrl     *gomock.Controller\n\trecorder *MockPortForwarderMockRecorder\n}\n\n// MockPortForwarderMockRecorder is the mock recorder for MockPortForwarder.\ntype MockPortForwarderMockRecorder struct {\n\tmock *MockPortForwarder\n}\n\n// NewMockPortForwarder creates a new mock instance.\nfunc NewMockPortForwarder(ctrl *gomock.Controller) *MockPortForwarder {\n\tmock := &MockPortForwarder{ctrl: ctrl}\n\tmock.recorder = &MockPortForwarderMockRecorder{mock}\n\treturn mock\n}\n\n// EXPECT returns an object that allows the caller to indicate expected use.\nfunc (m *MockPortForwarder) EXPECT() *MockPortForwarderMockRecorder {\n\treturn m.recorder\n}\n\n// FetchRestConfig mocks base method.\nfunc (m *MockPortForwarder) FetchRestConfig() (*rest.Config, error) {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"FetchRestConfig\")\n\tret0, _ := ret[0].(*rest.Config)\n\tret1, _ := ret[1].(error)\n\treturn ret0, ret1\n}\n\n// FetchRestConfig indicates an expected call of FetchRestConfig.\nfunc (mr *MockPortForwarderMockRecorder) FetchRestConfig() *gomock.Call {\n\tmr.mock.ctrl.T.Helper()\n\treturn mr.mock.ctrl.RecordCallWithMethodType(mr.mock, \"FetchRestConfig\", reflect.TypeOf((*MockPortForwarder)(nil).FetchRestConfig))\n}\n\n// PortForwardAPod mocks base method.\nfunc (m *MockPortForwarder) PortForwardAPod(arg0 *types.PortForwardAPodRequest) error {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"PortForwardAPod\", arg0)\n\tret0, _ := ret[0].(error)\n\treturn ret0\n}\n\n// PortForwardAPod indicates an expected call 
of PortForwardAPod.\nfunc (mr *MockPortForwarderMockRecorder) PortForwardAPod(arg0 interface{}) *gomock.Call {\n\tmr.mock.ctrl.T.Helper()\n\treturn mr.mock.ctrl.RecordCallWithMethodType(mr.mock, \"PortForwardAPod\", reflect.TypeOf((*MockPortForwarder)(nil).PortForwardAPod), arg0)\n}\n"
  },
  {
    "path": "pkg/csi/mocks/mock_pvc_browser_stepper.go",
    "content": "// Code generated by MockGen. DO NOT EDIT.\n// Source: github.com/kastenhq/kubestr/pkg/csi (interfaces: PVCBrowserStepper)\n\n// Package mocks is a generated GoMock package.\npackage mocks\n\nimport (\n\tcontext \"context\"\n\treflect \"reflect\"\n\n\tgomock \"github.com/golang/mock/gomock\"\n\ttypes \"github.com/kastenhq/kubestr/pkg/csi/types\"\n\tv1 \"github.com/kubernetes-csi/external-snapshotter/client/v4/apis/volumesnapshot/v1\"\n\tv10 \"k8s.io/api/core/v1\"\n\tv11 \"k8s.io/api/storage/v1\"\n)\n\n// MockPVCBrowserStepper is a mock of PVCBrowserStepper interface.\ntype MockPVCBrowserStepper struct {\n\tctrl     *gomock.Controller\n\trecorder *MockPVCBrowserStepperMockRecorder\n}\n\n// MockPVCBrowserStepperMockRecorder is the mock recorder for MockPVCBrowserStepper.\ntype MockPVCBrowserStepperMockRecorder struct {\n\tmock *MockPVCBrowserStepper\n}\n\n// NewMockPVCBrowserStepper creates a new mock instance.\nfunc NewMockPVCBrowserStepper(ctrl *gomock.Controller) *MockPVCBrowserStepper {\n\tmock := &MockPVCBrowserStepper{ctrl: ctrl}\n\tmock.recorder = &MockPVCBrowserStepperMockRecorder{mock}\n\treturn mock\n}\n\n// EXPECT returns an object that allows the caller to indicate expected use.\nfunc (m *MockPVCBrowserStepper) EXPECT() *MockPVCBrowserStepperMockRecorder {\n\treturn m.recorder\n}\n\n// Cleanup mocks base method.\nfunc (m *MockPVCBrowserStepper) Cleanup(arg0 context.Context, arg1 *v10.PersistentVolumeClaim, arg2 *v10.Pod, arg3 *v1.VolumeSnapshot) {\n\tm.ctrl.T.Helper()\n\tm.ctrl.Call(m, \"Cleanup\", arg0, arg1, arg2, arg3)\n}\n\n// Cleanup indicates an expected call of Cleanup.\nfunc (mr *MockPVCBrowserStepperMockRecorder) Cleanup(arg0, arg1, arg2, arg3 interface{}) *gomock.Call {\n\tmr.mock.ctrl.T.Helper()\n\treturn mr.mock.ctrl.RecordCallWithMethodType(mr.mock, \"Cleanup\", reflect.TypeOf((*MockPVCBrowserStepper)(nil).Cleanup), arg0, arg1, arg2, arg3)\n}\n\n// CreateInspectorApplication mocks base method.\nfunc (m *MockPVCBrowserStepper) 
CreateInspectorApplication(arg0 context.Context, arg1 *types.PVCBrowseArgs, arg2 *v1.VolumeSnapshot, arg3 *v11.StorageClass) (*v10.Pod, *v10.PersistentVolumeClaim, error) {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"CreateInspectorApplication\", arg0, arg1, arg2, arg3)\n\tret0, _ := ret[0].(*v10.Pod)\n\tret1, _ := ret[1].(*v10.PersistentVolumeClaim)\n\tret2, _ := ret[2].(error)\n\treturn ret0, ret1, ret2\n}\n\n// CreateInspectorApplication indicates an expected call of CreateInspectorApplication.\nfunc (mr *MockPVCBrowserStepperMockRecorder) CreateInspectorApplication(arg0, arg1, arg2, arg3 interface{}) *gomock.Call {\n\tmr.mock.ctrl.T.Helper()\n\treturn mr.mock.ctrl.RecordCallWithMethodType(mr.mock, \"CreateInspectorApplication\", reflect.TypeOf((*MockPVCBrowserStepper)(nil).CreateInspectorApplication), arg0, arg1, arg2, arg3)\n}\n\n// ExecuteTreeCommand mocks base method.\nfunc (m *MockPVCBrowserStepper) ExecuteTreeCommand(arg0 context.Context, arg1 *types.PVCBrowseArgs, arg2 *v10.Pod) (string, error) {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"ExecuteTreeCommand\", arg0, arg1, arg2)\n\tret0, _ := ret[0].(string)\n\tret1, _ := ret[1].(error)\n\treturn ret0, ret1\n}\n\n// ExecuteTreeCommand indicates an expected call of ExecuteTreeCommand.\nfunc (mr *MockPVCBrowserStepperMockRecorder) ExecuteTreeCommand(arg0, arg1, arg2 interface{}) *gomock.Call {\n\tmr.mock.ctrl.T.Helper()\n\treturn mr.mock.ctrl.RecordCallWithMethodType(mr.mock, \"ExecuteTreeCommand\", reflect.TypeOf((*MockPVCBrowserStepper)(nil).ExecuteTreeCommand), arg0, arg1, arg2)\n}\n\n// PortForwardAPod mocks base method.\nfunc (m *MockPVCBrowserStepper) PortForwardAPod(arg0 context.Context, arg1 *v10.Pod, arg2 int) error {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"PortForwardAPod\", arg0, arg1, arg2)\n\tret0, _ := ret[0].(error)\n\treturn ret0\n}\n\n// PortForwardAPod indicates an expected call of PortForwardAPod.\nfunc (mr *MockPVCBrowserStepperMockRecorder) PortForwardAPod(arg0, arg1, 
arg2 interface{}) *gomock.Call {\n\tmr.mock.ctrl.T.Helper()\n\treturn mr.mock.ctrl.RecordCallWithMethodType(mr.mock, \"PortForwardAPod\", reflect.TypeOf((*MockPVCBrowserStepper)(nil).PortForwardAPod), arg0, arg1, arg2)\n}\n\n// SnapshotPVC mocks base method.\nfunc (m *MockPVCBrowserStepper) SnapshotPVC(arg0 context.Context, arg1 *types.PVCBrowseArgs, arg2 string) (*v1.VolumeSnapshot, error) {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"SnapshotPVC\", arg0, arg1, arg2)\n\tret0, _ := ret[0].(*v1.VolumeSnapshot)\n\tret1, _ := ret[1].(error)\n\treturn ret0, ret1\n}\n\n// SnapshotPVC indicates an expected call of SnapshotPVC.\nfunc (mr *MockPVCBrowserStepperMockRecorder) SnapshotPVC(arg0, arg1, arg2 interface{}) *gomock.Call {\n\tmr.mock.ctrl.T.Helper()\n\treturn mr.mock.ctrl.RecordCallWithMethodType(mr.mock, \"SnapshotPVC\", reflect.TypeOf((*MockPVCBrowserStepper)(nil).SnapshotPVC), arg0, arg1, arg2)\n}\n\n// ValidateArgs mocks base method.\nfunc (m *MockPVCBrowserStepper) ValidateArgs(arg0 context.Context, arg1 *types.PVCBrowseArgs) (*v11.StorageClass, error) {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"ValidateArgs\", arg0, arg1)\n\tret0, _ := ret[0].(*v11.StorageClass)\n\tret1, _ := ret[1].(error)\n\treturn ret0, ret1\n}\n\n// ValidateArgs indicates an expected call of ValidateArgs.\nfunc (mr *MockPVCBrowserStepperMockRecorder) ValidateArgs(arg0, arg1 interface{}) *gomock.Call {\n\tmr.mock.ctrl.T.Helper()\n\treturn mr.mock.ctrl.RecordCallWithMethodType(mr.mock, \"ValidateArgs\", reflect.TypeOf((*MockPVCBrowserStepper)(nil).ValidateArgs), arg0, arg1)\n}\n"
  },
  {
    "path": "pkg/csi/mocks/mock_snapshot_browser_stepper.go",
    "content": "// Code generated by MockGen. DO NOT EDIT.\n// Source: github.com/kastenhq/kubestr/pkg/csi (interfaces: SnapshotBrowserStepper)\n\n// Package mocks is a generated GoMock package.\npackage mocks\n\nimport (\n\tcontext \"context\"\n\treflect \"reflect\"\n\n\tgomock \"github.com/golang/mock/gomock\"\n\ttypes \"github.com/kastenhq/kubestr/pkg/csi/types\"\n\tv1 \"github.com/kubernetes-csi/external-snapshotter/client/v4/apis/volumesnapshot/v1\"\n\tv10 \"k8s.io/api/core/v1\"\n\tv11 \"k8s.io/api/storage/v1\"\n)\n\n// MockSnapshotBrowserStepper is a mock of SnapshotBrowserStepper interface.\ntype MockSnapshotBrowserStepper struct {\n\tctrl     *gomock.Controller\n\trecorder *MockSnapshotBrowserStepperMockRecorder\n}\n\n// MockSnapshotBrowserStepperMockRecorder is the mock recorder for MockSnapshotBrowserStepper.\ntype MockSnapshotBrowserStepperMockRecorder struct {\n\tmock *MockSnapshotBrowserStepper\n}\n\n// NewMockSnapshotBrowserStepper creates a new mock instance.\nfunc NewMockSnapshotBrowserStepper(ctrl *gomock.Controller) *MockSnapshotBrowserStepper {\n\tmock := &MockSnapshotBrowserStepper{ctrl: ctrl}\n\tmock.recorder = &MockSnapshotBrowserStepperMockRecorder{mock}\n\treturn mock\n}\n\n// EXPECT returns an object that allows the caller to indicate expected use.\nfunc (m *MockSnapshotBrowserStepper) EXPECT() *MockSnapshotBrowserStepperMockRecorder {\n\treturn m.recorder\n}\n\n// Cleanup mocks base method.\nfunc (m *MockSnapshotBrowserStepper) Cleanup(arg0 context.Context, arg1 *v10.PersistentVolumeClaim, arg2 *v10.Pod) {\n\tm.ctrl.T.Helper()\n\tm.ctrl.Call(m, \"Cleanup\", arg0, arg1, arg2)\n}\n\n// Cleanup indicates an expected call of Cleanup.\nfunc (mr *MockSnapshotBrowserStepperMockRecorder) Cleanup(arg0, arg1, arg2 interface{}) *gomock.Call {\n\tmr.mock.ctrl.T.Helper()\n\treturn mr.mock.ctrl.RecordCallWithMethodType(mr.mock, \"Cleanup\", reflect.TypeOf((*MockSnapshotBrowserStepper)(nil).Cleanup), arg0, arg1, arg2)\n}\n\n// CreateInspectorApplication 
mocks base method.\nfunc (m *MockSnapshotBrowserStepper) CreateInspectorApplication(arg0 context.Context, arg1 *types.SnapshotBrowseArgs, arg2 *v1.VolumeSnapshot, arg3 *v11.StorageClass) (*v10.Pod, *v10.PersistentVolumeClaim, error) {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"CreateInspectorApplication\", arg0, arg1, arg2, arg3)\n\tret0, _ := ret[0].(*v10.Pod)\n\tret1, _ := ret[1].(*v10.PersistentVolumeClaim)\n\tret2, _ := ret[2].(error)\n\treturn ret0, ret1, ret2\n}\n\n// CreateInspectorApplication indicates an expected call of CreateInspectorApplication.\nfunc (mr *MockSnapshotBrowserStepperMockRecorder) CreateInspectorApplication(arg0, arg1, arg2, arg3 interface{}) *gomock.Call {\n\tmr.mock.ctrl.T.Helper()\n\treturn mr.mock.ctrl.RecordCallWithMethodType(mr.mock, \"CreateInspectorApplication\", reflect.TypeOf((*MockSnapshotBrowserStepper)(nil).CreateInspectorApplication), arg0, arg1, arg2, arg3)\n}\n\n// ExecuteTreeCommand mocks base method.\nfunc (m *MockSnapshotBrowserStepper) ExecuteTreeCommand(arg0 context.Context, arg1 *types.SnapshotBrowseArgs, arg2 *v10.Pod) (string, error) {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"ExecuteTreeCommand\", arg0, arg1, arg2)\n\tret0, _ := ret[0].(string)\n\tret1, _ := ret[1].(error)\n\treturn ret0, ret1\n}\n\n// ExecuteTreeCommand indicates an expected call of ExecuteTreeCommand.\nfunc (mr *MockSnapshotBrowserStepperMockRecorder) ExecuteTreeCommand(arg0, arg1, arg2 interface{}) *gomock.Call {\n\tmr.mock.ctrl.T.Helper()\n\treturn mr.mock.ctrl.RecordCallWithMethodType(mr.mock, \"ExecuteTreeCommand\", reflect.TypeOf((*MockSnapshotBrowserStepper)(nil).ExecuteTreeCommand), arg0, arg1, arg2)\n}\n\n// PortForwardAPod mocks base method.\nfunc (m *MockSnapshotBrowserStepper) PortForwardAPod(arg0 context.Context, arg1 *v10.Pod, arg2 int) error {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"PortForwardAPod\", arg0, arg1, arg2)\n\tret0, _ := ret[0].(error)\n\treturn ret0\n}\n\n// PortForwardAPod indicates an expected call 
of PortForwardAPod.\nfunc (mr *MockSnapshotBrowserStepperMockRecorder) PortForwardAPod(arg0, arg1, arg2 interface{}) *gomock.Call {\n\tmr.mock.ctrl.T.Helper()\n\treturn mr.mock.ctrl.RecordCallWithMethodType(mr.mock, \"PortForwardAPod\", reflect.TypeOf((*MockSnapshotBrowserStepper)(nil).PortForwardAPod), arg0, arg1, arg2)\n}\n\n// ValidateArgs mocks base method.\nfunc (m *MockSnapshotBrowserStepper) ValidateArgs(arg0 context.Context, arg1 *types.SnapshotBrowseArgs) (*v1.VolumeSnapshot, *v11.StorageClass, error) {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"ValidateArgs\", arg0, arg1)\n\tret0, _ := ret[0].(*v1.VolumeSnapshot)\n\tret1, _ := ret[1].(*v11.StorageClass)\n\tret2, _ := ret[2].(error)\n\treturn ret0, ret1, ret2\n}\n\n// ValidateArgs indicates an expected call of ValidateArgs.\nfunc (mr *MockSnapshotBrowserStepperMockRecorder) ValidateArgs(arg0, arg1 interface{}) *gomock.Call {\n\tmr.mock.ctrl.T.Helper()\n\treturn mr.mock.ctrl.RecordCallWithMethodType(mr.mock, \"ValidateArgs\", reflect.TypeOf((*MockSnapshotBrowserStepper)(nil).ValidateArgs), arg0, arg1)\n}\n"
  },
  {
    "path": "pkg/csi/mocks/mock_snapshot_creator.go",
    "content": "// Code generated by MockGen. DO NOT EDIT.\n// Source: github.com/kastenhq/kubestr/pkg/csi (interfaces: SnapshotCreator)\n\n// Package mocks is a generated GoMock package.\npackage mocks\n\nimport (\n\tcontext \"context\"\n\treflect \"reflect\"\n\n\tgomock \"github.com/golang/mock/gomock\"\n\tsnapshot \"github.com/kanisterio/kanister/pkg/kube/snapshot\"\n\ttypes \"github.com/kastenhq/kubestr/pkg/csi/types\"\n\tv1 \"github.com/kubernetes-csi/external-snapshotter/client/v4/apis/volumesnapshot/v1\"\n\tv10 \"k8s.io/apimachinery/pkg/apis/meta/v1\"\n)\n\n// MockSnapshotCreator is a mock of SnapshotCreator interface.\ntype MockSnapshotCreator struct {\n\tctrl     *gomock.Controller\n\trecorder *MockSnapshotCreatorMockRecorder\n}\n\n// MockSnapshotCreatorMockRecorder is the mock recorder for MockSnapshotCreator.\ntype MockSnapshotCreatorMockRecorder struct {\n\tmock *MockSnapshotCreator\n}\n\n// NewMockSnapshotCreator creates a new mock instance.\nfunc NewMockSnapshotCreator(ctrl *gomock.Controller) *MockSnapshotCreator {\n\tmock := &MockSnapshotCreator{ctrl: ctrl}\n\tmock.recorder = &MockSnapshotCreatorMockRecorder{mock}\n\treturn mock\n}\n\n// EXPECT returns an object that allows the caller to indicate expected use.\nfunc (m *MockSnapshotCreator) EXPECT() *MockSnapshotCreatorMockRecorder {\n\treturn m.recorder\n}\n\n// CreateFromSourceCheck mocks base method.\nfunc (m *MockSnapshotCreator) CreateFromSourceCheck(arg0 context.Context, arg1 snapshot.Snapshotter, arg2 *types.CreateFromSourceCheckArgs, arg3 *v10.GroupVersionForDiscovery) error {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"CreateFromSourceCheck\", arg0, arg1, arg2, arg3)\n\tret0, _ := ret[0].(error)\n\treturn ret0\n}\n\n// CreateFromSourceCheck indicates an expected call of CreateFromSourceCheck.\nfunc (mr *MockSnapshotCreatorMockRecorder) CreateFromSourceCheck(arg0, arg1, arg2, arg3 interface{}) *gomock.Call {\n\tmr.mock.ctrl.T.Helper()\n\treturn 
mr.mock.ctrl.RecordCallWithMethodType(mr.mock, \"CreateFromSourceCheck\", reflect.TypeOf((*MockSnapshotCreator)(nil).CreateFromSourceCheck), arg0, arg1, arg2, arg3)\n}\n\n// CreateSnapshot mocks base method.\nfunc (m *MockSnapshotCreator) CreateSnapshot(arg0 context.Context, arg1 snapshot.Snapshotter, arg2 *types.CreateSnapshotArgs) (*v1.VolumeSnapshot, error) {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"CreateSnapshot\", arg0, arg1, arg2)\n\tret0, _ := ret[0].(*v1.VolumeSnapshot)\n\tret1, _ := ret[1].(error)\n\treturn ret0, ret1\n}\n\n// CreateSnapshot indicates an expected call of CreateSnapshot.\nfunc (mr *MockSnapshotCreatorMockRecorder) CreateSnapshot(arg0, arg1, arg2 interface{}) *gomock.Call {\n\tmr.mock.ctrl.T.Helper()\n\treturn mr.mock.ctrl.RecordCallWithMethodType(mr.mock, \"CreateSnapshot\", reflect.TypeOf((*MockSnapshotCreator)(nil).CreateSnapshot), arg0, arg1, arg2)\n}\n\n// NewSnapshotter mocks base method.\nfunc (m *MockSnapshotCreator) NewSnapshotter() (snapshot.Snapshotter, error) {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"NewSnapshotter\")\n\tret0, _ := ret[0].(snapshot.Snapshotter)\n\tret1, _ := ret[1].(error)\n\treturn ret0, ret1\n}\n\n// NewSnapshotter indicates an expected call of NewSnapshotter.\nfunc (mr *MockSnapshotCreatorMockRecorder) NewSnapshotter() *gomock.Call {\n\tmr.mock.ctrl.T.Helper()\n\treturn mr.mock.ctrl.RecordCallWithMethodType(mr.mock, \"NewSnapshotter\", reflect.TypeOf((*MockSnapshotCreator)(nil).NewSnapshotter))\n}\n"
  },
  {
    "path": "pkg/csi/mocks/mock_snapshot_restore_stepper.go",
    "content": "// Code generated by MockGen. DO NOT EDIT.\n// Source: github.com/kastenhq/kubestr/pkg/csi (interfaces: SnapshotRestoreStepper)\n\n// Package mocks is a generated GoMock package.\npackage mocks\n\nimport (\n\tcontext \"context\"\n\treflect \"reflect\"\n\n\tgomock \"github.com/golang/mock/gomock\"\n\ttypes \"github.com/kastenhq/kubestr/pkg/csi/types\"\n\tv1 \"github.com/kubernetes-csi/external-snapshotter/client/v4/apis/volumesnapshot/v1\"\n\tv10 \"k8s.io/api/core/v1\"\n)\n\n// MockSnapshotRestoreStepper is a mock of SnapshotRestoreStepper interface.\ntype MockSnapshotRestoreStepper struct {\n\tctrl     *gomock.Controller\n\trecorder *MockSnapshotRestoreStepperMockRecorder\n}\n\n// MockSnapshotRestoreStepperMockRecorder is the mock recorder for MockSnapshotRestoreStepper.\ntype MockSnapshotRestoreStepperMockRecorder struct {\n\tmock *MockSnapshotRestoreStepper\n}\n\n// NewMockSnapshotRestoreStepper creates a new mock instance.\nfunc NewMockSnapshotRestoreStepper(ctrl *gomock.Controller) *MockSnapshotRestoreStepper {\n\tmock := &MockSnapshotRestoreStepper{ctrl: ctrl}\n\tmock.recorder = &MockSnapshotRestoreStepperMockRecorder{mock}\n\treturn mock\n}\n\n// EXPECT returns an object that allows the caller to indicate expected use.\nfunc (m *MockSnapshotRestoreStepper) EXPECT() *MockSnapshotRestoreStepperMockRecorder {\n\treturn m.recorder\n}\n\n// Cleanup mocks base method.\nfunc (m *MockSnapshotRestoreStepper) Cleanup(arg0 context.Context, arg1 *types.CSISnapshotRestoreResults) {\n\tm.ctrl.T.Helper()\n\tm.ctrl.Call(m, \"Cleanup\", arg0, arg1)\n}\n\n// Cleanup indicates an expected call of Cleanup.\nfunc (mr *MockSnapshotRestoreStepperMockRecorder) Cleanup(arg0, arg1 interface{}) *gomock.Call {\n\tmr.mock.ctrl.T.Helper()\n\treturn mr.mock.ctrl.RecordCallWithMethodType(mr.mock, \"Cleanup\", reflect.TypeOf((*MockSnapshotRestoreStepper)(nil).Cleanup), arg0, arg1)\n}\n\n// CreateApplication mocks base method.\nfunc (m *MockSnapshotRestoreStepper) 
CreateApplication(arg0 context.Context, arg1 *types.CSISnapshotRestoreArgs, arg2 string) (*v10.Pod, *v10.PersistentVolumeClaim, error) {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"CreateApplication\", arg0, arg1, arg2)\n\tret0, _ := ret[0].(*v10.Pod)\n\tret1, _ := ret[1].(*v10.PersistentVolumeClaim)\n\tret2, _ := ret[2].(error)\n\treturn ret0, ret1, ret2\n}\n\n// CreateApplication indicates an expected call of CreateApplication.\nfunc (mr *MockSnapshotRestoreStepperMockRecorder) CreateApplication(arg0, arg1, arg2 interface{}) *gomock.Call {\n\tmr.mock.ctrl.T.Helper()\n\treturn mr.mock.ctrl.RecordCallWithMethodType(mr.mock, \"CreateApplication\", reflect.TypeOf((*MockSnapshotRestoreStepper)(nil).CreateApplication), arg0, arg1, arg2)\n}\n\n// RestoreApplication mocks base method.\nfunc (m *MockSnapshotRestoreStepper) RestoreApplication(arg0 context.Context, arg1 *types.CSISnapshotRestoreArgs, arg2 *v1.VolumeSnapshot) (*v10.Pod, *v10.PersistentVolumeClaim, error) {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"RestoreApplication\", arg0, arg1, arg2)\n\tret0, _ := ret[0].(*v10.Pod)\n\tret1, _ := ret[1].(*v10.PersistentVolumeClaim)\n\tret2, _ := ret[2].(error)\n\treturn ret0, ret1, ret2\n}\n\n// RestoreApplication indicates an expected call of RestoreApplication.\nfunc (mr *MockSnapshotRestoreStepperMockRecorder) RestoreApplication(arg0, arg1, arg2 interface{}) *gomock.Call {\n\tmr.mock.ctrl.T.Helper()\n\treturn mr.mock.ctrl.RecordCallWithMethodType(mr.mock, \"RestoreApplication\", reflect.TypeOf((*MockSnapshotRestoreStepper)(nil).RestoreApplication), arg0, arg1, arg2)\n}\n\n// SnapshotApplication mocks base method.\nfunc (m *MockSnapshotRestoreStepper) SnapshotApplication(arg0 context.Context, arg1 *types.CSISnapshotRestoreArgs, arg2 *v10.PersistentVolumeClaim, arg3 string) (*v1.VolumeSnapshot, error) {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"SnapshotApplication\", arg0, arg1, arg2, arg3)\n\tret0, _ := ret[0].(*v1.VolumeSnapshot)\n\tret1, _ := 
ret[1].(error)\n\treturn ret0, ret1\n}\n\n// SnapshotApplication indicates an expected call of SnapshotApplication.\nfunc (mr *MockSnapshotRestoreStepperMockRecorder) SnapshotApplication(arg0, arg1, arg2, arg3 interface{}) *gomock.Call {\n\tmr.mock.ctrl.T.Helper()\n\treturn mr.mock.ctrl.RecordCallWithMethodType(mr.mock, \"SnapshotApplication\", reflect.TypeOf((*MockSnapshotRestoreStepper)(nil).SnapshotApplication), arg0, arg1, arg2, arg3)\n}\n\n// ValidateArgs mocks base method.\nfunc (m *MockSnapshotRestoreStepper) ValidateArgs(arg0 context.Context, arg1 *types.CSISnapshotRestoreArgs) error {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"ValidateArgs\", arg0, arg1)\n\tret0, _ := ret[0].(error)\n\treturn ret0\n}\n\n// ValidateArgs indicates an expected call of ValidateArgs.\nfunc (mr *MockSnapshotRestoreStepperMockRecorder) ValidateArgs(arg0, arg1 interface{}) *gomock.Call {\n\tmr.mock.ctrl.T.Helper()\n\treturn mr.mock.ctrl.RecordCallWithMethodType(mr.mock, \"ValidateArgs\", reflect.TypeOf((*MockSnapshotRestoreStepper)(nil).ValidateArgs), arg0, arg1)\n}\n\n// ValidateData mocks base method.\nfunc (m *MockSnapshotRestoreStepper) ValidateData(arg0 context.Context, arg1 *v10.Pod, arg2 string) error {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"ValidateData\", arg0, arg1, arg2)\n\tret0, _ := ret[0].(error)\n\treturn ret0\n}\n\n// ValidateData indicates an expected call of ValidateData.\nfunc (mr *MockSnapshotRestoreStepperMockRecorder) ValidateData(arg0, arg1, arg2 interface{}) *gomock.Call {\n\tmr.mock.ctrl.T.Helper()\n\treturn mr.mock.ctrl.RecordCallWithMethodType(mr.mock, \"ValidateData\", reflect.TypeOf((*MockSnapshotRestoreStepper)(nil).ValidateData), arg0, arg1, arg2)\n}\n"
  },
  {
    "path": "pkg/csi/pvc_inspector.go",
    "content": "package csi\n\nimport (\n\t\"bytes\"\n\t\"context\"\n\t\"fmt\"\n\t\"log\"\n\t\"os\"\n\t\"os/exec\"\n\t\"os/signal\"\n\t\"runtime\"\n\t\"sync\"\n\t\"syscall\"\n\t\"time\"\n\n\t\"github.com/kastenhq/kubestr/pkg/csi/types\"\n\tsnapv1 \"github.com/kubernetes-csi/external-snapshotter/client/v4/apis/volumesnapshot/v1\"\n\t\"github.com/pkg/errors\"\n\tv1 \"k8s.io/api/core/v1\"\n\tsv1 \"k8s.io/api/storage/v1\"\n\tmetav1 \"k8s.io/apimachinery/pkg/apis/meta/v1\"\n\t\"k8s.io/client-go/dynamic\"\n\t\"k8s.io/client-go/kubernetes\"\n)\n\ntype PVCBrowseRunner struct {\n\tKubeCli      kubernetes.Interface\n\tDynCli       dynamic.Interface\n\tbrowserSteps PVCBrowserStepper\n\tpvc          *v1.PersistentVolumeClaim\n\tpod          *v1.Pod\n\tsnapshot     *snapv1.VolumeSnapshot\n}\n\nfunc (r *PVCBrowseRunner) RunPVCBrowse(ctx context.Context, args *types.PVCBrowseArgs) error {\n\tr.browserSteps = &pvcBrowserSteps{\n\t\tvalidateOps: &validateOperations{\n\t\t\tkubeCli: r.KubeCli,\n\t\t\tdynCli:  r.DynCli,\n\t\t},\n\t\tversionFetchOps: &apiVersionFetch{\n\t\t\tkubeCli: r.KubeCli,\n\t\t},\n\t\tcreateAppOps: &applicationCreate{\n\t\t\tkubeCli: r.KubeCli,\n\t\t},\n\t\tsnapshotCreateOps: &snapshotCreate{\n\t\t\tkubeCli: r.KubeCli,\n\t\t\tdynCli:  r.DynCli,\n\t\t},\n\t\tportForwardOps: &portforward{},\n\t\tkubeExecutor: &kubeExec{\n\t\t\tkubeCli: r.KubeCli,\n\t\t},\n\t\tcleanerOps: &cleanse{\n\t\t\tkubeCli: r.KubeCli,\n\t\t\tdynCli:  r.DynCli,\n\t\t},\n\t}\n\tif args.ShowTree {\n\t\tfmt.Println(\"Show Tree works for PVC!\")\n\t\treturn nil\n\t}\n\treturn r.RunPVCBrowseHelper(ctx, args)\n}\n\nfunc (r *PVCBrowseRunner) RunPVCBrowseHelper(ctx context.Context, args *types.PVCBrowseArgs) error {\n\tdefer func() {\n\t\tfmt.Println(\"Cleaning up resources\")\n\t\tr.browserSteps.Cleanup(ctx, r.pvc, r.pod, r.snapshot)\n\t}()\n\tif r.KubeCli == nil || r.DynCli == nil {\n\t\treturn fmt.Errorf(\"cli uninitialized\")\n\t}\n\tsc, err := r.browserSteps.ValidateArgs(ctx, args)\n\tif err != 
nil {\n\t\treturn errors.Wrap(err, \"failed to validate arguments\")\n\t}\n\n\tfmt.Println(\"Taking a snapshot.\")\n\tsnapName := snapshotPrefix + time.Now().Format(\"20060102150405\")\n\tr.snapshot, err = r.browserSteps.SnapshotPVC(ctx, args, snapName)\n\tif err != nil {\n\t\treturn errors.Wrap(err, \"failed to snapshot PVC\")\n\t}\n\n\tfmt.Println(\"Creating the browser pod.\")\n\tr.pod, r.pvc, err = r.browserSteps.CreateInspectorApplication(ctx, args, r.snapshot, sc)\n\tif err != nil {\n\t\treturn errors.Wrap(err, \"failed to create inspector application\")\n\t}\n\n\tif args.ShowTree {\n\t\tfmt.Println(\"Printing the tree structure from root directory.\")\n\t\tstdout, err := r.browserSteps.ExecuteTreeCommand(ctx, args, r.pod)\n\t\tif err != nil {\n\t\t\treturn errors.Wrap(err, \"failed to execute tree command in pod\")\n\t\t}\n\t\tfmt.Printf(\"\\n%s\\n\\n\", stdout)\n\t\treturn nil\n\t}\n\n\tfmt.Println(\"Forwarding the port.\")\n\terr = r.browserSteps.PortForwardAPod(ctx, r.pod, args.LocalPort)\n\tif err != nil {\n\t\treturn errors.Wrap(err, \"failed to forward pod port\")\n\t}\n\n\treturn nil\n}\n\n//go:generate go run github.com/golang/mock/mockgen -destination=mocks/mock_pvc_browser_stepper.go -package=mocks . 
PVCBrowserStepper\ntype PVCBrowserStepper interface {\n\tValidateArgs(ctx context.Context, args *types.PVCBrowseArgs) (*sv1.StorageClass, error)\n\tSnapshotPVC(ctx context.Context, args *types.PVCBrowseArgs, snapshotName string) (*snapv1.VolumeSnapshot, error)\n\tCreateInspectorApplication(ctx context.Context, args *types.PVCBrowseArgs, snapshot *snapv1.VolumeSnapshot, storageClass *sv1.StorageClass) (*v1.Pod, *v1.PersistentVolumeClaim, error)\n\tExecuteTreeCommand(ctx context.Context, args *types.PVCBrowseArgs, pod *v1.Pod) (string, error)\n\tPortForwardAPod(ctx context.Context, pod *v1.Pod, localPort int) error\n\tCleanup(ctx context.Context, pvc *v1.PersistentVolumeClaim, pod *v1.Pod, snapshot *snapv1.VolumeSnapshot)\n}\n\ntype pvcBrowserSteps struct {\n\tvalidateOps          ArgumentValidator\n\tversionFetchOps      ApiVersionFetcher\n\tcreateAppOps         ApplicationCreator\n\tsnapshotCreateOps    SnapshotCreator\n\tportForwardOps       PortForwarder\n\tcleanerOps           Cleaner\n\tkubeExecutor         KubeExecutor\n\tSnapshotGroupVersion *metav1.GroupVersionForDiscovery\n}\n\nfunc (p *pvcBrowserSteps) ValidateArgs(ctx context.Context, args *types.PVCBrowseArgs) (*sv1.StorageClass, error) {\n\tif err := args.Validate(); err != nil {\n\t\treturn nil, errors.Wrap(err, \"failed to validate input arguments\")\n\t}\n\tif err := p.validateOps.ValidateNamespace(ctx, args.Namespace); err != nil {\n\t\treturn nil, errors.Wrap(err, \"failed to validate Namespace\")\n\t}\n\tpvc, err := p.validateOps.ValidatePVC(ctx, args.PVCName, args.Namespace)\n\n\tif err != nil {\n\t\treturn nil, errors.Wrap(err, \"failed to validate PVC\")\n\t}\n\n\tpvName := pvc.Spec.VolumeName\n\tif pvName == \"\" {\n\t\treturn nil, errors.Errorf(\"PVC (%s) not bound. 
namespace - (%s)\", pvc.Name, pvc.Namespace)\n\t}\n\tpv, err := p.validateOps.FetchPV(ctx, pvName)\n\tif err != nil {\n\t\treturn nil, errors.Wrap(err, \"failed to fetch PV\")\n\t}\n\tif pv.Spec.CSI == nil {\n\t\treturn nil, errors.New(\"PVC is not using a CSI volume\")\n\t}\n\tsc, err := p.validateOps.ValidateStorageClass(ctx, *pvc.Spec.StorageClassName)\n\tif err != nil {\n\t\treturn nil, errors.Wrap(err, \"failed to validate SC\")\n\t}\n\tgroupVersion, err := p.versionFetchOps.GetCSISnapshotGroupVersion()\n\tif err != nil {\n\t\treturn nil, errors.Wrap(err, \"failed to fetch groupVersion\")\n\t}\n\tp.SnapshotGroupVersion = groupVersion\n\tuVSC, err := p.validateOps.ValidateVolumeSnapshotClass(ctx, args.VolumeSnapshotClass, groupVersion)\n\tif err != nil {\n\t\treturn nil, errors.Wrap(err, \"failed to validate VolumeSnapshotClass\")\n\t}\n\tvscDriver := getDriverNameFromUVSC(*uVSC, groupVersion.GroupVersion)\n\tif sc.Provisioner != vscDriver {\n\t\treturn nil, fmt.Errorf(\"provisioner for StorageClass (%s) and VolumeSnapshotClass driver (%s) are different\", sc.Provisioner, vscDriver)\n\t}\n\treturn sc, nil\n}\n\nfunc (p *pvcBrowserSteps) SnapshotPVC(ctx context.Context, args *types.PVCBrowseArgs, snapshotName string) (*snapv1.VolumeSnapshot, error) {\n\tsnapshotter, err := p.snapshotCreateOps.NewSnapshotter()\n\tif err != nil {\n\t\treturn nil, errors.Wrap(err, \"failed to load snapshotter\")\n\t}\n\tcreateSnapshotArgs := &types.CreateSnapshotArgs{\n\t\tNamespace:           args.Namespace,\n\t\tPVCName:             args.PVCName,\n\t\tVolumeSnapshotClass: args.VolumeSnapshotClass,\n\t\tSnapshotName:        snapshotName,\n\t}\n\treturn p.snapshotCreateOps.CreateSnapshot(ctx, snapshotter, createSnapshotArgs)\n}\n\nfunc (p *pvcBrowserSteps) CreateInspectorApplication(ctx context.Context, args *types.PVCBrowseArgs, snapshot *snapv1.VolumeSnapshot, storageClass *sv1.StorageClass) (*v1.Pod, *v1.PersistentVolumeClaim, error) {\n\tsnapshotAPIGroup := 
\"snapshot.storage.k8s.io\"\n\tsnapshotKind := \"VolumeSnapshot\"\n\tdataSource := &v1.TypedLocalObjectReference{\n\t\tAPIGroup: &snapshotAPIGroup,\n\t\tKind:     snapshotKind,\n\t\tName:     snapshot.Name,\n\t}\n\tpvcArgs := &types.CreatePVCArgs{\n\t\tGenerateName: clonedPVCGenerateName,\n\t\tStorageClass: storageClass.Name,\n\t\tNamespace:    args.Namespace,\n\t\tDataSource:   dataSource,\n\t\tRestoreSize:  snapshot.Status.RestoreSize,\n\t}\n\tpvc, err := p.createAppOps.CreatePVC(ctx, pvcArgs)\n\tif err != nil {\n\t\treturn nil, nil, errors.Wrap(err, \"failed to restore PVC\")\n\t}\n\tpodArgs := &types.CreatePodArgs{\n\t\tGenerateName:   clonedPodGenerateName,\n\t\tNamespace:      args.Namespace,\n\t\tRunAsUser:      args.RunAsUser,\n\t\tContainerImage: \"filebrowser/filebrowser:v2\",\n\t\tContainerArgs:  []string{\"--noauth\", \"-r\", \"/pvc-data\"},\n\t\tPVCMap: map[string]types.VolumePath{\n\t\t\tpvc.Name: {\n\t\t\t\tMountPath: \"/pvc-data\",\n\t\t\t},\n\t\t},\n\t}\n\tif args.ShowTree {\n\t\tpodArgs = &types.CreatePodArgs{\n\t\t\tGenerateName:   clonedPodGenerateName,\n\t\t\tNamespace:      args.Namespace,\n\t\t\tRunAsUser:      args.RunAsUser,\n\t\t\tContainerImage: \"alpine:3.19\",\n\t\t\tCommand:        []string{\"/bin/sh\"},\n\t\t\tContainerArgs:  []string{\"-c\", \"while true; do sleep 3600; done\"},\n\t\t\tPVCMap: map[string]types.VolumePath{\n\t\t\t\tpvc.Name: {\n\t\t\t\t\tMountPath: \"/pvc-data\",\n\t\t\t\t},\n\t\t\t},\n\t\t}\n\t}\n\tpod, err := p.createAppOps.CreatePod(ctx, podArgs)\n\tif err != nil {\n\t\treturn nil, pvc, errors.Wrap(err, \"failed to create browse pod\")\n\t}\n\tif err = p.createAppOps.WaitForPodReady(ctx, args.Namespace, pod.Name); err != nil {\n\t\treturn pod, pvc, errors.Wrap(err, \"pod failed to become ready\")\n\t}\n\treturn pod, pvc, nil\n}\n\nfunc (p *pvcBrowserSteps) ExecuteTreeCommand(ctx context.Context, args *types.PVCBrowseArgs, pod *v1.Pod) (string, error) {\n\tcommand := []string{\"tree\", \"/pvc-data\"}\n\tstdout, err 
:= p.kubeExecutor.Exec(ctx, args.Namespace, pod.Name, pod.Spec.Containers[0].Name, command)\n\tif err != nil {\n\t\treturn \"\", errors.Wrapf(err, \"error running command:(%v)\", command)\n\t}\n\treturn stdout, nil\n}\n\nfunc (p *pvcBrowserSteps) PortForwardAPod(ctx context.Context, pod *v1.Pod, localPort int) error {\n\tvar wg sync.WaitGroup\n\twg.Add(1)\n\tstopChan, readyChan, errChan := make(chan struct{}, 1), make(chan struct{}, 1), make(chan string)\n\tout, errOut := new(bytes.Buffer), new(bytes.Buffer)\n\tcfg, err := p.portForwardOps.FetchRestConfig()\n\tif err != nil {\n\t\treturn errors.New(\"Failed to fetch rest config\")\n\t}\n\tsigs := make(chan os.Signal, 1)\n\tsignal.Notify(sigs, syscall.SIGINT, syscall.SIGTERM)\n\tgo func() {\n\t\t<-sigs\n\t\tfmt.Println(\"Stopping port forward\")\n\t\tclose(stopChan)\n\t\twg.Done()\n\t}()\n\n\tgo func() {\n\t\tpfArgs := &types.PortForwardAPodRequest{\n\t\t\tRestConfig:   cfg,\n\t\t\tPod:          pod,\n\t\t\tLocalPort:    localPort,\n\t\t\tPodPort:      80,\n\t\t\tOutStream:    bytes.Buffer(*out),\n\t\t\tErrOutStream: bytes.Buffer(*errOut),\n\t\t\tStopCh:       stopChan,\n\t\t\tReadyCh:      readyChan,\n\t\t}\n\t\terr = p.portForwardOps.PortForwardAPod(pfArgs)\n\t\tif err != nil {\n\t\t\terrChan <- fmt.Sprintf(\"Failed to port forward (%s)\", err.Error())\n\t\t}\n\t}()\n\n\tselect {\n\tcase <-readyChan:\n\t\turl := fmt.Sprintf(\"http://localhost:%d/\", localPort)\n\t\tfmt.Printf(\"Port forwarding is ready to get traffic. 
visit %s\\n\", url)\n\t\topenbrowser(url)\n\t\twg.Wait()\n\tcase msg := <-errChan:\n\t\treturn errors.New(msg)\n\t}\n\n\treturn nil\n}\n\nfunc (p *pvcBrowserSteps) Cleanup(ctx context.Context, pvc *v1.PersistentVolumeClaim, pod *v1.Pod, snapshot *snapv1.VolumeSnapshot) {\n\tif pvc != nil {\n\t\terr := p.cleanerOps.DeletePVC(ctx, pvc.Name, pvc.Namespace)\n\t\tif err != nil {\n\t\t\tfmt.Println(\"Failed to delete PVC\", pvc)\n\t\t}\n\t}\n\tif pod != nil {\n\t\terr := p.cleanerOps.DeletePod(ctx, pod.Name, pod.Namespace)\n\t\tif err != nil {\n\t\t\tfmt.Println(\"Failed to delete Pod\", pod)\n\t\t}\n\t}\n\tif snapshot != nil {\n\t\terr := p.cleanerOps.DeleteSnapshot(ctx, snapshot.Name, snapshot.Namespace, p.SnapshotGroupVersion)\n\t\tif err != nil {\n\t\t\tfmt.Println(\"Failed to delete Snapshot\", snapshot)\n\t\t}\n\t}\n}\n\nfunc openbrowser(url string) {\n\tvar err error\n\n\tswitch runtime.GOOS {\n\tcase \"linux\":\n\t\terr = exec.Command(\"xdg-open\", url).Start()\n\tcase \"windows\":\n\t\terr = exec.Command(\"rundll32\", \"url.dll,FileProtocolHandler\", url).Start()\n\tcase \"darwin\":\n\t\terr = exec.Command(\"open\", url).Start()\n\tdefault:\n\t\terr = fmt.Errorf(\"unsupported platform\")\n\t}\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n}\n"
  },
  {
    "path": "pkg/csi/pvc_inspector_steps_test.go",
    "content": "package csi\n\nimport (\n\t\"context\"\n\t\"fmt\"\n\n\t\"github.com/golang/mock/gomock\"\n\t\"github.com/kastenhq/kubestr/pkg/common\"\n\t\"github.com/kastenhq/kubestr/pkg/csi/mocks\"\n\t\"github.com/kastenhq/kubestr/pkg/csi/types\"\n\tsnapv1 \"github.com/kubernetes-csi/external-snapshotter/client/v4/apis/volumesnapshot/v1\"\n\t. \"gopkg.in/check.v1\"\n\tv1 \"k8s.io/api/core/v1\"\n\tsv1 \"k8s.io/api/storage/v1\"\n\t\"k8s.io/apimachinery/pkg/api/resource\"\n\tmetav1 \"k8s.io/apimachinery/pkg/apis/meta/v1\"\n\t\"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured\"\n)\n\nfunc (s *CSITestSuite) TestPvcBrowseValidateArgs(c *C) {\n\tctx := context.Background()\n\tscName := \"sc\"\n\ttype fields struct {\n\t\tvalidateOps *mocks.MockArgumentValidator\n\t\tversionOps  *mocks.MockApiVersionFetcher\n\t}\n\tfor _, tc := range []struct {\n\t\targs       *types.PVCBrowseArgs\n\t\tprepare    func(f *fields)\n\t\terrChecker Checker\n\t}{\n\t\t{ // valid args\n\t\t\targs: &types.PVCBrowseArgs{\n\t\t\t\tPVCName:             \"pvc\",\n\t\t\t\tVolumeSnapshotClass: \"vsc\",\n\t\t\t\tNamespace:           \"ns\",\n\t\t\t},\n\t\t\tprepare: func(f *fields) {\n\t\t\t\tgomock.InOrder(\n\t\t\t\t\tf.validateOps.EXPECT().ValidateNamespace(gomock.Any(), \"ns\").Return(nil),\n\t\t\t\t\tf.validateOps.EXPECT().ValidatePVC(gomock.Any(), \"pvc\", \"ns\").Return(\n\t\t\t\t\t\t&v1.PersistentVolumeClaim{\n\t\t\t\t\t\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\t\t\t\t\t\tName:      \"pvc\",\n\t\t\t\t\t\t\t\tNamespace: \"ns\",\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\tSpec: v1.PersistentVolumeClaimSpec{\n\t\t\t\t\t\t\t\tVolumeName:       \"vol\",\n\t\t\t\t\t\t\t\tStorageClassName: &scName,\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t}, nil,\n\t\t\t\t\t),\n\t\t\t\t\tf.validateOps.EXPECT().FetchPV(gomock.Any(), \"vol\").Return(\n\t\t\t\t\t\t&v1.PersistentVolume{\n\t\t\t\t\t\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\t\t\t\t\t\tName: \"vol\",\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\tSpec: 
v1.PersistentVolumeSpec{\n\t\t\t\t\t\t\t\tPersistentVolumeSource: v1.PersistentVolumeSource{\n\t\t\t\t\t\t\t\t\tCSI: &v1.CSIPersistentVolumeSource{},\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tnil,\n\t\t\t\t\t),\n\t\t\t\t\tf.validateOps.EXPECT().ValidateStorageClass(gomock.Any(), \"sc\").Return(\n\t\t\t\t\t\t&sv1.StorageClass{\n\t\t\t\t\t\t\tProvisioner: \"p1\",\n\t\t\t\t\t\t}, nil),\n\t\t\t\t\tf.versionOps.EXPECT().GetCSISnapshotGroupVersion().Return(\n\t\t\t\t\t\t&metav1.GroupVersionForDiscovery{\n\t\t\t\t\t\t\tGroupVersion: common.SnapshotVersion,\n\t\t\t\t\t\t}, nil),\n\t\t\t\t\tf.validateOps.EXPECT().ValidateVolumeSnapshotClass(gomock.Any(), \"vsc\", &metav1.GroupVersionForDiscovery{\n\t\t\t\t\t\tGroupVersion: common.SnapshotVersion,\n\t\t\t\t\t}).Return(&unstructured.Unstructured{\n\t\t\t\t\t\tObject: map[string]interface{}{\n\t\t\t\t\t\t\tcommon.VolSnapClassDriverKey: \"p1\",\n\t\t\t\t\t\t},\n\t\t\t\t\t}, nil),\n\t\t\t\t)\n\t\t\t},\n\t\t\terrChecker: IsNil,\n\t\t},\n\t\t{ // driver mismatch\n\t\t\targs: &types.PVCBrowseArgs{\n\t\t\t\tPVCName:             \"pvc\",\n\t\t\t\tVolumeSnapshotClass: \"vsc\",\n\t\t\t\tNamespace:           \"ns\",\n\t\t\t},\n\t\t\tprepare: func(f *fields) {\n\t\t\t\tgomock.InOrder(\n\t\t\t\t\tf.validateOps.EXPECT().ValidateNamespace(gomock.Any(), \"ns\").Return(nil),\n\t\t\t\t\tf.validateOps.EXPECT().ValidatePVC(gomock.Any(), \"pvc\", \"ns\").Return(\n\t\t\t\t\t\t&v1.PersistentVolumeClaim{\n\t\t\t\t\t\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\t\t\t\t\t\tName:      \"pvc\",\n\t\t\t\t\t\t\t\tNamespace: \"ns\",\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\tSpec: v1.PersistentVolumeClaimSpec{\n\t\t\t\t\t\t\t\tVolumeName:       \"vol\",\n\t\t\t\t\t\t\t\tStorageClassName: &scName,\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t}, nil,\n\t\t\t\t\t),\n\t\t\t\t\tf.validateOps.EXPECT().FetchPV(gomock.Any(), \"vol\").Return(\n\t\t\t\t\t\t&v1.PersistentVolume{\n\t\t\t\t\t\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\t\t\t\t\t\tName: 
\"vol\",\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\tSpec: v1.PersistentVolumeSpec{\n\t\t\t\t\t\t\t\tPersistentVolumeSource: v1.PersistentVolumeSource{\n\t\t\t\t\t\t\t\t\tCSI: &v1.CSIPersistentVolumeSource{},\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tnil,\n\t\t\t\t\t),\n\t\t\t\t\tf.validateOps.EXPECT().ValidateStorageClass(gomock.Any(), gomock.Any()).Return(\n\t\t\t\t\t\t&sv1.StorageClass{\n\t\t\t\t\t\t\tProvisioner: \"p1\",\n\t\t\t\t\t\t}, nil),\n\t\t\t\t\tf.versionOps.EXPECT().GetCSISnapshotGroupVersion().Return(\n\t\t\t\t\t\t&metav1.GroupVersionForDiscovery{\n\t\t\t\t\t\t\tGroupVersion: common.SnapshotVersion,\n\t\t\t\t\t\t}, nil),\n\t\t\t\t\tf.validateOps.EXPECT().ValidateVolumeSnapshotClass(gomock.Any(), \"vsc\", &metav1.GroupVersionForDiscovery{\n\t\t\t\t\t\tGroupVersion: common.SnapshotVersion,\n\t\t\t\t\t}).Return(&unstructured.Unstructured{\n\t\t\t\t\t\tObject: map[string]interface{}{\n\t\t\t\t\t\t\tcommon.VolSnapClassDriverKey: \"p2\",\n\t\t\t\t\t\t},\n\t\t\t\t\t}, nil),\n\t\t\t\t)\n\t\t\t},\n\t\t\terrChecker: NotNil,\n\t\t},\n\t\t{ // vsc error\n\t\t\targs: &types.PVCBrowseArgs{\n\t\t\t\tPVCName:             \"pvc\",\n\t\t\t\tVolumeSnapshotClass: \"vsc\",\n\t\t\t\tNamespace:           \"ns\",\n\t\t\t},\n\t\t\tprepare: func(f *fields) {\n\t\t\t\tgomock.InOrder(\n\t\t\t\t\tf.validateOps.EXPECT().ValidateNamespace(gomock.Any(), \"ns\").Return(nil),\n\t\t\t\t\tf.validateOps.EXPECT().ValidatePVC(gomock.Any(), gomock.Any(), gomock.Any()).Return(\n\t\t\t\t\t\t&v1.PersistentVolumeClaim{\n\t\t\t\t\t\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\t\t\t\t\t\tName:      \"pvc\",\n\t\t\t\t\t\t\t\tNamespace: \"ns\",\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\tSpec: v1.PersistentVolumeClaimSpec{\n\t\t\t\t\t\t\t\tVolumeName:       \"vol\",\n\t\t\t\t\t\t\t\tStorageClassName: &scName,\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t}, nil,\n\t\t\t\t\t),\n\t\t\t\t\tf.validateOps.EXPECT().FetchPV(gomock.Any(), \"vol\").Return(\n\t\t\t\t\t\t&v1.PersistentVolume{\n\t\t\t\t\t\t\tObjectMeta: 
metav1.ObjectMeta{\n\t\t\t\t\t\t\t\tName: \"vol\",\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\tSpec: v1.PersistentVolumeSpec{\n\t\t\t\t\t\t\t\tPersistentVolumeSource: v1.PersistentVolumeSource{\n\t\t\t\t\t\t\t\t\tCSI: &v1.CSIPersistentVolumeSource{},\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tnil,\n\t\t\t\t\t),\n\t\t\t\t\tf.validateOps.EXPECT().ValidateStorageClass(gomock.Any(), gomock.Any()).Return(nil, nil),\n\t\t\t\t\tf.versionOps.EXPECT().GetCSISnapshotGroupVersion().Return(nil, nil),\n\t\t\t\t\tf.validateOps.EXPECT().ValidateVolumeSnapshotClass(gomock.Any(), gomock.Any(), gomock.Any()).Return(nil, fmt.Errorf(\"vsc error\")),\n\t\t\t\t)\n\t\t\t},\n\t\t\terrChecker: NotNil,\n\t\t},\n\t\t{ // get driver version error\n\t\t\targs: &types.PVCBrowseArgs{\n\t\t\t\tPVCName:             \"pvc\",\n\t\t\t\tVolumeSnapshotClass: \"vsc\",\n\t\t\t\tNamespace:           \"ns\",\n\t\t\t},\n\t\t\tprepare: func(f *fields) {\n\t\t\t\tgomock.InOrder(\n\t\t\t\t\tf.validateOps.EXPECT().ValidateNamespace(gomock.Any(), \"ns\").Return(nil),\n\t\t\t\t\tf.validateOps.EXPECT().ValidatePVC(gomock.Any(), gomock.Any(), gomock.Any()).Return(\n\t\t\t\t\t\t&v1.PersistentVolumeClaim{\n\t\t\t\t\t\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\t\t\t\t\t\tName:      \"pvc\",\n\t\t\t\t\t\t\t\tNamespace: \"ns\",\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\tSpec: v1.PersistentVolumeClaimSpec{\n\t\t\t\t\t\t\t\tVolumeName:       \"vol\",\n\t\t\t\t\t\t\t\tStorageClassName: &scName,\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t}, nil,\n\t\t\t\t\t),\n\t\t\t\t\tf.validateOps.EXPECT().FetchPV(gomock.Any(), \"vol\").Return(\n\t\t\t\t\t\t&v1.PersistentVolume{\n\t\t\t\t\t\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\t\t\t\t\t\tName: \"vol\",\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\tSpec: v1.PersistentVolumeSpec{\n\t\t\t\t\t\t\t\tPersistentVolumeSource: v1.PersistentVolumeSource{\n\t\t\t\t\t\t\t\t\tCSI: 
&v1.CSIPersistentVolumeSource{},\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tnil,\n\t\t\t\t\t),\n\t\t\t\t\tf.validateOps.EXPECT().ValidateStorageClass(gomock.Any(), gomock.Any()).Return(nil, nil),\n\t\t\t\t\tf.versionOps.EXPECT().GetCSISnapshotGroupVersion().Return(nil, fmt.Errorf(\"driver version error\")),\n\t\t\t\t)\n\t\t\t},\n\t\t\terrChecker: NotNil,\n\t\t},\n\t\t{ // sc error\n\t\t\targs: &types.PVCBrowseArgs{\n\t\t\t\tPVCName:             \"pvc\",\n\t\t\t\tVolumeSnapshotClass: \"vsc\",\n\t\t\t\tNamespace:           \"ns\",\n\t\t\t},\n\t\t\tprepare: func(f *fields) {\n\t\t\t\tgomock.InOrder(\n\t\t\t\t\tf.validateOps.EXPECT().ValidateNamespace(gomock.Any(), \"ns\").Return(nil),\n\t\t\t\t\tf.validateOps.EXPECT().ValidatePVC(gomock.Any(), gomock.Any(), gomock.Any()).Return(\n\t\t\t\t\t\t&v1.PersistentVolumeClaim{\n\t\t\t\t\t\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\t\t\t\t\t\tName:      \"pvc\",\n\t\t\t\t\t\t\t\tNamespace: \"ns\",\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\tSpec: v1.PersistentVolumeClaimSpec{\n\t\t\t\t\t\t\t\tVolumeName:       \"vol\",\n\t\t\t\t\t\t\t\tStorageClassName: &scName,\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t}, nil,\n\t\t\t\t\t),\n\t\t\t\t\tf.validateOps.EXPECT().FetchPV(gomock.Any(), \"vol\").Return(\n\t\t\t\t\t\t&v1.PersistentVolume{\n\t\t\t\t\t\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\t\t\t\t\t\tName: \"vol\",\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\tSpec: v1.PersistentVolumeSpec{\n\t\t\t\t\t\t\t\tPersistentVolumeSource: v1.PersistentVolumeSource{\n\t\t\t\t\t\t\t\t\tCSI: &v1.CSIPersistentVolumeSource{},\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tnil,\n\t\t\t\t\t),\n\t\t\t\t\tf.validateOps.EXPECT().ValidateStorageClass(gomock.Any(), gomock.Any()).Return(nil, fmt.Errorf(\"sc error\")),\n\t\t\t\t)\n\t\t\t},\n\t\t\terrChecker: NotNil,\n\t\t},\n\t\t{ // non csi error\n\t\t\targs: &types.PVCBrowseArgs{\n\t\t\t\tPVCName:             \"pvc\",\n\t\t\t\tVolumeSnapshotClass: \"vsc\",\n\t\t\t\tNamespace:           
\"ns\",\n\t\t\t},\n\t\t\tprepare: func(f *fields) {\n\t\t\t\tgomock.InOrder(\n\t\t\t\t\tf.validateOps.EXPECT().ValidateNamespace(gomock.Any(), \"ns\").Return(nil),\n\t\t\t\t\tf.validateOps.EXPECT().ValidatePVC(gomock.Any(), gomock.Any(), gomock.Any()).Return(\n\t\t\t\t\t\t&v1.PersistentVolumeClaim{\n\t\t\t\t\t\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\t\t\t\t\t\tName:      \"pvc\",\n\t\t\t\t\t\t\t\tNamespace: \"ns\",\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\tSpec: v1.PersistentVolumeClaimSpec{\n\t\t\t\t\t\t\t\tVolumeName:       \"vol\",\n\t\t\t\t\t\t\t\tStorageClassName: &scName,\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t}, nil,\n\t\t\t\t\t),\n\t\t\t\t\tf.validateOps.EXPECT().FetchPV(gomock.Any(), \"vol\").Return(\n\t\t\t\t\t\t&v1.PersistentVolume{\n\t\t\t\t\t\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\t\t\t\t\t\tName: \"vol\",\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\tSpec: v1.PersistentVolumeSpec{\n\t\t\t\t\t\t\t\tPersistentVolumeSource: v1.PersistentVolumeSource{\n\t\t\t\t\t\t\t\t\tGCEPersistentDisk: &v1.GCEPersistentDiskVolumeSource{},\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tnil,\n\t\t\t\t\t),\n\t\t\t\t)\n\t\t\t},\n\t\t\terrChecker: NotNil,\n\t\t},\n\t\t{ // fetch pv error\n\t\t\targs: &types.PVCBrowseArgs{\n\t\t\t\tPVCName:             \"pvc\",\n\t\t\t\tVolumeSnapshotClass: \"vsc\",\n\t\t\t\tNamespace:           \"ns\",\n\t\t\t},\n\t\t\tprepare: func(f *fields) {\n\t\t\t\tgomock.InOrder(\n\t\t\t\t\tf.validateOps.EXPECT().ValidateNamespace(gomock.Any(), \"ns\").Return(nil),\n\t\t\t\t\tf.validateOps.EXPECT().ValidatePVC(gomock.Any(), gomock.Any(), gomock.Any()).Return(\n\t\t\t\t\t\t&v1.PersistentVolumeClaim{\n\t\t\t\t\t\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\t\t\t\t\t\tName:      \"pvc\",\n\t\t\t\t\t\t\t\tNamespace: \"ns\",\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\tSpec: v1.PersistentVolumeClaimSpec{\n\t\t\t\t\t\t\t\tVolumeName:       \"vol\",\n\t\t\t\t\t\t\t\tStorageClassName: &scName,\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t}, 
nil,\n\t\t\t\t\t),\n\t\t\t\t\tf.validateOps.EXPECT().FetchPV(gomock.Any(), \"vol\").Return(nil, fmt.Errorf(\"pv fail\")),\n\t\t\t\t)\n\t\t\t},\n\t\t\terrChecker: NotNil,\n\t\t},\n\t\t{ // validate pvc error\n\t\t\targs: &types.PVCBrowseArgs{\n\t\t\t\tPVCName:             \"pvc\",\n\t\t\t\tVolumeSnapshotClass: \"vsc\",\n\t\t\t\tNamespace:           \"ns\",\n\t\t\t},\n\t\t\tprepare: func(f *fields) {\n\t\t\t\tgomock.InOrder(\n\t\t\t\t\tf.validateOps.EXPECT().ValidateNamespace(gomock.Any(), \"ns\").Return(nil),\n\t\t\t\t\tf.validateOps.EXPECT().ValidatePVC(gomock.Any(), gomock.Any(), gomock.Any()).Return(nil, fmt.Errorf(\"validate pvc error\")),\n\t\t\t\t)\n\t\t\t},\n\t\t\terrChecker: NotNil,\n\t\t},\n\t\t{ // validate ns error\n\t\t\targs: &types.PVCBrowseArgs{\n\t\t\t\tPVCName:             \"pvc\",\n\t\t\t\tVolumeSnapshotClass: \"vsc\",\n\t\t\t\tNamespace:           \"ns\",\n\t\t\t},\n\t\t\tprepare: func(f *fields) {\n\t\t\t\tgomock.InOrder(\n\t\t\t\t\tf.validateOps.EXPECT().ValidateNamespace(gomock.Any(), \"ns\").Return(fmt.Errorf(\"validate ns error\")),\n\t\t\t\t)\n\t\t\t},\n\t\t\terrChecker: NotNil,\n\t\t},\n\t\t{ // empty PVC name arg error\n\t\t\targs: &types.PVCBrowseArgs{\n\t\t\t\tPVCName:             \"\",\n\t\t\t\tVolumeSnapshotClass: \"vsc\",\n\t\t\t\tNamespace:           \"ns\",\n\t\t\t},\n\t\t\terrChecker: NotNil,\n\t\t},\n\t\t{ // validate vsc error\n\t\t\targs: &types.PVCBrowseArgs{\n\t\t\t\tPVCName:             \"dfd\",\n\t\t\t\tVolumeSnapshotClass: \"\",\n\t\t\t\tNamespace:           \"ns\",\n\t\t\t},\n\t\t\terrChecker: NotNil,\n\t\t},\n\t\t{ // empty namespace arg error\n\t\t\targs: &types.PVCBrowseArgs{\n\t\t\t\tPVCName:             \"dfd\",\n\t\t\t\tVolumeSnapshotClass: \"ddd\",\n\t\t\t\tNamespace:           \"\",\n\t\t\t},\n\t\t\terrChecker: NotNil,\n\t\t},\n\t} {\n\t\tctrl := gomock.NewController(c)\n\t\tdefer ctrl.Finish()\n\t\tf := fields{\n\t\t\tvalidateOps: mocks.NewMockArgumentValidator(ctrl),\n\t\t\tversionOps:  
mocks.NewMockApiVersionFetcher(ctrl),\n\t\t}\n\t\tif tc.prepare != nil {\n\t\t\ttc.prepare(&f)\n\t\t}\n\t\tstepper := &pvcBrowserSteps{\n\t\t\tvalidateOps:     f.validateOps,\n\t\t\tversionFetchOps: f.versionOps,\n\t\t}\n\t\t_, err := stepper.ValidateArgs(ctx, tc.args)\n\t\tc.Check(err, tc.errChecker)\n\t}\n}\n\nfunc (s *CSITestSuite) TestPvcBrowseSnapshotPVC(c *C) {\n\tctx := context.Background()\n\tsnapshotter := &fakeSnapshotter{name: \"snapshotter\"}\n\tgroupversion := &metav1.GroupVersionForDiscovery{\n\t\tGroupVersion: \"gv\",\n\t\tVersion:      \"v\",\n\t}\n\ttype fields struct {\n\t\tsnapshotOps *mocks.MockSnapshotCreator\n\t}\n\tfor _, tc := range []struct {\n\t\targs         *types.PVCBrowseArgs\n\t\tsnapshotName string\n\t\tprepare      func(f *fields)\n\t\terrChecker   Checker\n\t\tsnapChecker  Checker\n\t}{\n\t\t{\n\t\t\targs: &types.PVCBrowseArgs{\n\t\t\t\tNamespace:           \"ns\",\n\t\t\t\tVolumeSnapshotClass: \"vsc\",\n\t\t\t\tPVCName:             \"pvc1\",\n\t\t\t},\n\t\t\tsnapshotName: \"snap1\",\n\t\t\tprepare: func(f *fields) {\n\t\t\t\tgomock.InOrder(\n\t\t\t\t\tf.snapshotOps.EXPECT().NewSnapshotter().Return(snapshotter, nil),\n\t\t\t\t\tf.snapshotOps.EXPECT().CreateSnapshot(gomock.Any(), snapshotter, &types.CreateSnapshotArgs{\n\t\t\t\t\t\tNamespace:           \"ns\",\n\t\t\t\t\t\tPVCName:             \"pvc1\",\n\t\t\t\t\t\tVolumeSnapshotClass: \"vsc\",\n\t\t\t\t\t\tSnapshotName:        \"snap1\",\n\t\t\t\t\t}).Return(&snapv1.VolumeSnapshot{\n\t\t\t\t\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\t\t\t\t\tName: \"createdName\",\n\t\t\t\t\t\t},\n\t\t\t\t\t}, nil),\n\t\t\t\t)\n\t\t\t},\n\t\t\terrChecker:  IsNil,\n\t\t\tsnapChecker: NotNil,\n\t\t},\n\t\t{\n\t\t\targs: &types.PVCBrowseArgs{\n\t\t\t\tNamespace:           \"ns\",\n\t\t\t\tVolumeSnapshotClass: \"vsc\",\n\t\t\t\tPVCName:             \"pvc1\",\n\t\t\t},\n\t\t\tsnapshotName: \"snap1\",\n\t\t\tprepare: func(f *fields) 
{\n\t\t\t\tgomock.InOrder(\n\t\t\t\t\tf.snapshotOps.EXPECT().NewSnapshotter().Return(snapshotter, nil),\n\t\t\t\t\tf.snapshotOps.EXPECT().CreateSnapshot(gomock.Any(), snapshotter, &types.CreateSnapshotArgs{\n\t\t\t\t\t\tNamespace:           \"ns\",\n\t\t\t\t\t\tPVCName:             \"pvc1\",\n\t\t\t\t\t\tVolumeSnapshotClass: \"vsc\",\n\t\t\t\t\t\tSnapshotName:        \"snap1\",\n\t\t\t\t\t}).Return(nil, fmt.Errorf(\"error\")),\n\t\t\t\t)\n\t\t\t},\n\t\t\terrChecker:  NotNil,\n\t\t\tsnapChecker: IsNil,\n\t\t},\n\t\t{\n\t\t\targs: &types.PVCBrowseArgs{\n\t\t\t\tNamespace:           \"ns\",\n\t\t\t\tVolumeSnapshotClass: \"vsc\",\n\t\t\t\tPVCName:             \"pvc1\",\n\t\t\t},\n\t\t\tsnapshotName: \"snap1\",\n\t\t\tprepare: func(f *fields) {\n\t\t\t\tgomock.InOrder(\n\t\t\t\t\tf.snapshotOps.EXPECT().NewSnapshotter().Return(nil, fmt.Errorf(\"error\")),\n\t\t\t\t)\n\t\t\t},\n\t\t\terrChecker:  NotNil,\n\t\t\tsnapChecker: IsNil,\n\t\t},\n\t} {\n\t\tctrl := gomock.NewController(c)\n\t\tdefer ctrl.Finish()\n\t\tf := fields{\n\t\t\tsnapshotOps: mocks.NewMockSnapshotCreator(ctrl),\n\t\t}\n\t\tif tc.prepare != nil {\n\t\t\ttc.prepare(&f)\n\t\t}\n\t\tstepper := &pvcBrowserSteps{\n\t\t\tsnapshotCreateOps:    f.snapshotOps,\n\t\t\tSnapshotGroupVersion: groupversion,\n\t\t}\n\t\tsnapshot, err := stepper.SnapshotPVC(ctx, tc.args, tc.snapshotName)\n\t\tc.Check(err, tc.errChecker)\n\t\tc.Check(snapshot, tc.snapChecker)\n\t}\n}\n\nfunc (s *CSITestSuite) TestCreateInspectorApplicationForPVC(c *C) {\n\tctx := context.Background()\n\tresourceQuantity := resource.MustParse(\"1Gi\")\n\tsnapshotAPIGroup := \"snapshot.storage.k8s.io\"\n\ttype fields struct {\n\t\tcreateAppOps *mocks.MockApplicationCreator\n\t}\n\tfor _, tc := range []struct {\n\t\targs       *types.PVCBrowseArgs\n\t\tsnapshot   *snapv1.VolumeSnapshot\n\t\tsc         *sv1.StorageClass\n\t\tprepare    func(f *fields)\n\t\terrChecker Checker\n\t\tpodChecker Checker\n\t\tpvcChecker Checker\n\t}{\n\t\t{\n\t\t\targs: 
&types.PVCBrowseArgs{\n\t\t\t\tNamespace: \"ns\",\n\t\t\t\tRunAsUser: 100,\n\t\t\t},\n\t\t\tsc: &sv1.StorageClass{\n\t\t\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\t\t\tName: \"sc\",\n\t\t\t\t},\n\t\t\t},\n\t\t\tsnapshot: &snapv1.VolumeSnapshot{\n\t\t\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\t\t\tName: \"snap1\",\n\t\t\t\t},\n\t\t\t\tStatus: &snapv1.VolumeSnapshotStatus{\n\t\t\t\t\tRestoreSize: &resourceQuantity,\n\t\t\t\t},\n\t\t\t},\n\t\t\tprepare: func(f *fields) {\n\t\t\t\tgomock.InOrder(\n\t\t\t\t\tf.createAppOps.EXPECT().CreatePVC(gomock.Any(), &types.CreatePVCArgs{\n\t\t\t\t\t\tGenerateName: clonedPVCGenerateName,\n\t\t\t\t\t\tStorageClass: \"sc\",\n\t\t\t\t\t\tNamespace:    \"ns\",\n\t\t\t\t\t\tDataSource: &v1.TypedLocalObjectReference{\n\t\t\t\t\t\t\tAPIGroup: &snapshotAPIGroup,\n\t\t\t\t\t\t\tKind:     \"VolumeSnapshot\",\n\t\t\t\t\t\t\tName:     \"snap1\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\tRestoreSize: &resourceQuantity,\n\t\t\t\t\t}).Return(&v1.PersistentVolumeClaim{\n\t\t\t\t\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\t\t\t\t\tName: \"pvc1\",\n\t\t\t\t\t\t},\n\t\t\t\t\t}, nil),\n\t\t\t\t\tf.createAppOps.EXPECT().CreatePod(gomock.Any(), &types.CreatePodArgs{\n\t\t\t\t\t\tGenerateName:   clonedPodGenerateName,\n\t\t\t\t\t\tNamespace:      \"ns\",\n\t\t\t\t\t\tContainerArgs:  []string{\"--noauth\", \"-r\", \"/pvc-data\"},\n\t\t\t\t\t\tRunAsUser:      100,\n\t\t\t\t\t\tContainerImage: \"filebrowser/filebrowser:v2\",\n\t\t\t\t\t\tPVCMap: map[string]types.VolumePath{\n\t\t\t\t\t\t\t\"pvc1\": {\n\t\t\t\t\t\t\t\tMountPath: \"/pvc-data\",\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t}).Return(&v1.Pod{\n\t\t\t\t\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\t\t\t\t\tName: \"pod1\",\n\t\t\t\t\t\t},\n\t\t\t\t\t}, nil),\n\t\t\t\t\tf.createAppOps.EXPECT().WaitForPodReady(gomock.Any(), \"ns\", \"pod1\").Return(nil),\n\t\t\t\t)\n\t\t\t},\n\t\t\terrChecker: IsNil,\n\t\t\tpodChecker: NotNil,\n\t\t\tpvcChecker: NotNil,\n\t\t},\n\t\t{\n\t\t\targs: &types.PVCBrowseArgs{\n\t\t\t\tNamespace: 
\"ns\",\n\t\t\t\tRunAsUser: 100,\n\t\t\t},\n\t\t\tsc: &sv1.StorageClass{\n\t\t\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\t\t\tName: \"sc\",\n\t\t\t\t},\n\t\t\t},\n\t\t\tsnapshot: &snapv1.VolumeSnapshot{\n\t\t\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\t\t\tName: \"snap1\",\n\t\t\t\t},\n\t\t\t\tStatus: &snapv1.VolumeSnapshotStatus{\n\t\t\t\t\tRestoreSize: &resourceQuantity,\n\t\t\t\t},\n\t\t\t},\n\t\t\tprepare: func(f *fields) {\n\t\t\t\tgomock.InOrder(\n\t\t\t\t\tf.createAppOps.EXPECT().CreatePVC(gomock.Any(), &types.CreatePVCArgs{\n\t\t\t\t\t\tGenerateName: clonedPVCGenerateName,\n\t\t\t\t\t\tStorageClass: \"sc\",\n\t\t\t\t\t\tNamespace:    \"ns\",\n\t\t\t\t\t\tDataSource: &v1.TypedLocalObjectReference{\n\t\t\t\t\t\t\tAPIGroup: &snapshotAPIGroup,\n\t\t\t\t\t\t\tKind:     \"VolumeSnapshot\",\n\t\t\t\t\t\t\tName:     \"snap1\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\tRestoreSize: &resourceQuantity,\n\t\t\t\t\t}).Return(&v1.PersistentVolumeClaim{\n\t\t\t\t\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\t\t\t\t\tName: \"pvc1\",\n\t\t\t\t\t\t},\n\t\t\t\t\t}, nil),\n\t\t\t\t\tf.createAppOps.EXPECT().CreatePod(gomock.Any(), &types.CreatePodArgs{\n\t\t\t\t\t\tGenerateName:   clonedPodGenerateName,\n\t\t\t\t\t\tNamespace:      \"ns\",\n\t\t\t\t\t\tContainerArgs:  []string{\"--noauth\", \"-r\", \"/pvc-data\"},\n\t\t\t\t\t\tRunAsUser:      100,\n\t\t\t\t\t\tContainerImage: \"filebrowser/filebrowser:v2\",\n\t\t\t\t\t\tPVCMap: map[string]types.VolumePath{\n\t\t\t\t\t\t\t\"pvc1\": {\n\t\t\t\t\t\t\t\tMountPath: \"/pvc-data\",\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t}).Return(&v1.Pod{\n\t\t\t\t\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\t\t\t\t\tName: \"pod1\",\n\t\t\t\t\t\t},\n\t\t\t\t\t}, nil),\n\t\t\t\t\tf.createAppOps.EXPECT().WaitForPodReady(gomock.Any(), \"ns\", \"pod1\").Return(fmt.Errorf(\"pod ready error\")),\n\t\t\t\t)\n\t\t\t},\n\t\t\terrChecker: NotNil,\n\t\t\tpodChecker: NotNil,\n\t\t\tpvcChecker: NotNil,\n\t\t},\n\t\t{\n\t\t\targs: &types.PVCBrowseArgs{\n\t\t\t\tNamespace: 
\"ns\",\n\t\t\t\tRunAsUser: 100,\n\t\t\t},\n\t\t\tsc: &sv1.StorageClass{\n\t\t\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\t\t\tName: \"sc\",\n\t\t\t\t},\n\t\t\t},\n\t\t\tsnapshot: &snapv1.VolumeSnapshot{\n\t\t\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\t\t\tName: \"snap1\",\n\t\t\t\t},\n\t\t\t\tStatus: &snapv1.VolumeSnapshotStatus{\n\t\t\t\t\tRestoreSize: &resourceQuantity,\n\t\t\t\t},\n\t\t\t},\n\t\t\tprepare: func(f *fields) {\n\t\t\t\tgomock.InOrder(\n\t\t\t\t\tf.createAppOps.EXPECT().CreatePVC(gomock.Any(), gomock.Any()).Return(&v1.PersistentVolumeClaim{\n\t\t\t\t\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\t\t\t\t\tName: \"pvc1\",\n\t\t\t\t\t\t},\n\t\t\t\t\t}, nil),\n\t\t\t\t\tf.createAppOps.EXPECT().CreatePod(gomock.Any(), gomock.Any()).Return(nil, fmt.Errorf(\"pod  error\")),\n\t\t\t\t)\n\t\t\t},\n\t\t\terrChecker: NotNil,\n\t\t\tpodChecker: IsNil,\n\t\t\tpvcChecker: NotNil,\n\t\t},\n\t\t{\n\t\t\targs: &types.PVCBrowseArgs{\n\t\t\t\tNamespace: \"ns\",\n\t\t\t\tRunAsUser: 100,\n\t\t\t},\n\t\t\tsc: &sv1.StorageClass{\n\t\t\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\t\t\tName: \"sc\",\n\t\t\t\t},\n\t\t\t},\n\t\t\tsnapshot: &snapv1.VolumeSnapshot{\n\t\t\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\t\t\tName: \"snap1\",\n\t\t\t\t},\n\t\t\t\tStatus: &snapv1.VolumeSnapshotStatus{\n\t\t\t\t\tRestoreSize: &resourceQuantity,\n\t\t\t\t},\n\t\t\t},\n\t\t\tprepare: func(f *fields) {\n\t\t\t\tgomock.InOrder(\n\t\t\t\t\tf.createAppOps.EXPECT().CreatePVC(gomock.Any(), gomock.Any()).Return(nil, fmt.Errorf(\"error\")),\n\t\t\t\t)\n\t\t\t},\n\t\t\terrChecker: NotNil,\n\t\t\tpodChecker: IsNil,\n\t\t\tpvcChecker: IsNil,\n\t\t},\n\t} {\n\t\tctrl := gomock.NewController(c)\n\t\tdefer ctrl.Finish()\n\t\tf := fields{\n\t\t\tcreateAppOps: mocks.NewMockApplicationCreator(ctrl),\n\t\t}\n\t\tif tc.prepare != nil {\n\t\t\ttc.prepare(&f)\n\t\t}\n\t\tstepper := &pvcBrowserSteps{\n\t\t\tcreateAppOps: f.createAppOps,\n\t\t}\n\t\tpod, pvc, err := stepper.CreateInspectorApplication(ctx, tc.args, tc.snapshot, 
tc.sc)\n\t\tc.Check(err, tc.errChecker)\n\t\tc.Check(pod, tc.podChecker)\n\t\tc.Check(pvc, tc.pvcChecker)\n\t}\n}\n\nfunc (s *CSITestSuite) TestPVCBrowseCleanup(c *C) {\n\tctx := context.Background()\n\tgroupversion := &metav1.GroupVersionForDiscovery{\n\t\tGroupVersion: \"gv\",\n\t\tVersion:      \"v\",\n\t}\n\ttype fields struct {\n\t\tcleanerOps *mocks.MockCleaner\n\t}\n\tfor _, tc := range []struct {\n\t\tpvc      *v1.PersistentVolumeClaim\n\t\tpod      *v1.Pod\n\t\tsnapshot *snapv1.VolumeSnapshot\n\t\tprepare  func(f *fields)\n\t}{\n\t\t{\n\t\t\tpvc: &v1.PersistentVolumeClaim{\n\t\t\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\t\t\tName:      \"pvc\",\n\t\t\t\t\tNamespace: \"ns\",\n\t\t\t\t},\n\t\t\t},\n\t\t\tpod: &v1.Pod{\n\t\t\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\t\t\tName:      \"pod\",\n\t\t\t\t\tNamespace: \"ns\",\n\t\t\t\t},\n\t\t\t},\n\t\t\tsnapshot: &snapv1.VolumeSnapshot{\n\t\t\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\t\t\tName:      \"snap1\",\n\t\t\t\t\tNamespace: \"ns\",\n\t\t\t\t},\n\t\t\t},\n\t\t\tprepare: func(f *fields) {\n\t\t\t\tgomock.InOrder(\n\t\t\t\t\tf.cleanerOps.EXPECT().DeletePVC(ctx, \"pvc\", \"ns\").Return(nil),\n\t\t\t\t\tf.cleanerOps.EXPECT().DeletePod(ctx, \"pod\", \"ns\").Return(nil),\n\t\t\t\t\tf.cleanerOps.EXPECT().DeleteSnapshot(ctx, \"snap1\", \"ns\", groupversion).Return(nil),\n\t\t\t\t)\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tpvc: &v1.PersistentVolumeClaim{\n\t\t\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\t\t\tName:      \"pvc\",\n\t\t\t\t\tNamespace: \"ns\",\n\t\t\t\t},\n\t\t\t},\n\t\t\tpod: &v1.Pod{\n\t\t\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\t\t\tName:      \"pod\",\n\t\t\t\t\tNamespace: \"ns\",\n\t\t\t\t},\n\t\t\t},\n\t\t\tsnapshot: &snapv1.VolumeSnapshot{\n\t\t\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\t\t\tName:      \"snap1\",\n\t\t\t\t\tNamespace: \"ns\",\n\t\t\t\t},\n\t\t\t},\n\t\t\tprepare: func(f *fields) {\n\t\t\t\tgomock.InOrder(\n\t\t\t\t\tf.cleanerOps.EXPECT().DeletePVC(ctx, \"pvc\", 
\"ns\").Return(fmt.Errorf(\"err\")),\n\t\t\t\t\tf.cleanerOps.EXPECT().DeletePod(ctx, \"pod\", \"ns\").Return(fmt.Errorf(\"err\")),\n\t\t\t\t\tf.cleanerOps.EXPECT().DeleteSnapshot(ctx, \"snap1\", \"ns\", groupversion).Return(fmt.Errorf(\"err\")),\n\t\t\t\t)\n\t\t\t},\n\t\t},\n\t} {\n\t\tctrl := gomock.NewController(c)\n\t\tdefer ctrl.Finish()\n\t\tf := fields{\n\t\t\tcleanerOps: mocks.NewMockCleaner(ctrl),\n\t\t}\n\t\tif tc.prepare != nil {\n\t\t\ttc.prepare(&f)\n\t\t}\n\t\tstepper := &pvcBrowserSteps{\n\t\t\tcleanerOps:           f.cleanerOps,\n\t\t\tSnapshotGroupVersion: groupversion,\n\t\t}\n\t\tstepper.Cleanup(ctx, tc.pvc, tc.pod, tc.snapshot)\n\t}\n}\n"
  },
  {
    "path": "pkg/csi/pvc_inspector_test.go",
    "content": "package csi\n\nimport (\n\t\"context\"\n\t\"fmt\"\n\n\t\"github.com/golang/mock/gomock\"\n\t\"github.com/kastenhq/kubestr/pkg/csi/mocks\"\n\t\"github.com/kastenhq/kubestr/pkg/csi/types\"\n\tsnapv1 \"github.com/kubernetes-csi/external-snapshotter/client/v4/apis/volumesnapshot/v1\"\n\t. \"gopkg.in/check.v1\"\n\tv1 \"k8s.io/api/core/v1\"\n\tsv1 \"k8s.io/api/storage/v1\"\n\tmetav1 \"k8s.io/apimachinery/pkg/apis/meta/v1\"\n\t\"k8s.io/apimachinery/pkg/runtime\"\n\t\"k8s.io/client-go/dynamic\"\n\tfakedynamic \"k8s.io/client-go/dynamic/fake\"\n\t\"k8s.io/client-go/kubernetes\"\n\t\"k8s.io/client-go/kubernetes/fake\"\n)\n\nfunc (s *CSITestSuite) TestRunPVCBrowseHelper(c *C) {\n\tctx := context.Background()\n\ttype fields struct {\n\t\tstepperOps *mocks.MockPVCBrowserStepper\n\t}\n\tfor _, tc := range []struct {\n\t\tkubeCli    kubernetes.Interface\n\t\tdynCli     dynamic.Interface\n\t\targs       *types.PVCBrowseArgs\n\t\tprepare    func(f *fields)\n\t\terrChecker Checker\n\t}{\n\t\t{\n\t\t\t// success\n\t\t\tkubeCli: fake.NewSimpleClientset(),\n\t\t\tdynCli:  fakedynamic.NewSimpleDynamicClient(runtime.NewScheme()),\n\t\t\targs:    &types.PVCBrowseArgs{},\n\t\t\tprepare: func(f *fields) {\n\t\t\t\tgomock.InOrder(\n\t\t\t\t\tf.stepperOps.EXPECT().ValidateArgs(gomock.Any(), gomock.Any()).Return(\n\t\t\t\t\t\t&sv1.StorageClass{}, nil,\n\t\t\t\t\t),\n\t\t\t\t\tf.stepperOps.EXPECT().SnapshotPVC(gomock.Any(), gomock.Any(), gomock.Any()).Return(\n\t\t\t\t\t\t&snapv1.VolumeSnapshot{\n\t\t\t\t\t\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\t\t\t\t\t\tName:      \"snap1\",\n\t\t\t\t\t\t\t\tNamespace: \"ns\",\n\t\t\t\t\t\t\t}}, nil,\n\t\t\t\t\t),\n\t\t\t\t\tf.stepperOps.EXPECT().CreateInspectorApplication(gomock.Any(), gomock.Any(),\n\t\t\t\t\t\t&snapv1.VolumeSnapshot{\n\t\t\t\t\t\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\t\t\t\t\t\tName:      \"snap1\",\n\t\t\t\t\t\t\t\tNamespace: \"ns\",\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t}, 
&sv1.StorageClass{},\n\t\t\t\t\t).Return(\n\t\t\t\t\t\t&v1.Pod{\n\t\t\t\t\t\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\t\t\t\t\t\tName:      \"pod1\",\n\t\t\t\t\t\t\t\tNamespace: \"ns\",\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t\t&v1.PersistentVolumeClaim{\n\t\t\t\t\t\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\t\t\t\t\t\tName:      \"pvc1\",\n\t\t\t\t\t\t\t\tNamespace: \"ns\",\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tnil,\n\t\t\t\t\t),\n\t\t\t\t\tf.stepperOps.EXPECT().PortForwardAPod(gomock.Any(),\n\t\t\t\t\t\t&v1.Pod{\n\t\t\t\t\t\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\t\t\t\t\t\tName:      \"pod1\",\n\t\t\t\t\t\t\t\tNamespace: \"ns\",\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t}, gomock.Any(),\n\t\t\t\t\t).Return(nil),\n\t\t\t\t\tf.stepperOps.EXPECT().Cleanup(gomock.Any(),\n\t\t\t\t\t\t&v1.PersistentVolumeClaim{\n\t\t\t\t\t\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\t\t\t\t\t\tName:      \"pvc1\",\n\t\t\t\t\t\t\t\tNamespace: \"ns\",\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t\t&v1.Pod{\n\t\t\t\t\t\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\t\t\t\t\t\tName:      \"pod1\",\n\t\t\t\t\t\t\t\tNamespace: \"ns\",\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t\t&snapv1.VolumeSnapshot{\n\t\t\t\t\t\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\t\t\t\t\t\tName:      \"snap1\",\n\t\t\t\t\t\t\t\tNamespace: \"ns\",\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t),\n\t\t\t\t)\n\t\t\t},\n\t\t\terrChecker: IsNil,\n\t\t},\n\t\t{\n\t\t\t// portforward failure\n\t\t\tkubeCli: fake.NewSimpleClientset(),\n\t\t\tdynCli:  fakedynamic.NewSimpleDynamicClient(runtime.NewScheme()),\n\t\t\targs:    &types.PVCBrowseArgs{},\n\t\t\tprepare: func(f *fields) {\n\t\t\t\tgomock.InOrder(\n\t\t\t\t\tf.stepperOps.EXPECT().ValidateArgs(gomock.Any(), gomock.Any()).Return(nil, nil),\n\t\t\t\t\tf.stepperOps.EXPECT().SnapshotPVC(gomock.Any(), gomock.Any(), gomock.Any()).Return(nil, nil),\n\t\t\t\t\tf.stepperOps.EXPECT().CreateInspectorApplication(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).Return(nil, nil, 
nil),\n\t\t\t\t\tf.stepperOps.EXPECT().PortForwardAPod(gomock.Any(), gomock.Any(), gomock.Any()).Return(fmt.Errorf(\"portforward error\")),\n\t\t\t\t\tf.stepperOps.EXPECT().Cleanup(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()),\n\t\t\t\t)\n\t\t\t},\n\t\t\terrChecker: NotNil,\n\t\t},\n\t\t{\n\t\t\t// createapp failure\n\t\t\tkubeCli: fake.NewSimpleClientset(),\n\t\t\tdynCli:  fakedynamic.NewSimpleDynamicClient(runtime.NewScheme()),\n\t\t\targs:    &types.PVCBrowseArgs{},\n\t\t\tprepare: func(f *fields) {\n\t\t\t\tgomock.InOrder(\n\t\t\t\t\tf.stepperOps.EXPECT().ValidateArgs(gomock.Any(), gomock.Any()).Return(nil, nil),\n\t\t\t\t\tf.stepperOps.EXPECT().SnapshotPVC(gomock.Any(), gomock.Any(), gomock.Any()).Return(nil, nil),\n\t\t\t\t\tf.stepperOps.EXPECT().CreateInspectorApplication(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).Return(nil, nil, fmt.Errorf(\"createapp error\")),\n\t\t\t\t\tf.stepperOps.EXPECT().Cleanup(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()),\n\t\t\t\t)\n\t\t\t},\n\t\t\terrChecker: NotNil,\n\t\t},\n\t\t{\n\t\t\t// snapshot failure\n\t\t\tkubeCli: fake.NewSimpleClientset(),\n\t\t\tdynCli:  fakedynamic.NewSimpleDynamicClient(runtime.NewScheme()),\n\t\t\targs:    &types.PVCBrowseArgs{},\n\t\t\tprepare: func(f *fields) {\n\t\t\t\tgomock.InOrder(\n\t\t\t\t\tf.stepperOps.EXPECT().ValidateArgs(gomock.Any(), gomock.Any()).Return(nil, nil),\n\t\t\t\t\tf.stepperOps.EXPECT().SnapshotPVC(gomock.Any(), gomock.Any(), gomock.Any()).Return(nil, fmt.Errorf(\"snapshot error\")),\n\t\t\t\t\tf.stepperOps.EXPECT().Cleanup(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()),\n\t\t\t\t)\n\t\t\t},\n\t\t\terrChecker: NotNil,\n\t\t},\n\t\t{\n\t\t\t// validate failure\n\t\t\tkubeCli: fake.NewSimpleClientset(),\n\t\t\tdynCli:  fakedynamic.NewSimpleDynamicClient(runtime.NewScheme()),\n\t\t\targs:    &types.PVCBrowseArgs{},\n\t\t\tprepare: func(f *fields) {\n\t\t\t\tgomock.InOrder(\n\t\t\t\t\tf.stepperOps.EXPECT().ValidateArgs(gomock.Any(), 
gomock.Any()).Return(nil, fmt.Errorf(\"snapshot error\")),\n\t\t\t\t\tf.stepperOps.EXPECT().Cleanup(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()),\n\t\t\t\t)\n\t\t\t},\n\t\t\terrChecker: NotNil,\n\t\t},\n\t\t{\n\t\t\t// emptycli failure\n\t\t\tkubeCli: nil,\n\t\t\tdynCli:  fakedynamic.NewSimpleDynamicClient(runtime.NewScheme()),\n\t\t\targs:    &types.PVCBrowseArgs{},\n\t\t\tprepare: func(f *fields) {\n\t\t\t\tgomock.InOrder(\n\t\t\t\t\tf.stepperOps.EXPECT().Cleanup(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()),\n\t\t\t\t)\n\t\t\t},\n\t\t\terrChecker: NotNil,\n\t\t},\n\t\t{\n\t\t\t// emptydyncli failure\n\t\t\tkubeCli: fake.NewSimpleClientset(),\n\t\t\tdynCli:  nil,\n\t\t\targs:    &types.PVCBrowseArgs{},\n\t\t\tprepare: func(f *fields) {\n\t\t\t\tgomock.InOrder(\n\t\t\t\t\tf.stepperOps.EXPECT().Cleanup(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()),\n\t\t\t\t)\n\t\t\t},\n\t\t\terrChecker: NotNil,\n\t\t},\n\t} {\n\t\tctrl := gomock.NewController(c)\n\t\tdefer ctrl.Finish()\n\t\tf := fields{\n\t\t\tstepperOps: mocks.NewMockPVCBrowserStepper(ctrl),\n\t\t}\n\t\tif tc.prepare != nil {\n\t\t\ttc.prepare(&f)\n\t\t}\n\t\trunner := &PVCBrowseRunner{\n\t\t\tKubeCli:      tc.kubeCli,\n\t\t\tDynCli:       tc.dynCli,\n\t\t\tbrowserSteps: f.stepperOps,\n\t\t}\n\t\terr := runner.RunPVCBrowseHelper(ctx, tc.args)\n\t\tc.Check(err, tc.errChecker)\n\t}\n}\n\nfunc (s *CSITestSuite) TestPVCBrowseRunner(c *C) {\n\tctx := context.Background()\n\tr := &PVCBrowseRunner{\n\t\tbrowserSteps: &pvcBrowserSteps{},\n\t}\n\terr := r.RunPVCBrowseHelper(ctx, nil)\n\tc.Check(err, NotNil)\n}\n"
  },
  {
    "path": "pkg/csi/snapshot_inspector.go",
    "content": "package csi\n\nimport (\n\t\"bytes\"\n\t\"context\"\n\t\"fmt\"\n\t\"os\"\n\t\"os/signal\"\n\t\"sync\"\n\t\"syscall\"\n\n\t\"github.com/kastenhq/kubestr/pkg/csi/types\"\n\tsnapv1 \"github.com/kubernetes-csi/external-snapshotter/client/v4/apis/volumesnapshot/v1\"\n\t\"github.com/pkg/errors\"\n\tv1 \"k8s.io/api/core/v1\"\n\tsv1 \"k8s.io/api/storage/v1\"\n\tmetav1 \"k8s.io/apimachinery/pkg/apis/meta/v1\"\n\t\"k8s.io/client-go/dynamic\"\n\t\"k8s.io/client-go/kubernetes\"\n)\n\ntype SnapshotBrowseRunner struct {\n\tKubeCli      kubernetes.Interface\n\tDynCli       dynamic.Interface\n\tbrowserSteps SnapshotBrowserStepper\n\tpvc          *v1.PersistentVolumeClaim\n\tpod          *v1.Pod\n\tsnapshot     *snapv1.VolumeSnapshot\n}\n\nfunc (r *SnapshotBrowseRunner) RunSnapshotBrowse(ctx context.Context, args *types.SnapshotBrowseArgs) error {\n\tr.browserSteps = &snapshotBrowserSteps{\n\t\tvalidateOps: &validateOperations{\n\t\t\tkubeCli: r.KubeCli,\n\t\t\tdynCli:  r.DynCli,\n\t\t},\n\t\tversionFetchOps: &apiVersionFetch{\n\t\t\tkubeCli: r.KubeCli,\n\t\t},\n\t\tcreateAppOps: &applicationCreate{\n\t\t\tkubeCli: r.KubeCli,\n\t\t},\n\t\tportForwardOps: &portforward{},\n\t\tkubeExecutor: &kubeExec{\n\t\t\tkubeCli: r.KubeCli,\n\t\t},\n\t\tcleanerOps: &cleanse{\n\t\t\tkubeCli: r.KubeCli,\n\t\t\tdynCli:  r.DynCli,\n\t\t},\n\t}\n\treturn r.RunSnapshotBrowseHelper(ctx, args)\n}\n\nfunc (r *SnapshotBrowseRunner) RunSnapshotBrowseHelper(ctx context.Context, args *types.SnapshotBrowseArgs) error {\n\tdefer func() {\n\t\tfmt.Println(\"Cleaning up resources.\")\n\t\tr.browserSteps.Cleanup(ctx, r.pvc, r.pod)\n\t}()\n\n\tif r.KubeCli == nil || r.DynCli == nil {\n\t\treturn fmt.Errorf(\"cli uninitialized\")\n\t}\n\n\tfmt.Println(\"Fetching the snapshot.\")\n\tvs, sc, err := r.browserSteps.ValidateArgs(ctx, args)\n\tif err != nil {\n\t\treturn errors.Wrap(err, \"failed to validate arguments.\")\n\t}\n\tr.snapshot = vs\n\n\tfmt.Println(\"Creating the browser pod.\")\n\tr.pod, 
r.pvc, err = r.browserSteps.CreateInspectorApplication(ctx, args, r.snapshot, sc)\n\tif err != nil {\n\t\treturn errors.Wrap(err, \"failed to create inspector application.\")\n\t}\n\n\tif args.ShowTree {\n\t\tfmt.Println(\"Printing the tree structure from root directory.\")\n\t\tstdout, err := r.browserSteps.ExecuteTreeCommand(ctx, args, r.pod)\n\t\tif err != nil {\n\t\t\treturn errors.Wrap(err, \"failed to execute tree command in pod.\")\n\t\t}\n\t\tfmt.Printf(\"\\n%s\\n\\n\", stdout)\n\t\treturn nil\n\t}\n\n\tfmt.Println(\"Forwarding the port.\")\n\terr = r.browserSteps.PortForwardAPod(ctx, r.pod, args.LocalPort)\n\tif err != nil {\n\t\treturn errors.Wrap(err, \"failed to port forward Pod.\")\n\t}\n\n\treturn nil\n}\n\n//go:generate go run github.com/golang/mock/mockgen -destination=mocks/mock_snapshot_browser_stepper.go -package=mocks . SnapshotBrowserStepper\ntype SnapshotBrowserStepper interface {\n\tValidateArgs(ctx context.Context, args *types.SnapshotBrowseArgs) (*snapv1.VolumeSnapshot, *sv1.StorageClass, error)\n\tCreateInspectorApplication(ctx context.Context, args *types.SnapshotBrowseArgs, snapshot *snapv1.VolumeSnapshot, storageClass *sv1.StorageClass) (*v1.Pod, *v1.PersistentVolumeClaim, error)\n\tExecuteTreeCommand(ctx context.Context, args *types.SnapshotBrowseArgs, pod *v1.Pod) (string, error)\n\tPortForwardAPod(ctx context.Context, pod *v1.Pod, localPort int) error\n\tCleanup(ctx context.Context, pvc *v1.PersistentVolumeClaim, pod *v1.Pod)\n}\n\ntype snapshotBrowserSteps struct {\n\tvalidateOps          ArgumentValidator\n\tversionFetchOps      ApiVersionFetcher\n\tcreateAppOps         ApplicationCreator\n\tportForwardOps       PortForwarder\n\tcleanerOps           Cleaner\n\tkubeExecutor         KubeExecutor\n\tSnapshotGroupVersion *metav1.GroupVersionForDiscovery\n}\n\nfunc (s *snapshotBrowserSteps) ValidateArgs(ctx context.Context, args *types.SnapshotBrowseArgs) (*snapv1.VolumeSnapshot, *sv1.StorageClass, error) {\n\tif err := args.Validate(); 
err != nil {\n\t\treturn nil, nil, errors.Wrap(err, \"failed to validate input arguments\")\n\t}\n\tif err := s.validateOps.ValidateNamespace(ctx, args.Namespace); err != nil {\n\t\treturn nil, nil, errors.Wrap(err, \"failed to validate Namespace\")\n\t}\n\tgroupVersion, err := s.versionFetchOps.GetCSISnapshotGroupVersion()\n\tif err != nil {\n\t\treturn nil, nil, errors.Wrap(err, \"failed to fetch groupVersion\")\n\t}\n\ts.SnapshotGroupVersion = groupVersion\n\tsnapshot, err := s.validateOps.ValidateVolumeSnapshot(ctx, args.SnapshotName, args.Namespace, groupVersion)\n\tif err != nil {\n\t\treturn nil, nil, errors.Wrap(err, \"failed to validate VolumeSnapshot\")\n\t}\n\tpvc, err := s.validateOps.ValidatePVC(ctx, *snapshot.Spec.Source.PersistentVolumeClaimName, args.Namespace)\n\tif err != nil {\n\t\treturn nil, nil, errors.Wrap(err, \"failed to validate source PVC\")\n\t}\n\tsc, err := s.validateOps.ValidateStorageClass(ctx, *pvc.Spec.StorageClassName)\n\tif err != nil {\n\t\treturn nil, nil, errors.Wrap(err, \"failed to validate SC\")\n\t}\n\tuVSC, err := s.validateOps.ValidateVolumeSnapshotClass(ctx, *snapshot.Spec.VolumeSnapshotClassName, groupVersion)\n\tif err != nil {\n\t\treturn nil, nil, errors.Wrap(err, \"failed to validate VolumeSnapshotClass\")\n\t}\n\tvscDriver := getDriverNameFromUVSC(*uVSC, groupVersion.GroupVersion)\n\tif sc.Provisioner != vscDriver {\n\t\treturn nil, nil, fmt.Errorf(\"provisioner for StorageClass (%s) and VolumeSnapshotClass driver (%s) are different\", sc.Provisioner, vscDriver)\n\t}\n\treturn snapshot, sc, nil\n}\n\nfunc (s *snapshotBrowserSteps) CreateInspectorApplication(ctx context.Context, args *types.SnapshotBrowseArgs, snapshot *snapv1.VolumeSnapshot, storageClass *sv1.StorageClass) (*v1.Pod, *v1.PersistentVolumeClaim, error) {\n\tsnapshotAPIGroup := \"snapshot.storage.k8s.io\"\n\tsnapshotKind := \"VolumeSnapshot\"\n\tdataSource := &v1.TypedLocalObjectReference{\n\t\tAPIGroup: &snapshotAPIGroup,\n\t\tKind:     
snapshotKind,\n\t\tName:     snapshot.Name,\n\t}\n\tpvcArgs := &types.CreatePVCArgs{\n\t\tGenerateName: clonedPVCGenerateName,\n\t\tStorageClass: storageClass.Name,\n\t\tNamespace:    args.Namespace,\n\t\tDataSource:   dataSource,\n\t\tRestoreSize:  snapshot.Status.RestoreSize,\n\t}\n\tpvc, err := s.createAppOps.CreatePVC(ctx, pvcArgs)\n\tif err != nil {\n\t\treturn nil, nil, errors.Wrap(err, \"failed to restore PVC\")\n\t}\n\tpodArgs := &types.CreatePodArgs{\n\t\tGenerateName:   clonedPodGenerateName,\n\t\tNamespace:      args.Namespace,\n\t\tRunAsUser:      args.RunAsUser,\n\t\tContainerImage: \"filebrowser/filebrowser:v2\",\n\t\tContainerArgs:  []string{\"--noauth\", \"-r\", \"/snapshot-data\"},\n\t\tPVCMap: map[string]types.VolumePath{\n\t\t\tpvc.Name: {\n\t\t\t\tMountPath: \"/snapshot-data\",\n\t\t\t},\n\t\t},\n\t}\n\tif args.ShowTree {\n\t\tpodArgs = &types.CreatePodArgs{\n\t\t\tGenerateName:   clonedPodGenerateName,\n\t\t\tNamespace:      args.Namespace,\n\t\t\tRunAsUser:      args.RunAsUser,\n\t\t\tContainerImage: \"alpine:3.19\",\n\t\t\tCommand:        []string{\"/bin/sh\"},\n\t\t\tContainerArgs:  []string{\"-c\", \"while true; do sleep 3600; done\"},\n\t\t\tPVCMap: map[string]types.VolumePath{\n\t\t\t\tpvc.Name: {\n\t\t\t\t\tMountPath: \"/snapshot-data\",\n\t\t\t\t},\n\t\t\t},\n\t\t}\n\t}\n\tpod, err := s.createAppOps.CreatePod(ctx, podArgs)\n\tif err != nil {\n\t\treturn nil, pvc, errors.Wrap(err, \"failed to create browse Pod\")\n\t}\n\tif err = s.createAppOps.WaitForPodReady(ctx, args.Namespace, pod.Name); err != nil {\n\t\treturn pod, pvc, errors.Wrap(err, \"pod failed to become ready\")\n\t}\n\treturn pod, pvc, nil\n}\n\nfunc (s *snapshotBrowserSteps) ExecuteTreeCommand(ctx context.Context, args *types.SnapshotBrowseArgs, pod *v1.Pod) (string, error) {\n\tcommand := []string{\"tree\", \"/snapshot-data\"}\n\tstdout, err := s.kubeExecutor.Exec(ctx, args.Namespace, pod.Name, pod.Spec.Containers[0].Name, command)\n\tif err != nil {\n\t\treturn \"\", 
errors.Wrapf(err, \"error running command:(%v)\", command)\n\t}\n\treturn stdout, nil\n}\n\nfunc (s *snapshotBrowserSteps) PortForwardAPod(ctx context.Context, pod *v1.Pod, localPort int) error {\n\tvar wg sync.WaitGroup\n\twg.Add(1)\n\tstopChan, readyChan, errChan := make(chan struct{}, 1), make(chan struct{}, 1), make(chan string)\n\tout, errOut := new(bytes.Buffer), new(bytes.Buffer)\n\tcfg, err := s.portForwardOps.FetchRestConfig()\n\tif err != nil {\n\t\treturn errors.New(\"failed to fetch rest config\")\n\t}\n\tsigs := make(chan os.Signal, 1)\n\tsignal.Notify(sigs, syscall.SIGINT, syscall.SIGTERM)\n\tgo func() {\n\t\t<-sigs\n\t\tfmt.Println(\"Stopping port forward\")\n\t\tclose(stopChan)\n\t\twg.Done()\n\t}()\n\n\tgo func() {\n\t\tpfArgs := &types.PortForwardAPodRequest{\n\t\t\tRestConfig:   cfg,\n\t\t\tPod:          pod,\n\t\t\tLocalPort:    localPort,\n\t\t\tPodPort:      80,\n\t\t\tOutStream:    bytes.Buffer(*out),\n\t\t\tErrOutStream: bytes.Buffer(*errOut),\n\t\t\tStopCh:       stopChan,\n\t\t\tReadyCh:      readyChan,\n\t\t}\n\t\terr = s.portForwardOps.PortForwardAPod(pfArgs)\n\t\tif err != nil {\n\t\t\terrChan <- fmt.Sprintf(\"Failed to port forward (%s)\", err.Error())\n\t\t}\n\t}()\n\n\tselect {\n\tcase <-readyChan:\n\t\turl := fmt.Sprintf(\"http://localhost:%d/\", localPort)\n\t\tfmt.Printf(\"Port forwarding is ready to get traffic. visit %s\\n\", url)\n\t\topenbrowser(url)\n\t\twg.Wait()\n\tcase msg := <-errChan:\n\t\treturn errors.New(msg)\n\t}\n\n\treturn nil\n}\n\nfunc (s *snapshotBrowserSteps) Cleanup(ctx context.Context, pvc *v1.PersistentVolumeClaim, pod *v1.Pod) {\n\tif pvc != nil {\n\t\terr := s.cleanerOps.DeletePVC(ctx, pvc.Name, pvc.Namespace)\n\t\tif err != nil {\n\t\t\tfmt.Println(\"Failed to delete PVC\", pvc)\n\t\t}\n\t}\n\tif pod != nil {\n\t\terr := s.cleanerOps.DeletePod(ctx, pod.Name, pod.Namespace)\n\t\tif err != nil {\n\t\t\tfmt.Println(\"Failed to delete Pod\", pod)\n\t\t}\n\t}\n}\n"
  },
  {
    "path": "pkg/csi/snapshot_inspector_steps_test.go",
    "content": "package csi\n\nimport (\n\t\"context\"\n\t\"fmt\"\n\n\t\"github.com/golang/mock/gomock\"\n\t\"github.com/kastenhq/kubestr/pkg/common\"\n\t\"github.com/kastenhq/kubestr/pkg/csi/mocks\"\n\t\"github.com/kastenhq/kubestr/pkg/csi/types\"\n\tsnapv1 \"github.com/kubernetes-csi/external-snapshotter/client/v4/apis/volumesnapshot/v1\"\n\t. \"gopkg.in/check.v1\"\n\tv1 \"k8s.io/api/core/v1\"\n\tsv1 \"k8s.io/api/storage/v1\"\n\t\"k8s.io/apimachinery/pkg/api/resource\"\n\tmetav1 \"k8s.io/apimachinery/pkg/apis/meta/v1\"\n\t\"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured\"\n)\n\nfunc (s *CSITestSuite) TestSnapshotBrowseValidateArgs(c *C) {\n\tctx := context.Background()\n\tscName := \"sc\"\n\tvscName := \"vsc\"\n\tpvcName := \"pvc\"\n\ttype fields struct {\n\t\tvalidateOps *mocks.MockArgumentValidator\n\t\tversionOps  *mocks.MockApiVersionFetcher\n\t}\n\tfor _, tc := range []struct {\n\t\targs       *types.SnapshotBrowseArgs\n\t\tprepare    func(f *fields)\n\t\terrChecker Checker\n\t}{\n\t\t{ // valid args\n\t\t\targs: &types.SnapshotBrowseArgs{\n\t\t\t\tSnapshotName: \"vs\",\n\t\t\t\tNamespace:    \"ns\",\n\t\t\t},\n\t\t\tprepare: func(f *fields) {\n\t\t\t\tgomock.InOrder(\n\t\t\t\t\tf.validateOps.EXPECT().ValidateNamespace(gomock.Any(), \"ns\").Return(nil),\n\t\t\t\t\tf.versionOps.EXPECT().GetCSISnapshotGroupVersion().Return(\n\t\t\t\t\t\t&metav1.GroupVersionForDiscovery{\n\t\t\t\t\t\t\tGroupVersion: common.SnapshotVersion,\n\t\t\t\t\t\t}, nil),\n\t\t\t\t\tf.validateOps.EXPECT().ValidateVolumeSnapshot(gomock.Any(), \"vs\", \"ns\", gomock.Any()).Return(\n\t\t\t\t\t\t&snapv1.VolumeSnapshot{\n\t\t\t\t\t\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\t\t\t\t\t\tName:      \"vs\",\n\t\t\t\t\t\t\t\tNamespace: \"ns\",\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\tSpec: snapv1.VolumeSnapshotSpec{\n\t\t\t\t\t\t\t\tSource: snapv1.VolumeSnapshotSource{\n\t\t\t\t\t\t\t\t\tPersistentVolumeClaimName: &pvcName,\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\tVolumeSnapshotClassName: 
&vscName,\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t}, nil,\n\t\t\t\t\t),\n\t\t\t\t\tf.validateOps.EXPECT().ValidatePVC(gomock.Any(), \"pvc\", \"ns\").Return(\n\t\t\t\t\t\t&v1.PersistentVolumeClaim{\n\t\t\t\t\t\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\t\t\t\t\t\tName:      \"pvc\",\n\t\t\t\t\t\t\t\tNamespace: \"ns\",\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\tSpec: v1.PersistentVolumeClaimSpec{\n\t\t\t\t\t\t\t\tVolumeName:       \"vol\",\n\t\t\t\t\t\t\t\tStorageClassName: &scName,\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t}, nil,\n\t\t\t\t\t),\n\t\t\t\t\tf.validateOps.EXPECT().ValidateStorageClass(gomock.Any(), scName).Return(\n\t\t\t\t\t\t&sv1.StorageClass{\n\t\t\t\t\t\t\tProvisioner: \"p1\",\n\t\t\t\t\t\t}, nil),\n\t\t\t\t\tf.validateOps.EXPECT().ValidateVolumeSnapshotClass(gomock.Any(), \"vsc\", &metav1.GroupVersionForDiscovery{\n\t\t\t\t\t\tGroupVersion: common.SnapshotVersion,\n\t\t\t\t\t}).Return(&unstructured.Unstructured{\n\t\t\t\t\t\tObject: map[string]interface{}{\n\t\t\t\t\t\t\tcommon.VolSnapClassDriverKey: \"p1\",\n\t\t\t\t\t\t},\n\t\t\t\t\t}, nil),\n\t\t\t\t)\n\t\t\t},\n\t\t\terrChecker: IsNil,\n\t\t},\n\t\t{ // driver mismatch\n\t\t\targs: &types.SnapshotBrowseArgs{\n\t\t\t\tSnapshotName: \"vs\",\n\t\t\t\tNamespace:    \"ns\",\n\t\t\t},\n\t\t\tprepare: func(f *fields) {\n\t\t\t\tgomock.InOrder(\n\t\t\t\t\tf.validateOps.EXPECT().ValidateNamespace(gomock.Any(), \"ns\").Return(nil),\n\t\t\t\t\tf.versionOps.EXPECT().GetCSISnapshotGroupVersion().Return(\n\t\t\t\t\t\t&metav1.GroupVersionForDiscovery{\n\t\t\t\t\t\t\tGroupVersion: common.SnapshotVersion,\n\t\t\t\t\t\t}, nil),\n\t\t\t\t\tf.validateOps.EXPECT().ValidateVolumeSnapshot(gomock.Any(), \"vs\", \"ns\", gomock.Any()).Return(\n\t\t\t\t\t\t&snapv1.VolumeSnapshot{\n\t\t\t\t\t\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\t\t\t\t\t\tName:      \"vs\",\n\t\t\t\t\t\t\t\tNamespace: \"ns\",\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\tSpec: snapv1.VolumeSnapshotSpec{\n\t\t\t\t\t\t\t\tSource: 
snapv1.VolumeSnapshotSource{\n\t\t\t\t\t\t\t\t\tPersistentVolumeClaimName: &pvcName,\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\tVolumeSnapshotClassName: &vscName,\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t}, nil,\n\t\t\t\t\t),\n\t\t\t\t\tf.validateOps.EXPECT().ValidatePVC(gomock.Any(), \"pvc\", \"ns\").Return(\n\t\t\t\t\t\t&v1.PersistentVolumeClaim{\n\t\t\t\t\t\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\t\t\t\t\t\tName:      \"pvc\",\n\t\t\t\t\t\t\t\tNamespace: \"ns\",\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\tSpec: v1.PersistentVolumeClaimSpec{\n\t\t\t\t\t\t\t\tVolumeName:       \"vol\",\n\t\t\t\t\t\t\t\tStorageClassName: &scName,\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t}, nil,\n\t\t\t\t\t),\n\t\t\t\t\tf.validateOps.EXPECT().ValidateStorageClass(gomock.Any(), gomock.Any()).Return(\n\t\t\t\t\t\t&sv1.StorageClass{\n\t\t\t\t\t\t\tProvisioner: \"p1\",\n\t\t\t\t\t\t}, nil),\n\t\t\t\t\tf.validateOps.EXPECT().ValidateVolumeSnapshotClass(gomock.Any(), \"vsc\", &metav1.GroupVersionForDiscovery{\n\t\t\t\t\t\tGroupVersion: common.SnapshotVersion,\n\t\t\t\t\t}).Return(&unstructured.Unstructured{\n\t\t\t\t\t\tObject: map[string]interface{}{\n\t\t\t\t\t\t\tcommon.VolSnapClassDriverKey: \"p2\",\n\t\t\t\t\t\t},\n\t\t\t\t\t}, nil),\n\t\t\t\t)\n\t\t\t},\n\t\t\terrChecker: NotNil,\n\t\t},\n\t\t{ // vsc error\n\t\t\targs: &types.SnapshotBrowseArgs{\n\t\t\t\tSnapshotName: \"vs\",\n\t\t\t\tNamespace:    \"ns\",\n\t\t\t},\n\t\t\tprepare: func(f *fields) {\n\t\t\t\tgomock.InOrder(\n\t\t\t\t\tf.validateOps.EXPECT().ValidateNamespace(gomock.Any(), \"ns\").Return(nil),\n\t\t\t\t\tf.versionOps.EXPECT().GetCSISnapshotGroupVersion().Return(nil, nil),\n\t\t\t\t\tf.validateOps.EXPECT().ValidateVolumeSnapshot(gomock.Any(), \"vs\", \"ns\", gomock.Any()).Return(\n\t\t\t\t\t\t&snapv1.VolumeSnapshot{\n\t\t\t\t\t\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\t\t\t\t\t\tName:      \"vs\",\n\t\t\t\t\t\t\t\tNamespace: \"ns\",\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\tSpec: snapv1.VolumeSnapshotSpec{\n\t\t\t\t\t\t\t\tSource: 
snapv1.VolumeSnapshotSource{\n\t\t\t\t\t\t\t\t\tPersistentVolumeClaimName: &pvcName,\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\tVolumeSnapshotClassName: &vscName,\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t}, nil,\n\t\t\t\t\t),\n\t\t\t\t\tf.validateOps.EXPECT().ValidatePVC(gomock.Any(), \"pvc\", \"ns\").Return(\n\t\t\t\t\t\t&v1.PersistentVolumeClaim{\n\t\t\t\t\t\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\t\t\t\t\t\tName:      \"pvc\",\n\t\t\t\t\t\t\t\tNamespace: \"ns\",\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\tSpec: v1.PersistentVolumeClaimSpec{\n\t\t\t\t\t\t\t\tVolumeName:       \"vol\",\n\t\t\t\t\t\t\t\tStorageClassName: &scName,\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t}, nil,\n\t\t\t\t\t),\n\t\t\t\t\tf.validateOps.EXPECT().ValidateStorageClass(gomock.Any(), gomock.Any()).Return(nil, nil),\n\t\t\t\t\tf.validateOps.EXPECT().ValidateVolumeSnapshotClass(gomock.Any(), gomock.Any(), gomock.Any()).Return(nil, fmt.Errorf(\"vsc error\")),\n\t\t\t\t)\n\t\t\t},\n\t\t\terrChecker: NotNil,\n\t\t},\n\t\t{ // get driver version error\n\t\t\targs: &types.SnapshotBrowseArgs{\n\t\t\t\tSnapshotName: \"vs\",\n\t\t\t\tNamespace:    \"ns\",\n\t\t\t},\n\t\t\tprepare: func(f *fields) {\n\t\t\t\tgomock.InOrder(\n\t\t\t\t\tf.validateOps.EXPECT().ValidateNamespace(gomock.Any(), \"ns\").Return(nil),\n\t\t\t\t\tf.versionOps.EXPECT().GetCSISnapshotGroupVersion().Return(nil, fmt.Errorf(\"driver version error\")),\n\t\t\t\t)\n\t\t\t},\n\t\t\terrChecker: NotNil,\n\t\t},\n\t\t{ // sc error\n\t\t\targs: &types.SnapshotBrowseArgs{\n\t\t\t\tSnapshotName: \"vs\",\n\t\t\t\tNamespace:    \"ns\",\n\t\t\t},\n\t\t\tprepare: func(f *fields) {\n\t\t\t\tgomock.InOrder(\n\t\t\t\t\tf.validateOps.EXPECT().ValidateNamespace(gomock.Any(), \"ns\").Return(nil),\n\t\t\t\t\tf.versionOps.EXPECT().GetCSISnapshotGroupVersion().Return(nil, nil),\n\t\t\t\t\tf.validateOps.EXPECT().ValidateVolumeSnapshot(gomock.Any(), \"vs\", \"ns\", gomock.Any()).Return(\n\t\t\t\t\t\t&snapv1.VolumeSnapshot{\n\t\t\t\t\t\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\t\t\t\t\t\tName: 
     \"vs\",\n\t\t\t\t\t\t\t\tNamespace: \"ns\",\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\tSpec: snapv1.VolumeSnapshotSpec{\n\t\t\t\t\t\t\t\tSource: snapv1.VolumeSnapshotSource{\n\t\t\t\t\t\t\t\t\tPersistentVolumeClaimName: &pvcName,\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\tVolumeSnapshotClassName: &vscName,\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t}, nil,\n\t\t\t\t\t),\n\t\t\t\t\tf.validateOps.EXPECT().ValidatePVC(gomock.Any(), \"pvc\", \"ns\").Return(\n\t\t\t\t\t\t&v1.PersistentVolumeClaim{\n\t\t\t\t\t\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\t\t\t\t\t\tName:      \"pvc\",\n\t\t\t\t\t\t\t\tNamespace: \"ns\",\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\tSpec: v1.PersistentVolumeClaimSpec{\n\t\t\t\t\t\t\t\tVolumeName:       \"vol\",\n\t\t\t\t\t\t\t\tStorageClassName: &scName,\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t}, nil,\n\t\t\t\t\t),\n\t\t\t\t\tf.validateOps.EXPECT().ValidateStorageClass(gomock.Any(), gomock.Any()).Return(nil, fmt.Errorf(\"sc error\")),\n\t\t\t\t)\n\t\t\t},\n\t\t\terrChecker: NotNil,\n\t\t},\n\t\t{ // validate vs error\n\t\t\targs: &types.SnapshotBrowseArgs{\n\t\t\t\tSnapshotName: \"vs\",\n\t\t\t\tNamespace:    \"ns\",\n\t\t\t},\n\t\t\tprepare: func(f *fields) {\n\t\t\t\tgomock.InOrder(\n\t\t\t\t\tf.validateOps.EXPECT().ValidateNamespace(gomock.Any(), \"ns\").Return(nil),\n\t\t\t\t\tf.versionOps.EXPECT().GetCSISnapshotGroupVersion().Return(nil, nil),\n\t\t\t\t\tf.validateOps.EXPECT().ValidateVolumeSnapshot(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).Return(nil, fmt.Errorf(\"validate vs error\")),\n\t\t\t\t)\n\t\t\t},\n\t\t\terrChecker: NotNil,\n\t\t},\n\t\t{ // validate ns error\n\t\t\targs: &types.SnapshotBrowseArgs{\n\t\t\t\tSnapshotName: \"vs\",\n\t\t\t\tNamespace:    \"ns\",\n\t\t\t},\n\t\t\tprepare: func(f *fields) {\n\t\t\t\tgomock.InOrder(\n\t\t\t\t\tf.validateOps.EXPECT().ValidateNamespace(gomock.Any(), \"ns\").Return(fmt.Errorf(\"validate ns error\")),\n\t\t\t\t)\n\t\t\t},\n\t\t\terrChecker: NotNil,\n\t\t},\n\t\t{ // validate vs error\n\t\t\targs: 
&types.SnapshotBrowseArgs{\n\t\t\t\tSnapshotName: \"\",\n\t\t\t\tNamespace:    \"ns\",\n\t\t\t},\n\t\t\terrChecker: NotNil,\n\t\t},\n\t\t{ // validate ns error\n\t\t\targs: &types.SnapshotBrowseArgs{\n\t\t\t\tSnapshotName: \"dfd\",\n\t\t\t\tNamespace:    \"\",\n\t\t\t},\n\t\t\terrChecker: NotNil,\n\t\t},\n\t} {\n\t\tctrl := gomock.NewController(c)\n\t\tdefer ctrl.Finish()\n\t\tf := fields{\n\t\t\tvalidateOps: mocks.NewMockArgumentValidator(ctrl),\n\t\t\tversionOps:  mocks.NewMockApiVersionFetcher(ctrl),\n\t\t}\n\t\tif tc.prepare != nil {\n\t\t\ttc.prepare(&f)\n\t\t}\n\t\tstepper := &snapshotBrowserSteps{\n\t\t\tvalidateOps:     f.validateOps,\n\t\t\tversionFetchOps: f.versionOps,\n\t\t}\n\t\t_, _, err := stepper.ValidateArgs(ctx, tc.args)\n\t\tc.Check(err, tc.errChecker)\n\t}\n}\n\nfunc (s *CSITestSuite) TestCreateInspectorApplicationForSnapshot(c *C) {\n\tctx := context.Background()\n\tresourceQuantity := resource.MustParse(\"1Gi\")\n\tsnapshotAPIGroup := \"snapshot.storage.k8s.io\"\n\ttype fields struct {\n\t\tcreateAppOps *mocks.MockApplicationCreator\n\t}\n\tfor _, tc := range []struct {\n\t\targs       *types.SnapshotBrowseArgs\n\t\tsnapshot   *snapv1.VolumeSnapshot\n\t\tsc         *sv1.StorageClass\n\t\tprepare    func(f *fields)\n\t\terrChecker Checker\n\t\tpodChecker Checker\n\t\tpvcChecker Checker\n\t}{\n\t\t{\n\t\t\targs: &types.SnapshotBrowseArgs{\n\t\t\t\tNamespace: \"ns\",\n\t\t\t\tRunAsUser: 100,\n\t\t\t},\n\t\t\tsc: &sv1.StorageClass{\n\t\t\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\t\t\tName: \"sc\",\n\t\t\t\t},\n\t\t\t},\n\t\t\tsnapshot: &snapv1.VolumeSnapshot{\n\t\t\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\t\t\tName: \"vs\",\n\t\t\t\t},\n\t\t\t\tStatus: &snapv1.VolumeSnapshotStatus{\n\t\t\t\t\tRestoreSize: &resourceQuantity,\n\t\t\t\t},\n\t\t\t},\n\t\t\tprepare: func(f *fields) {\n\t\t\t\tgomock.InOrder(\n\t\t\t\t\tf.createAppOps.EXPECT().CreatePVC(gomock.Any(), &types.CreatePVCArgs{\n\t\t\t\t\t\tGenerateName: 
clonedPVCGenerateName,\n\t\t\t\t\t\tStorageClass: \"sc\",\n\t\t\t\t\t\tNamespace:    \"ns\",\n\t\t\t\t\t\tDataSource: &v1.TypedLocalObjectReference{\n\t\t\t\t\t\t\tAPIGroup: &snapshotAPIGroup,\n\t\t\t\t\t\t\tKind:     \"VolumeSnapshot\",\n\t\t\t\t\t\t\tName:     \"vs\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\tRestoreSize: &resourceQuantity,\n\t\t\t\t\t}).Return(&v1.PersistentVolumeClaim{\n\t\t\t\t\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\t\t\t\t\tName: \"pvc\",\n\t\t\t\t\t\t},\n\t\t\t\t\t}, nil),\n\t\t\t\t\tf.createAppOps.EXPECT().CreatePod(gomock.Any(), &types.CreatePodArgs{\n\t\t\t\t\t\tGenerateName:   clonedPodGenerateName,\n\t\t\t\t\t\tNamespace:      \"ns\",\n\t\t\t\t\t\tContainerArgs:  []string{\"--noauth\", \"-r\", \"/snapshot-data\"},\n\t\t\t\t\t\tRunAsUser:      100,\n\t\t\t\t\t\tContainerImage: \"filebrowser/filebrowser:v2\",\n\t\t\t\t\t\tPVCMap: map[string]types.VolumePath{\n\t\t\t\t\t\t\t\"pvc\": {\n\t\t\t\t\t\t\t\tMountPath: \"/snapshot-data\",\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t}).Return(&v1.Pod{\n\t\t\t\t\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\t\t\t\t\tName: \"pod\",\n\t\t\t\t\t\t},\n\t\t\t\t\t}, nil),\n\t\t\t\t\tf.createAppOps.EXPECT().WaitForPodReady(gomock.Any(), \"ns\", \"pod\").Return(nil),\n\t\t\t\t)\n\t\t\t},\n\t\t\terrChecker: IsNil,\n\t\t\tpodChecker: NotNil,\n\t\t\tpvcChecker: NotNil,\n\t\t},\n\t\t{\n\t\t\targs: &types.SnapshotBrowseArgs{\n\t\t\t\tNamespace: \"ns\",\n\t\t\t\tRunAsUser: 100,\n\t\t\t},\n\t\t\tsc: &sv1.StorageClass{\n\t\t\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\t\t\tName: \"sc\",\n\t\t\t\t},\n\t\t\t},\n\t\t\tsnapshot: &snapv1.VolumeSnapshot{\n\t\t\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\t\t\tName: \"vs\",\n\t\t\t\t},\n\t\t\t\tStatus: &snapv1.VolumeSnapshotStatus{\n\t\t\t\t\tRestoreSize: &resourceQuantity,\n\t\t\t\t},\n\t\t\t},\n\t\t\tprepare: func(f *fields) {\n\t\t\t\tgomock.InOrder(\n\t\t\t\t\tf.createAppOps.EXPECT().CreatePVC(gomock.Any(), &types.CreatePVCArgs{\n\t\t\t\t\t\tGenerateName: 
clonedPVCGenerateName,\n\t\t\t\t\t\tStorageClass: \"sc\",\n\t\t\t\t\t\tNamespace:    \"ns\",\n\t\t\t\t\t\tDataSource: &v1.TypedLocalObjectReference{\n\t\t\t\t\t\t\tAPIGroup: &snapshotAPIGroup,\n\t\t\t\t\t\t\tKind:     \"VolumeSnapshot\",\n\t\t\t\t\t\t\tName:     \"vs\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\tRestoreSize: &resourceQuantity,\n\t\t\t\t\t}).Return(&v1.PersistentVolumeClaim{\n\t\t\t\t\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\t\t\t\t\tName: \"pvc\",\n\t\t\t\t\t\t},\n\t\t\t\t\t}, nil),\n\t\t\t\t\tf.createAppOps.EXPECT().CreatePod(gomock.Any(), &types.CreatePodArgs{\n\t\t\t\t\t\tGenerateName:   clonedPodGenerateName,\n\t\t\t\t\t\tNamespace:      \"ns\",\n\t\t\t\t\t\tContainerArgs:  []string{\"--noauth\", \"-r\", \"/snapshot-data\"},\n\t\t\t\t\t\tRunAsUser:      100,\n\t\t\t\t\t\tContainerImage: \"filebrowser/filebrowser:v2\",\n\t\t\t\t\t\tPVCMap: map[string]types.VolumePath{\n\t\t\t\t\t\t\t\"pvc\": {\n\t\t\t\t\t\t\t\tMountPath: \"/snapshot-data\",\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t}).Return(&v1.Pod{\n\t\t\t\t\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\t\t\t\t\tName: \"pod\",\n\t\t\t\t\t\t},\n\t\t\t\t\t}, nil),\n\t\t\t\t\tf.createAppOps.EXPECT().WaitForPodReady(gomock.Any(), \"ns\", \"pod\").Return(fmt.Errorf(\"pod ready error\")),\n\t\t\t\t)\n\t\t\t},\n\t\t\terrChecker: NotNil,\n\t\t\tpodChecker: NotNil,\n\t\t\tpvcChecker: NotNil,\n\t\t},\n\t\t{\n\t\t\targs: &types.SnapshotBrowseArgs{\n\t\t\t\tNamespace: \"ns\",\n\t\t\t\tRunAsUser: 100,\n\t\t\t},\n\t\t\tsc: &sv1.StorageClass{\n\t\t\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\t\t\tName: \"sc\",\n\t\t\t\t},\n\t\t\t},\n\t\t\tsnapshot: &snapv1.VolumeSnapshot{\n\t\t\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\t\t\tName: \"vs\",\n\t\t\t\t},\n\t\t\t\tStatus: &snapv1.VolumeSnapshotStatus{\n\t\t\t\t\tRestoreSize: &resourceQuantity,\n\t\t\t\t},\n\t\t\t},\n\t\t\tprepare: func(f *fields) {\n\t\t\t\tgomock.InOrder(\n\t\t\t\t\tf.createAppOps.EXPECT().CreatePVC(gomock.Any(), 
gomock.Any()).Return(&v1.PersistentVolumeClaim{\n\t\t\t\t\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\t\t\t\t\tName: \"pvc\",\n\t\t\t\t\t\t},\n\t\t\t\t\t}, nil),\n\t\t\t\t\tf.createAppOps.EXPECT().CreatePod(gomock.Any(), gomock.Any()).Return(nil, fmt.Errorf(\"pod  error\")),\n\t\t\t\t)\n\t\t\t},\n\t\t\terrChecker: NotNil,\n\t\t\tpodChecker: IsNil,\n\t\t\tpvcChecker: NotNil,\n\t\t},\n\t\t{\n\t\t\targs: &types.SnapshotBrowseArgs{\n\t\t\t\tNamespace: \"ns\",\n\t\t\t\tRunAsUser: 100,\n\t\t\t},\n\t\t\tsc: &sv1.StorageClass{\n\t\t\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\t\t\tName: \"sc\",\n\t\t\t\t},\n\t\t\t},\n\t\t\tsnapshot: &snapv1.VolumeSnapshot{\n\t\t\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\t\t\tName: \"vs\",\n\t\t\t\t},\n\t\t\t\tStatus: &snapv1.VolumeSnapshotStatus{\n\t\t\t\t\tRestoreSize: &resourceQuantity,\n\t\t\t\t},\n\t\t\t},\n\t\t\tprepare: func(f *fields) {\n\t\t\t\tgomock.InOrder(\n\t\t\t\t\tf.createAppOps.EXPECT().CreatePVC(gomock.Any(), gomock.Any()).Return(nil, fmt.Errorf(\"error\")),\n\t\t\t\t)\n\t\t\t},\n\t\t\terrChecker: NotNil,\n\t\t\tpodChecker: IsNil,\n\t\t\tpvcChecker: IsNil,\n\t\t},\n\t} {\n\t\tctrl := gomock.NewController(c)\n\t\tdefer ctrl.Finish()\n\t\tf := fields{\n\t\t\tcreateAppOps: mocks.NewMockApplicationCreator(ctrl),\n\t\t}\n\t\tif tc.prepare != nil {\n\t\t\ttc.prepare(&f)\n\t\t}\n\t\tstepper := &snapshotBrowserSteps{\n\t\t\tcreateAppOps: f.createAppOps,\n\t\t}\n\t\tpod, pvc, err := stepper.CreateInspectorApplication(ctx, tc.args, tc.snapshot, tc.sc)\n\t\tc.Check(err, tc.errChecker)\n\t\tc.Check(pod, tc.podChecker)\n\t\tc.Check(pvc, tc.pvcChecker)\n\t}\n}\n\nfunc (s *CSITestSuite) TestSnapshotBrowseCleanup(c *C) {\n\tctx := context.Background()\n\tgroupversion := &metav1.GroupVersionForDiscovery{\n\t\tGroupVersion: \"gv\",\n\t\tVersion:      \"v\",\n\t}\n\ttype fields struct {\n\t\tcleanerOps *mocks.MockCleaner\n\t}\n\tfor _, tc := range []struct {\n\t\tpvc     *v1.PersistentVolumeClaim\n\t\tpod     *v1.Pod\n\t\tprepare func(f 
*fields)\n\t}{\n\t\t{\n\t\t\tpvc: &v1.PersistentVolumeClaim{\n\t\t\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\t\t\tName:      \"pvc\",\n\t\t\t\t\tNamespace: \"ns\",\n\t\t\t\t},\n\t\t\t},\n\t\t\tpod: &v1.Pod{\n\t\t\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\t\t\tName:      \"pod\",\n\t\t\t\t\tNamespace: \"ns\",\n\t\t\t\t},\n\t\t\t},\n\t\t\tprepare: func(f *fields) {\n\t\t\t\tgomock.InOrder(\n\t\t\t\t\tf.cleanerOps.EXPECT().DeletePVC(ctx, \"pvc\", \"ns\").Return(nil),\n\t\t\t\t\tf.cleanerOps.EXPECT().DeletePod(ctx, \"pod\", \"ns\").Return(nil),\n\t\t\t\t)\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tpvc: &v1.PersistentVolumeClaim{\n\t\t\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\t\t\tName:      \"pvc\",\n\t\t\t\t\tNamespace: \"ns\",\n\t\t\t\t},\n\t\t\t},\n\t\t\tpod: &v1.Pod{\n\t\t\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\t\t\tName:      \"pod\",\n\t\t\t\t\tNamespace: \"ns\",\n\t\t\t\t},\n\t\t\t},\n\t\t\tprepare: func(f *fields) {\n\t\t\t\tgomock.InOrder(\n\t\t\t\t\tf.cleanerOps.EXPECT().DeletePVC(ctx, \"pvc\", \"ns\").Return(fmt.Errorf(\"err\")),\n\t\t\t\t\tf.cleanerOps.EXPECT().DeletePod(ctx, \"pod\", \"ns\").Return(fmt.Errorf(\"err\")),\n\t\t\t\t)\n\t\t\t},\n\t\t},\n\t} {\n\t\tctrl := gomock.NewController(c)\n\t\tdefer ctrl.Finish()\n\t\tf := fields{\n\t\t\tcleanerOps: mocks.NewMockCleaner(ctrl),\n\t\t}\n\t\tif tc.prepare != nil {\n\t\t\ttc.prepare(&f)\n\t\t}\n\t\tstepper := &snapshotBrowserSteps{\n\t\t\tcleanerOps:           f.cleanerOps,\n\t\t\tSnapshotGroupVersion: groupversion,\n\t\t}\n\t\tstepper.Cleanup(ctx, tc.pvc, tc.pod)\n\t}\n}\n"
  },
  {
    "path": "pkg/csi/snapshot_inspector_test.go",
    "content": "package csi\n\nimport (\n\t\"context\"\n\t\"fmt\"\n\n\t\"github.com/golang/mock/gomock\"\n\t\"github.com/kastenhq/kubestr/pkg/csi/mocks\"\n\t\"github.com/kastenhq/kubestr/pkg/csi/types\"\n\tsnapv1 \"github.com/kubernetes-csi/external-snapshotter/client/v4/apis/volumesnapshot/v1\"\n\t. \"gopkg.in/check.v1\"\n\tv1 \"k8s.io/api/core/v1\"\n\tsv1 \"k8s.io/api/storage/v1\"\n\tmetav1 \"k8s.io/apimachinery/pkg/apis/meta/v1\"\n\t\"k8s.io/apimachinery/pkg/runtime\"\n\t\"k8s.io/client-go/dynamic\"\n\tfakedynamic \"k8s.io/client-go/dynamic/fake\"\n\t\"k8s.io/client-go/kubernetes\"\n\t\"k8s.io/client-go/kubernetes/fake\"\n)\n\nfunc (s *CSITestSuite) TestRunSnapshotBrowseHelper(c *C) {\n\tctx := context.Background()\n\ttype fields struct {\n\t\tstepperOps *mocks.MockSnapshotBrowserStepper\n\t}\n\tfor _, tc := range []struct {\n\t\tkubeCli    kubernetes.Interface\n\t\tdynCli     dynamic.Interface\n\t\targs       *types.SnapshotBrowseArgs\n\t\tprepare    func(f *fields)\n\t\terrChecker Checker\n\t}{\n\t\t{\n\t\t\t// success\n\t\t\tkubeCli: fake.NewSimpleClientset(),\n\t\t\tdynCli:  fakedynamic.NewSimpleDynamicClient(runtime.NewScheme()),\n\t\t\targs:    &types.SnapshotBrowseArgs{},\n\t\t\tprepare: func(f *fields) {\n\t\t\t\tgomock.InOrder(\n\t\t\t\t\tf.stepperOps.EXPECT().ValidateArgs(gomock.Any(), gomock.Any()).Return(\n\t\t\t\t\t\t&snapv1.VolumeSnapshot{}, &sv1.StorageClass{}, nil,\n\t\t\t\t\t),\n\t\t\t\t\tf.stepperOps.EXPECT().CreateInspectorApplication(gomock.Any(), gomock.Any(),\n\t\t\t\t\t\t&snapv1.VolumeSnapshot{}, &sv1.StorageClass{},\n\t\t\t\t\t).Return(\n\t\t\t\t\t\t&v1.Pod{\n\t\t\t\t\t\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\t\t\t\t\t\tName:      \"pod1\",\n\t\t\t\t\t\t\t\tNamespace: \"ns\",\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t\t&v1.PersistentVolumeClaim{\n\t\t\t\t\t\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\t\t\t\t\t\tName:      \"pvc1\",\n\t\t\t\t\t\t\t\tNamespace: 
\"ns\",\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tnil,\n\t\t\t\t\t),\n\t\t\t\t\tf.stepperOps.EXPECT().PortForwardAPod(gomock.Any(),\n\t\t\t\t\t\t&v1.Pod{\n\t\t\t\t\t\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\t\t\t\t\t\tName:      \"pod1\",\n\t\t\t\t\t\t\t\tNamespace: \"ns\",\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t}, gomock.Any(),\n\t\t\t\t\t).Return(nil),\n\t\t\t\t\tf.stepperOps.EXPECT().Cleanup(gomock.Any(),\n\t\t\t\t\t\t&v1.PersistentVolumeClaim{\n\t\t\t\t\t\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\t\t\t\t\t\tName:      \"pvc1\",\n\t\t\t\t\t\t\t\tNamespace: \"ns\",\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t\t&v1.Pod{\n\t\t\t\t\t\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\t\t\t\t\t\tName:      \"pod1\",\n\t\t\t\t\t\t\t\tNamespace: \"ns\",\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t),\n\t\t\t\t)\n\t\t\t},\n\t\t\terrChecker: IsNil,\n\t\t},\n\t\t{\n\t\t\t// portforward failure\n\t\t\tkubeCli: fake.NewSimpleClientset(),\n\t\t\tdynCli:  fakedynamic.NewSimpleDynamicClient(runtime.NewScheme()),\n\t\t\targs:    &types.SnapshotBrowseArgs{},\n\t\t\tprepare: func(f *fields) {\n\t\t\t\tgomock.InOrder(\n\t\t\t\t\tf.stepperOps.EXPECT().ValidateArgs(gomock.Any(), gomock.Any()).Return(nil, nil, nil),\n\t\t\t\t\tf.stepperOps.EXPECT().CreateInspectorApplication(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).Return(nil, nil, nil),\n\t\t\t\t\tf.stepperOps.EXPECT().PortForwardAPod(gomock.Any(), gomock.Any(), gomock.Any()).Return(fmt.Errorf(\"portforward error\")),\n\t\t\t\t\tf.stepperOps.EXPECT().Cleanup(gomock.Any(), gomock.Any(), gomock.Any()),\n\t\t\t\t)\n\t\t\t},\n\t\t\terrChecker: NotNil,\n\t\t},\n\t\t{\n\t\t\t// createapp failure\n\t\t\tkubeCli: fake.NewSimpleClientset(),\n\t\t\tdynCli:  fakedynamic.NewSimpleDynamicClient(runtime.NewScheme()),\n\t\t\targs:    &types.SnapshotBrowseArgs{},\n\t\t\tprepare: func(f *fields) {\n\t\t\t\tgomock.InOrder(\n\t\t\t\t\tf.stepperOps.EXPECT().ValidateArgs(gomock.Any(), gomock.Any()).Return(nil, nil, 
nil),\n\t\t\t\t\tf.stepperOps.EXPECT().CreateInspectorApplication(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).Return(nil, nil, fmt.Errorf(\"createapp error\")),\n\t\t\t\t\tf.stepperOps.EXPECT().Cleanup(gomock.Any(), gomock.Any(), gomock.Any()),\n\t\t\t\t)\n\t\t\t},\n\t\t\terrChecker: NotNil,\n\t\t},\n\t\t{\n\t\t\t// fetch snapshot failure\n\t\t\tkubeCli: fake.NewSimpleClientset(),\n\t\t\tdynCli:  fakedynamic.NewSimpleDynamicClient(runtime.NewScheme()),\n\t\t\targs:    &types.SnapshotBrowseArgs{},\n\t\t\tprepare: func(f *fields) {\n\t\t\t\tgomock.InOrder(\n\t\t\t\t\tf.stepperOps.EXPECT().ValidateArgs(gomock.Any(), gomock.Any()).Return(nil, nil, fmt.Errorf(\"snapshot error\")),\n\t\t\t\t\tf.stepperOps.EXPECT().Cleanup(gomock.Any(), gomock.Any(), gomock.Any()),\n\t\t\t\t)\n\t\t\t},\n\t\t\terrChecker: NotNil,\n\t\t},\n\t\t{\n\t\t\t// validate failure\n\t\t\tkubeCli: fake.NewSimpleClientset(),\n\t\t\tdynCli:  fakedynamic.NewSimpleDynamicClient(runtime.NewScheme()),\n\t\t\targs:    &types.SnapshotBrowseArgs{},\n\t\t\tprepare: func(f *fields) {\n\t\t\t\tgomock.InOrder(\n\t\t\t\t\tf.stepperOps.EXPECT().ValidateArgs(gomock.Any(), gomock.Any()).Return(nil, nil, fmt.Errorf(\"validate error\")),\n\t\t\t\t\tf.stepperOps.EXPECT().Cleanup(gomock.Any(), gomock.Any(), gomock.Any()),\n\t\t\t\t)\n\t\t\t},\n\t\t\terrChecker: NotNil,\n\t\t},\n\t\t{\n\t\t\t// emptycli failure\n\t\t\tkubeCli: nil,\n\t\t\tdynCli:  fakedynamic.NewSimpleDynamicClient(runtime.NewScheme()),\n\t\t\targs:    &types.SnapshotBrowseArgs{},\n\t\t\tprepare: func(f *fields) {\n\t\t\t\tgomock.InOrder(\n\t\t\t\t\tf.stepperOps.EXPECT().Cleanup(gomock.Any(), gomock.Any(), gomock.Any()),\n\t\t\t\t)\n\t\t\t},\n\t\t\terrChecker: NotNil,\n\t\t},\n\t\t{\n\t\t\t// emptydyncli failure\n\t\t\tkubeCli: fake.NewSimpleClientset(),\n\t\t\tdynCli:  nil,\n\t\t\targs:    &types.SnapshotBrowseArgs{},\n\t\t\tprepare: func(f *fields) {\n\t\t\t\tgomock.InOrder(\n\t\t\t\t\tf.stepperOps.EXPECT().Cleanup(gomock.Any(), 
gomock.Any(), gomock.Any()),\n\t\t\t\t)\n\t\t\t},\n\t\t\terrChecker: NotNil,\n\t\t},\n\t} {\n\t\tctrl := gomock.NewController(c)\n\t\tdefer ctrl.Finish()\n\t\tf := fields{\n\t\t\tstepperOps: mocks.NewMockSnapshotBrowserStepper(ctrl),\n\t\t}\n\t\tif tc.prepare != nil {\n\t\t\ttc.prepare(&f)\n\t\t}\n\t\trunner := &SnapshotBrowseRunner{\n\t\t\tKubeCli:      tc.kubeCli,\n\t\t\tDynCli:       tc.dynCli,\n\t\t\tbrowserSteps: f.stepperOps,\n\t\t}\n\t\terr := runner.RunSnapshotBrowseHelper(ctx, tc.args)\n\t\tc.Check(err, tc.errChecker)\n\t}\n}\n\nfunc (s *CSITestSuite) TestSnapshotBrowseRunner(c *C) {\n\tctx := context.Background()\n\tr := &SnapshotBrowseRunner{\n\t\tbrowserSteps: &snapshotBrowserSteps{},\n\t}\n\terr := r.RunSnapshotBrowseHelper(ctx, nil)\n\tc.Check(err, NotNil)\n}\n"
  },
  {
    "path": "pkg/csi/snapshot_restore.go",
    "content": "package csi\n\nimport (\n\t\"context\"\n\t\"fmt\"\n\t\"time\"\n\n\t\"github.com/kastenhq/kubestr/pkg/common\"\n\t\"github.com/kastenhq/kubestr/pkg/csi/types\"\n\tsnapv1 \"github.com/kubernetes-csi/external-snapshotter/client/v4/apis/volumesnapshot/v1\"\n\t\"github.com/pkg/errors\"\n\tv1 \"k8s.io/api/core/v1\"\n\tmetav1 \"k8s.io/apimachinery/pkg/apis/meta/v1\"\n\t\"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured\"\n\t\"k8s.io/client-go/dynamic\"\n\t\"k8s.io/client-go/kubernetes\"\n)\n\nconst (\n\toriginalPVCGenerateName = \"kubestr-csi-original-pvc\"\n\toriginalPodGenerateName = \"kubestr-csi-original-pod\"\n\tclonedPVCGenerateName   = \"kubestr-csi-cloned-pvc\"\n\tclonedPodGenerateName   = \"kubestr-csi-cloned-pod\"\n\tcreatedByLabel          = \"created-by-kubestr-csi\"\n\tclonePrefix             = \"kubestr-clone-\"\n\tsnapshotPrefix          = \"kubestr-snapshot-\"\n)\n\ntype SnapshotRestoreRunner struct {\n\tKubeCli kubernetes.Interface\n\tDynCli  dynamic.Interface\n\tsrSteps SnapshotRestoreStepper\n}\n\nfunc (r *SnapshotRestoreRunner) RunSnapshotRestore(ctx context.Context, args *types.CSISnapshotRestoreArgs) (*types.CSISnapshotRestoreResults, error) {\n\tif r.KubeCli == nil || r.DynCli == nil {\n\t\treturn &types.CSISnapshotRestoreResults{}, fmt.Errorf(\"cli uninitialized\")\n\t}\n\tif args == nil {\n\t\treturn &types.CSISnapshotRestoreResults{}, fmt.Errorf(\"snapshot args not specified\")\n\t}\n\tr.srSteps = &snapshotRestoreSteps{\n\t\tvalidateOps: &validateOperations{\n\t\t\tkubeCli: r.KubeCli,\n\t\t\tdynCli:  r.DynCli,\n\t\t},\n\t\tversionFetchOps: &apiVersionFetch{\n\t\t\tkubeCli: r.KubeCli,\n\t\t},\n\t\tcreateAppOps: &applicationCreate{\n\t\t\tkubeCli:               r.KubeCli,\n\t\t\tk8sObjectReadyTimeout: args.K8sObjectReadyTimeout,\n\t\t},\n\t\tdataValidatorOps: &validateData{\n\t\t\tkubeCli: r.KubeCli,\n\t\t},\n\t\tsnapshotCreateOps: &snapshotCreate{\n\t\t\tkubeCli: r.KubeCli,\n\t\t\tdynCli:  r.DynCli,\n\t\t},\n\t\tcleanerOps: 
&cleanse{\n\t\t\tkubeCli: r.KubeCli,\n\t\t\tdynCli:  r.DynCli,\n\t\t},\n\t}\n\treturn r.RunSnapshotRestoreHelper(ctx, args)\n}\n\nfunc (r *SnapshotRestoreRunner) RunSnapshotRestoreHelper(ctx context.Context, args *types.CSISnapshotRestoreArgs) (*types.CSISnapshotRestoreResults, error) {\n\tresults := &types.CSISnapshotRestoreResults{}\n\tvar err error\n\tif r.KubeCli == nil || r.DynCli == nil {\n\t\treturn results, fmt.Errorf(\"cli uninitialized\")\n\t}\n\tif err := r.srSteps.ValidateArgs(ctx, args); err != nil {\n\t\treturn results, errors.Wrap(err, \"failed to validate arguments\")\n\t}\n\tdata := time.Now().Format(\"20060102150405\")\n\n\tfmt.Println(\"Creating application\")\n\tresults.OriginalPod, results.OriginalPVC, err = r.srSteps.CreateApplication(ctx, args, data)\n\n\tif err == nil {\n\t\tif results.OriginalPod != nil && results.OriginalPVC != nil {\n\t\t\tfmt.Printf(\"  -> Created pod (%s) and pvc (%s)\\n\", results.OriginalPod.Name, results.OriginalPVC.Name)\n\t\t}\n\t\terr = r.srSteps.ValidateData(ctx, results.OriginalPod, data)\n\t}\n\n\tsnapName := snapshotPrefix + data\n\tif err == nil {\n\t\tfmt.Println(\"Taking a snapshot\")\n\t\tresults.Snapshot, err = r.srSteps.SnapshotApplication(ctx, args, results.OriginalPVC, snapName)\n\t}\n\n\tif err == nil {\n\t\tif results.Snapshot != nil {\n\t\t\tfmt.Printf(\"  -> Created snapshot (%s)\\n\", results.Snapshot.Name)\n\t\t}\n\t\tfmt.Println(\"Restoring application\")\n\t\tresults.ClonedPod, results.ClonedPVC, err = r.srSteps.RestoreApplication(ctx, args, results.Snapshot)\n\t}\n\n\tif err == nil {\n\t\tif results.ClonedPod != nil && results.ClonedPVC != nil {\n\t\t\tfmt.Printf(\"  -> Restored pod (%s) and pvc (%s)\\n\", results.ClonedPod.Name, results.ClonedPVC.Name)\n\t\t}\n\t\terr = r.srSteps.ValidateData(ctx, results.ClonedPod, data)\n\t}\n\n\tif args.Cleanup {\n\t\tfmt.Println(\"Cleaning up resources\")\n\t\t// don't let Cancelled/DeadlineExceeded context affect 
cleanup\n\t\tr.srSteps.Cleanup(context.Background(), results)\n\t}\n\n\treturn results, err\n}\n\n//go:generate go run github.com/golang/mock/mockgen -destination=mocks/mock_snapshot_restore_stepper.go -package=mocks . SnapshotRestoreStepper\ntype SnapshotRestoreStepper interface {\n\tValidateArgs(ctx context.Context, args *types.CSISnapshotRestoreArgs) error\n\tCreateApplication(ctx context.Context, args *types.CSISnapshotRestoreArgs, data string) (*v1.Pod, *v1.PersistentVolumeClaim, error)\n\tValidateData(ctx context.Context, pod *v1.Pod, data string) error\n\tSnapshotApplication(ctx context.Context, args *types.CSISnapshotRestoreArgs, pvc *v1.PersistentVolumeClaim, snapshotName string) (*snapv1.VolumeSnapshot, error)\n\tRestoreApplication(ctx context.Context, args *types.CSISnapshotRestoreArgs, snapshot *snapv1.VolumeSnapshot) (*v1.Pod, *v1.PersistentVolumeClaim, error)\n\tCleanup(ctx context.Context, results *types.CSISnapshotRestoreResults)\n}\n\ntype snapshotRestoreSteps struct {\n\tvalidateOps          ArgumentValidator\n\tversionFetchOps      ApiVersionFetcher\n\tcreateAppOps         ApplicationCreator\n\tdataValidatorOps     DataValidator\n\tsnapshotCreateOps    SnapshotCreator\n\tcleanerOps           Cleaner\n\tSnapshotGroupVersion *metav1.GroupVersionForDiscovery\n}\n\nfunc (s *snapshotRestoreSteps) ValidateArgs(ctx context.Context, args *types.CSISnapshotRestoreArgs) error {\n\tif err := args.Validate(); err != nil {\n\t\treturn errors.Wrap(err, \"failed to validate input arguments\")\n\t}\n\tif err := s.validateOps.ValidateNamespace(ctx, args.Namespace); err != nil {\n\t\treturn errors.Wrap(err, \"failed to validate Namespace\")\n\t}\n\tsc, err := s.validateOps.ValidateStorageClass(ctx, args.StorageClass)\n\tif err != nil {\n\t\treturn errors.Wrap(err, \"failed to validate Storageclass\")\n\t}\n\n\tgroupVersion, err := s.versionFetchOps.GetCSISnapshotGroupVersion()\n\tif err != nil {\n\t\treturn errors.Wrap(err, \"failed to fetch 
groupVersion\")\n\t}\n\ts.SnapshotGroupVersion = groupVersion\n\n\tuVSC, err := s.validateOps.ValidateVolumeSnapshotClass(ctx, args.VolumeSnapshotClass, groupVersion)\n\tif err != nil {\n\t\treturn errors.Wrap(err, \"failed to validate VolumeSnapshotClass\")\n\t}\n\n\tvscDriver := getDriverNameFromUVSC(*uVSC, groupVersion.GroupVersion)\n\tif sc.Provisioner != vscDriver {\n\t\treturn fmt.Errorf(\"provisioner for StorageClass (%s) and VolumeSnapshotClass driver (%s) are different\", sc.Provisioner, vscDriver)\n\t}\n\treturn nil\n}\n\nfunc (s *snapshotRestoreSteps) CreateApplication(ctx context.Context, args *types.CSISnapshotRestoreArgs, genString string) (*v1.Pod, *v1.PersistentVolumeClaim, error) {\n\tpvcArgs := &types.CreatePVCArgs{\n\t\tGenerateName: originalPVCGenerateName,\n\t\tStorageClass: args.StorageClass,\n\t\tNamespace:    args.Namespace,\n\t}\n\tpvc, err := s.createAppOps.CreatePVC(ctx, pvcArgs)\n\tif err != nil {\n\t\treturn nil, nil, errors.Wrap(err, \"failed to create PVC\")\n\t}\n\tpodArgs := &types.CreatePodArgs{\n\t\tGenerateName:   originalPodGenerateName,\n\t\tNamespace:      args.Namespace,\n\t\tRunAsUser:      args.RunAsUser,\n\t\tContainerImage: args.ContainerImage,\n\t\tCommand:        []string{\"/bin/sh\"},\n\t\tContainerArgs:  []string{\"-c\", fmt.Sprintf(\"echo '%s' >> /data/out.txt; sync; tail -f /dev/null\", genString)},\n\t\tPVCMap: map[string]types.VolumePath{\n\t\t\tpvc.Name: {\n\t\t\t\tMountPath: \"/data\",\n\t\t\t},\n\t\t},\n\t}\n\tpod, err := s.createAppOps.CreatePod(ctx, podArgs)\n\tif err != nil {\n\t\treturn nil, pvc, errors.Wrap(err, \"failed to create pod\")\n\t}\n\n\tif err = s.createAppOps.WaitForPVCReady(ctx, args.Namespace, pvc.Name); err != nil {\n\t\treturn pod, pvc, errors.Wrap(err, \"PVC failed to become ready\")\n\t}\n\n\tif err = s.createAppOps.WaitForPodReady(ctx, args.Namespace, pod.Name); err != nil {\n\t\treturn pod, pvc, errors.Wrap(err, \"pod failed to become ready\")\n\t}\n\treturn pod, pvc, nil\n}\n\nfunc (s 
*snapshotRestoreSteps) ValidateData(ctx context.Context, pod *v1.Pod, data string) error {\n\tpodData, err := s.dataValidatorOps.FetchPodData(ctx, pod.Name, pod.Namespace)\n\tif err != nil {\n\t\treturn errors.Wrap(err, \"failed to fetch data from pod. Failure may be due to permissions issues, try again with runAsUser=1000 option\")\n\t}\n\tif podData != data {\n\t\treturn fmt.Errorf(\"string didn't match (%s , %s)\", podData, data)\n\t}\n\treturn nil\n}\n\nfunc (s *snapshotRestoreSteps) SnapshotApplication(ctx context.Context, args *types.CSISnapshotRestoreArgs, pvc *v1.PersistentVolumeClaim, snapshotName string) (*snapv1.VolumeSnapshot, error) {\n\tsnapshotter, err := s.snapshotCreateOps.NewSnapshotter()\n\tif err != nil {\n\t\treturn nil, errors.Wrap(err, \"failed to load snapshotter\")\n\t}\n\tcreateSnapshotArgs := &types.CreateSnapshotArgs{\n\t\tNamespace:           args.Namespace,\n\t\tPVCName:             pvc.Name,\n\t\tVolumeSnapshotClass: args.VolumeSnapshotClass,\n\t\tSnapshotName:        snapshotName,\n\t}\n\tsnapshot, err := s.snapshotCreateOps.CreateSnapshot(ctx, snapshotter, createSnapshotArgs)\n\tif err != nil {\n\t\treturn nil, errors.Wrap(err, \"failed to create snapshot\")\n\t}\n\tif !args.SkipCFSCheck {\n\t\tcfsArgs := &types.CreateFromSourceCheckArgs{\n\t\t\tVolumeSnapshotClass: args.VolumeSnapshotClass,\n\t\t\tSnapshotName:        snapshot.Name,\n\t\t\tNamespace:           args.Namespace,\n\t\t}\n\t\tif err = s.snapshotCreateOps.CreateFromSourceCheck(ctx, snapshotter, cfsArgs, s.SnapshotGroupVersion); err != nil {\n\t\t\treturn snapshot, errors.Wrap(err, \"failed to create duplicate snapshot from source. 
To skip check use '--skipcfs=true' option\")\n\t\t}\n\t}\n\treturn snapshot, nil\n}\n\nfunc (s *snapshotRestoreSteps) RestoreApplication(ctx context.Context, args *types.CSISnapshotRestoreArgs, snapshot *snapv1.VolumeSnapshot) (*v1.Pod, *v1.PersistentVolumeClaim, error) {\n\tsnapshotAPIGroup := \"snapshot.storage.k8s.io\"\n\tsnapshotKind := \"VolumeSnapshot\"\n\tdataSource := &v1.TypedLocalObjectReference{\n\t\tAPIGroup: &snapshotAPIGroup,\n\t\tKind:     snapshotKind,\n\t\tName:     snapshot.Name,\n\t}\n\tpvcArgs := &types.CreatePVCArgs{\n\t\tGenerateName: clonedPVCGenerateName,\n\t\tStorageClass: args.StorageClass,\n\t\tNamespace:    args.Namespace,\n\t\tDataSource:   dataSource,\n\t\tRestoreSize:  snapshot.Status.RestoreSize,\n\t}\n\tpvc, err := s.createAppOps.CreatePVC(ctx, pvcArgs)\n\tif err != nil {\n\t\treturn nil, nil, errors.Wrap(err, \"failed to restore PVC\")\n\t}\n\tpodArgs := &types.CreatePodArgs{\n\t\tGenerateName:   clonedPodGenerateName,\n\t\tNamespace:      args.Namespace,\n\t\tRunAsUser:      args.RunAsUser,\n\t\tContainerImage: args.ContainerImage,\n\t\tCommand:        []string{\"/bin/sh\"},\n\t\tContainerArgs:  []string{\"-c\", \"tail -f /dev/null\"},\n\t\tPVCMap: map[string]types.VolumePath{\n\t\t\tpvc.Name: {\n\t\t\t\tMountPath: \"/data\",\n\t\t\t},\n\t\t},\n\t}\n\tpod, err := s.createAppOps.CreatePod(ctx, podArgs)\n\tif err != nil {\n\t\treturn nil, pvc, errors.Wrap(err, \"failed to create restored pod\")\n\t}\n\n\tif err = s.createAppOps.WaitForPVCReady(ctx, args.Namespace, pvc.Name); err != nil {\n\t\treturn pod, pvc, errors.Wrap(err, \"PVC failed to become ready\")\n\t}\n\n\tif err = s.createAppOps.WaitForPodReady(ctx, args.Namespace, pod.Name); err != nil {\n\t\treturn pod, pvc, errors.Wrap(err, \"pod failed to become ready\")\n\t}\n\treturn pod, pvc, nil\n}\n\nfunc (s *snapshotRestoreSteps) Cleanup(ctx context.Context, results *types.CSISnapshotRestoreResults) {\n\tif results == nil {\n\t\treturn\n\t}\n\tif results.OriginalPVC != nil 
{\n\t\terr := s.cleanerOps.DeletePVC(ctx, results.OriginalPVC.Name, results.OriginalPVC.Namespace)\n\t\tif err != nil {\n\t\t\tfmt.Printf(\"Error deleting original PVC (%s) - (%v)\\n\", results.OriginalPVC.Name, err)\n\t\t}\n\t}\n\tif results.OriginalPod != nil {\n\t\terr := s.cleanerOps.DeletePod(ctx, results.OriginalPod.Name, results.OriginalPod.Namespace)\n\t\tif err != nil {\n\t\t\tfmt.Printf(\"Error deleting original Pod (%s) - (%v)\\n\", results.OriginalPod.Name, err)\n\t\t}\n\t}\n\tif results.ClonedPVC != nil {\n\t\terr := s.cleanerOps.DeletePVC(ctx, results.ClonedPVC.Name, results.ClonedPVC.Namespace)\n\t\tif err != nil {\n\t\t\tfmt.Printf(\"Error deleting cloned PVC (%s) - (%v)\\n\", results.ClonedPVC.Name, err)\n\t\t}\n\t}\n\tif results.ClonedPod != nil {\n\t\terr := s.cleanerOps.DeletePod(ctx, results.ClonedPod.Name, results.ClonedPod.Namespace)\n\t\tif err != nil {\n\t\t\tfmt.Printf(\"Error deleting cloned Pod (%s) - (%v)\\n\", results.ClonedPod.Name, err)\n\t\t}\n\t}\n\tif results.Snapshot != nil {\n\t\terr := s.cleanerOps.DeleteSnapshot(ctx, results.Snapshot.Name, results.Snapshot.Namespace, s.SnapshotGroupVersion)\n\t\tif err != nil {\n\t\t\tfmt.Printf(\"Error deleting Snapshot (%s) - (%v)\\n\", results.Snapshot.Name, err)\n\t\t}\n\t}\n}\n\nfunc getDriverNameFromUVSC(vsc unstructured.Unstructured, version string) string {\n\tvar driverName interface{}\n\tvar ok bool\n\tif version != common.SnapshotVersion {\n\t\treturn \"\"\n\t}\n\tdriverName, ok = vsc.Object[common.VolSnapClassDriverKey]\n\tif !ok {\n\t\treturn \"\"\n\t}\n\tdriver, ok := driverName.(string)\n\tif !ok {\n\t\treturn \"\"\n\t}\n\treturn driver\n}\n"
  },
  {
    "path": "pkg/csi/snapshot_restore_steps_test.go",
    "content": "package csi\n\nimport (\n\t\"context\"\n\t\"fmt\"\n\n\t\"github.com/golang/mock/gomock\"\n\t\"github.com/kastenhq/kubestr/pkg/common\"\n\t\"github.com/kastenhq/kubestr/pkg/csi/mocks\"\n\t\"github.com/kastenhq/kubestr/pkg/csi/types\"\n\tsnapv1 \"github.com/kubernetes-csi/external-snapshotter/client/v4/apis/volumesnapshot/v1\"\n\t. \"gopkg.in/check.v1\"\n\tv1 \"k8s.io/api/core/v1\"\n\tsv1 \"k8s.io/api/storage/v1\"\n\t\"k8s.io/apimachinery/pkg/api/resource\"\n\tmetav1 \"k8s.io/apimachinery/pkg/apis/meta/v1\"\n\t\"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured\"\n)\n\nfunc (s *CSITestSuite) TestValidateArgs(c *C) {\n\tctx := context.Background()\n\ttype fields struct {\n\t\tvalidateOps *mocks.MockArgumentValidator\n\t\tversionOps  *mocks.MockApiVersionFetcher\n\t}\n\tfor _, tc := range []struct {\n\t\targs       *types.CSISnapshotRestoreArgs\n\t\tprepare    func(f *fields)\n\t\terrChecker Checker\n\t}{\n\t\t{ // valid args\n\t\t\targs: &types.CSISnapshotRestoreArgs{\n\t\t\t\tStorageClass:        \"sc\",\n\t\t\t\tVolumeSnapshotClass: \"vsc\",\n\t\t\t\tNamespace:           \"ns\",\n\t\t\t},\n\t\t\tprepare: func(f *fields) {\n\t\t\t\tgomock.InOrder(\n\t\t\t\t\tf.validateOps.EXPECT().ValidateNamespace(gomock.Any(), \"ns\").Return(nil),\n\t\t\t\t\tf.validateOps.EXPECT().ValidateStorageClass(gomock.Any(), \"sc\").Return(\n\t\t\t\t\t\t&sv1.StorageClass{\n\t\t\t\t\t\t\tProvisioner: \"p1\",\n\t\t\t\t\t\t}, nil),\n\t\t\t\t\tf.versionOps.EXPECT().GetCSISnapshotGroupVersion().Return(\n\t\t\t\t\t\t&metav1.GroupVersionForDiscovery{\n\t\t\t\t\t\t\tGroupVersion: common.SnapshotVersion,\n\t\t\t\t\t\t}, nil),\n\t\t\t\t\tf.validateOps.EXPECT().ValidateVolumeSnapshotClass(gomock.Any(), \"vsc\", &metav1.GroupVersionForDiscovery{\n\t\t\t\t\t\tGroupVersion: common.SnapshotVersion,\n\t\t\t\t\t}).Return(&unstructured.Unstructured{\n\t\t\t\t\t\tObject: map[string]interface{}{\n\t\t\t\t\t\t\tcommon.VolSnapClassDriverKey: \"p1\",\n\t\t\t\t\t\t},\n\t\t\t\t\t}, 
nil),\n\t\t\t\t)\n\t\t\t},\n\t\t\terrChecker: IsNil,\n\t\t},\n\t\t{ // driver mismatch\n\t\t\targs: &types.CSISnapshotRestoreArgs{\n\t\t\t\tStorageClass:        \"sc\",\n\t\t\t\tVolumeSnapshotClass: \"vsc\",\n\t\t\t\tNamespace:           \"ns\",\n\t\t\t},\n\t\t\tprepare: func(f *fields) {\n\t\t\t\tgomock.InOrder(\n\t\t\t\t\tf.validateOps.EXPECT().ValidateNamespace(gomock.Any(), \"ns\").Return(nil),\n\t\t\t\t\tf.validateOps.EXPECT().ValidateStorageClass(gomock.Any(), \"sc\").Return(\n\t\t\t\t\t\t&sv1.StorageClass{\n\t\t\t\t\t\t\tProvisioner: \"p1\",\n\t\t\t\t\t\t}, nil),\n\t\t\t\t\tf.versionOps.EXPECT().GetCSISnapshotGroupVersion().Return(\n\t\t\t\t\t\t&metav1.GroupVersionForDiscovery{\n\t\t\t\t\t\t\tGroupVersion: common.SnapshotVersion,\n\t\t\t\t\t\t}, nil),\n\t\t\t\t\tf.validateOps.EXPECT().ValidateVolumeSnapshotClass(gomock.Any(), \"vsc\", &metav1.GroupVersionForDiscovery{\n\t\t\t\t\t\tGroupVersion: common.SnapshotVersion,\n\t\t\t\t\t}).Return(&unstructured.Unstructured{\n\t\t\t\t\t\tObject: map[string]interface{}{\n\t\t\t\t\t\t\tcommon.VolSnapClassDriverKey: \"p2\",\n\t\t\t\t\t\t},\n\t\t\t\t\t}, nil),\n\t\t\t\t)\n\t\t\t},\n\t\t\terrChecker: NotNil,\n\t\t},\n\t\t{ // vsc error\n\t\t\targs: &types.CSISnapshotRestoreArgs{\n\t\t\t\tStorageClass:        \"sc\",\n\t\t\t\tVolumeSnapshotClass: \"vsc\",\n\t\t\t\tNamespace:           \"ns\",\n\t\t\t},\n\t\t\tprepare: func(f *fields) {\n\t\t\t\tgomock.InOrder(\n\t\t\t\t\tf.validateOps.EXPECT().ValidateNamespace(gomock.Any(), \"ns\").Return(nil),\n\t\t\t\t\tf.validateOps.EXPECT().ValidateStorageClass(gomock.Any(), \"sc\").Return(\n\t\t\t\t\t\t&sv1.StorageClass{\n\t\t\t\t\t\t\tProvisioner: \"p1\",\n\t\t\t\t\t\t}, nil),\n\t\t\t\t\tf.versionOps.EXPECT().GetCSISnapshotGroupVersion().Return(\n\t\t\t\t\t\t&metav1.GroupVersionForDiscovery{\n\t\t\t\t\t\t\tGroupVersion: common.SnapshotVersion,\n\t\t\t\t\t\t}, nil),\n\t\t\t\t\tf.validateOps.EXPECT().ValidateVolumeSnapshotClass(gomock.Any(), \"vsc\", 
&metav1.GroupVersionForDiscovery{\n\t\t\t\t\t\tGroupVersion: common.SnapshotVersion,\n\t\t\t\t\t}).Return(nil, fmt.Errorf(\"vsc error\")),\n\t\t\t\t)\n\t\t\t},\n\t\t\terrChecker: NotNil,\n\t\t},\n\t\t{ // groupversion error\n\t\t\targs: &types.CSISnapshotRestoreArgs{\n\t\t\t\tStorageClass:        \"sc\",\n\t\t\t\tVolumeSnapshotClass: \"vsc\",\n\t\t\t\tNamespace:           \"ns\",\n\t\t\t},\n\t\t\tprepare: func(f *fields) {\n\t\t\t\tgomock.InOrder(\n\t\t\t\t\tf.validateOps.EXPECT().ValidateNamespace(gomock.Any(), \"ns\").Return(nil),\n\t\t\t\t\tf.validateOps.EXPECT().ValidateStorageClass(gomock.Any(), \"sc\").Return(\n\t\t\t\t\t\t&sv1.StorageClass{\n\t\t\t\t\t\t\tProvisioner: \"p1\",\n\t\t\t\t\t\t}, nil),\n\t\t\t\t\tf.versionOps.EXPECT().GetCSISnapshotGroupVersion().Return(\n\t\t\t\t\t\tnil, fmt.Errorf(\"groupversion error\")),\n\t\t\t\t)\n\t\t\t},\n\t\t\terrChecker: NotNil,\n\t\t},\n\t\t{\n\t\t\targs: &types.CSISnapshotRestoreArgs{\n\t\t\t\tStorageClass:        \"sc\",\n\t\t\t\tVolumeSnapshotClass: \"vsc\",\n\t\t\t\tNamespace:           \"ns\",\n\t\t\t},\n\t\t\tprepare: func(f *fields) {\n\t\t\t\tgomock.InOrder(\n\t\t\t\t\tf.validateOps.EXPECT().ValidateNamespace(gomock.Any(), \"ns\").Return(nil),\n\t\t\t\t\tf.validateOps.EXPECT().ValidateStorageClass(gomock.Any(), \"sc\").Return(\n\t\t\t\t\t\tnil, fmt.Errorf(\"sc error\")),\n\t\t\t\t)\n\t\t\t},\n\t\t\terrChecker: NotNil,\n\t\t},\n\t\t{\n\t\t\targs: &types.CSISnapshotRestoreArgs{\n\t\t\t\tStorageClass:        \"sc\",\n\t\t\t\tVolumeSnapshotClass: \"vsc\",\n\t\t\t\tNamespace:           \"ns\",\n\t\t\t},\n\t\t\tprepare: func(f *fields) {\n\t\t\t\tgomock.InOrder(\n\t\t\t\t\tf.validateOps.EXPECT().ValidateNamespace(gomock.Any(), \"ns\").Return(fmt.Errorf(\"ns error\")),\n\t\t\t\t)\n\t\t\t},\n\t\t\terrChecker: NotNil,\n\t\t},\n\t\t{\n\t\t\targs: &types.CSISnapshotRestoreArgs{\n\t\t\t\tStorageClass:        \"\",\n\t\t\t\tVolumeSnapshotClass: \"vsc\",\n\t\t\t\tNamespace:           \"ns\",\n\t\t\t},\n\t\t\terrChecker: 
NotNil,\n\t\t}, {\n\t\t\targs: &types.CSISnapshotRestoreArgs{\n\t\t\t\tStorageClass:        \"sc\",\n\t\t\t\tVolumeSnapshotClass: \"\",\n\t\t\t\tNamespace:           \"ns\",\n\t\t\t},\n\t\t\terrChecker: NotNil,\n\t\t}, {\n\t\t\targs: &types.CSISnapshotRestoreArgs{\n\t\t\t\tStorageClass:        \"sc\",\n\t\t\t\tVolumeSnapshotClass: \"vsc\",\n\t\t\t\tNamespace:           \"\",\n\t\t\t},\n\t\t\terrChecker: NotNil,\n\t\t},\n\t} {\n\t\tctrl := gomock.NewController(c)\n\t\tdefer ctrl.Finish()\n\t\tf := fields{\n\t\t\tvalidateOps: mocks.NewMockArgumentValidator(ctrl),\n\t\t\tversionOps:  mocks.NewMockApiVersionFetcher(ctrl),\n\t\t}\n\t\tif tc.prepare != nil {\n\t\t\ttc.prepare(&f)\n\t\t}\n\t\tstepper := &snapshotRestoreSteps{\n\t\t\tvalidateOps:     f.validateOps,\n\t\t\tversionFetchOps: f.versionOps,\n\t\t}\n\t\terr := stepper.ValidateArgs(ctx, tc.args)\n\t\tc.Check(err, tc.errChecker)\n\t}\n}\n\nfunc (s *CSITestSuite) TestCreateApplication(c *C) {\n\tctx := context.Background()\n\ttype fields struct {\n\t\tcreateAppOps *mocks.MockApplicationCreator\n\t}\n\tfor _, tc := range []struct {\n\t\targs       *types.CSISnapshotRestoreArgs\n\t\tgenString  string\n\t\tprepare    func(f *fields)\n\t\terrChecker Checker\n\t\tpodChecker Checker\n\t\tpvcChecker Checker\n\t}{\n\t\t{\n\t\t\targs: &types.CSISnapshotRestoreArgs{\n\t\t\t\tStorageClass:   \"sc\",\n\t\t\t\tNamespace:      \"ns\",\n\t\t\t\tRunAsUser:      100,\n\t\t\t\tContainerImage: \"image\",\n\t\t\t},\n\t\t\tgenString: \"some string\",\n\t\t\tprepare: func(f *fields) {\n\t\t\t\tgomock.InOrder(\n\t\t\t\t\tf.createAppOps.EXPECT().CreatePVC(gomock.Any(), &types.CreatePVCArgs{\n\t\t\t\t\t\tGenerateName: originalPVCGenerateName,\n\t\t\t\t\t\tStorageClass: \"sc\",\n\t\t\t\t\t\tNamespace:    \"ns\",\n\t\t\t\t\t}).Return(&v1.PersistentVolumeClaim{\n\t\t\t\t\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\t\t\t\t\tName: \"pvc1\",\n\t\t\t\t\t\t},\n\t\t\t\t\t}, nil),\n\t\t\t\t\tf.createAppOps.EXPECT().CreatePod(gomock.Any(), 
&types.CreatePodArgs{\n\t\t\t\t\t\tGenerateName:   originalPodGenerateName,\n\t\t\t\t\t\tNamespace:      \"ns\",\n\t\t\t\t\t\tCommand:        []string{\"/bin/sh\"},\n\t\t\t\t\t\tContainerArgs:  []string{\"-c\", \"echo 'some string' >> /data/out.txt; sync; tail -f /dev/null\"},\n\t\t\t\t\t\tRunAsUser:      100,\n\t\t\t\t\t\tContainerImage: \"image\",\n\t\t\t\t\t\tPVCMap: map[string]types.VolumePath{\n\t\t\t\t\t\t\t\"pvc1\": {\n\t\t\t\t\t\t\t\tMountPath: \"/data\",\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t}).Return(&v1.Pod{\n\t\t\t\t\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\t\t\t\t\tName: \"pod1\",\n\t\t\t\t\t\t},\n\t\t\t\t\t}, nil),\n\t\t\t\t\tf.createAppOps.EXPECT().WaitForPVCReady(gomock.Any(), \"ns\", \"pvc1\").Return(nil),\n\t\t\t\t\tf.createAppOps.EXPECT().WaitForPodReady(gomock.Any(), \"ns\", \"pod1\").Return(nil),\n\t\t\t\t)\n\t\t\t},\n\t\t\terrChecker: IsNil,\n\t\t\tpodChecker: NotNil,\n\t\t\tpvcChecker: NotNil,\n\t\t},\n\t\t{\n\t\t\targs: &types.CSISnapshotRestoreArgs{\n\t\t\t\tStorageClass:   \"sc\",\n\t\t\t\tNamespace:      \"ns\",\n\t\t\t\tRunAsUser:      100,\n\t\t\t\tContainerImage: \"image\",\n\t\t\t},\n\t\t\tgenString: \"some string\",\n\t\t\tprepare: func(f *fields) {\n\t\t\t\tgomock.InOrder(\n\t\t\t\t\tf.createAppOps.EXPECT().CreatePVC(gomock.Any(), &types.CreatePVCArgs{\n\t\t\t\t\t\tGenerateName: originalPVCGenerateName,\n\t\t\t\t\t\tStorageClass: \"sc\",\n\t\t\t\t\t\tNamespace:    \"ns\",\n\t\t\t\t\t}).Return(&v1.PersistentVolumeClaim{\n\t\t\t\t\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\t\t\t\t\tName: \"pvc1\",\n\t\t\t\t\t\t},\n\t\t\t\t\t}, nil),\n\t\t\t\t\tf.createAppOps.EXPECT().CreatePod(gomock.Any(), &types.CreatePodArgs{\n\t\t\t\t\t\tGenerateName:   originalPodGenerateName,\n\t\t\t\t\t\tNamespace:      \"ns\",\n\t\t\t\t\t\tCommand:        []string{\"/bin/sh\"},\n\t\t\t\t\t\tContainerArgs:  []string{\"-c\", \"echo 'some string' >> /data/out.txt; sync; tail -f /dev/null\"},\n\t\t\t\t\t\tRunAsUser:      100,\n\t\t\t\t\t\tContainerImage: 
\"image\",\n\t\t\t\t\t\tPVCMap: map[string]types.VolumePath{\n\t\t\t\t\t\t\t\"pvc1\": {\n\t\t\t\t\t\t\t\tMountPath: \"/data\",\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t}).Return(&v1.Pod{\n\t\t\t\t\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\t\t\t\t\tName: \"pod1\",\n\t\t\t\t\t\t},\n\t\t\t\t\t}, nil),\n\t\t\t\t\tf.createAppOps.EXPECT().WaitForPVCReady(gomock.Any(), \"ns\", \"pvc1\").Return(nil),\n\t\t\t\t\tf.createAppOps.EXPECT().WaitForPodReady(gomock.Any(), \"ns\", \"pod1\").Return(fmt.Errorf(\"pod ready error\")),\n\t\t\t\t)\n\t\t\t},\n\t\t\terrChecker: NotNil,\n\t\t\tpodChecker: NotNil,\n\t\t\tpvcChecker: NotNil,\n\t\t},\n\t\t{\n\t\t\targs: &types.CSISnapshotRestoreArgs{\n\t\t\t\tStorageClass:   \"sc\",\n\t\t\t\tNamespace:      \"ns\",\n\t\t\t\tRunAsUser:      100,\n\t\t\t\tContainerImage: \"image\",\n\t\t\t},\n\t\t\tgenString: \"some string\",\n\t\t\tprepare: func(f *fields) {\n\t\t\t\tgomock.InOrder(\n\t\t\t\t\tf.createAppOps.EXPECT().CreatePVC(gomock.Any(), gomock.Any()).Return(&v1.PersistentVolumeClaim{\n\t\t\t\t\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\t\t\t\t\tName: \"pvc1\",\n\t\t\t\t\t\t},\n\t\t\t\t\t}, nil),\n\t\t\t\t\tf.createAppOps.EXPECT().CreatePod(gomock.Any(), gomock.Any()).Return(nil, fmt.Errorf(\"create pod error\")),\n\t\t\t\t)\n\t\t\t},\n\t\t\terrChecker: NotNil,\n\t\t\tpodChecker: IsNil,\n\t\t\tpvcChecker: NotNil,\n\t\t},\n\t\t{\n\t\t\targs: &types.CSISnapshotRestoreArgs{\n\t\t\t\tStorageClass:   \"sc\",\n\t\t\t\tNamespace:      \"ns\",\n\t\t\t\tRunAsUser:      100,\n\t\t\t\tContainerImage: \"image\",\n\t\t\t},\n\t\t\tgenString: \"some string\",\n\t\t\tprepare: func(f *fields) {\n\t\t\t\tgomock.InOrder(\n\t\t\t\t\tf.createAppOps.EXPECT().CreatePVC(gomock.Any(), gomock.Any()).Return(nil, fmt.Errorf(\"create pvc error\")),\n\t\t\t\t)\n\t\t\t},\n\t\t\terrChecker: NotNil,\n\t\t\tpodChecker: IsNil,\n\t\t\tpvcChecker: IsNil,\n\t\t},\n\t\t{ // PVC times out provisioning\n\t\t\targs: &types.CSISnapshotRestoreArgs{\n\t\t\t\tStorageClass:   
\"sc\",\n\t\t\t\tNamespace:      \"ns\",\n\t\t\t\tRunAsUser:      100,\n\t\t\t\tContainerImage: \"image\",\n\t\t\t},\n\t\t\tgenString: \"some string\",\n\t\t\tprepare: func(f *fields) {\n\t\t\t\tgomock.InOrder(\n\t\t\t\t\tf.createAppOps.EXPECT().CreatePVC(gomock.Any(), &types.CreatePVCArgs{\n\t\t\t\t\t\tGenerateName: originalPVCGenerateName,\n\t\t\t\t\t\tStorageClass: \"sc\",\n\t\t\t\t\t\tNamespace:    \"ns\",\n\t\t\t\t\t}).Return(&v1.PersistentVolumeClaim{\n\t\t\t\t\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\t\t\t\t\tName: \"pvc1\",\n\t\t\t\t\t\t},\n\t\t\t\t\t}, nil),\n\t\t\t\t\tf.createAppOps.EXPECT().CreatePod(gomock.Any(), &types.CreatePodArgs{\n\t\t\t\t\t\tGenerateName:   originalPodGenerateName,\n\t\t\t\t\t\tNamespace:      \"ns\",\n\t\t\t\t\t\tCommand:        []string{\"/bin/sh\"},\n\t\t\t\t\t\tContainerArgs:  []string{\"-c\", \"echo 'some string' >> /data/out.txt; sync; tail -f /dev/null\"},\n\t\t\t\t\t\tRunAsUser:      100,\n\t\t\t\t\t\tContainerImage: \"image\",\n\t\t\t\t\t\tPVCMap: map[string]types.VolumePath{\n\t\t\t\t\t\t\t\"pvc1\": {\n\t\t\t\t\t\t\t\tMountPath: \"/data\",\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t}).Return(&v1.Pod{\n\t\t\t\t\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\t\t\t\t\tName: \"pod1\",\n\t\t\t\t\t\t},\n\t\t\t\t\t}, nil),\n\t\t\t\t\tf.createAppOps.EXPECT().WaitForPVCReady(gomock.Any(), \"ns\", \"pvc1\").Return(fmt.Errorf(\"rate: Wait(n=1) would exceed context deadline\")),\n\t\t\t\t\tf.createAppOps.EXPECT().WaitForPodReady(gomock.Any(), \"ns\", \"pvc1\").Times(0),\n\t\t\t\t)\n\t\t\t},\n\t\t\terrChecker: NotNil,\n\t\t\tpodChecker: NotNil,\n\t\t\tpvcChecker: NotNil,\n\t\t},\n\t} {\n\t\tctrl := gomock.NewController(c)\n\t\tdefer ctrl.Finish()\n\t\tf := fields{\n\t\t\tcreateAppOps: mocks.NewMockApplicationCreator(ctrl),\n\t\t}\n\t\tif tc.prepare != nil {\n\t\t\ttc.prepare(&f)\n\t\t}\n\t\tstepper := &snapshotRestoreSteps{\n\t\t\tcreateAppOps: f.createAppOps,\n\t\t}\n\t\tpod, pvc, err := stepper.CreateApplication(ctx, tc.args, 
tc.genString)\n\t\tc.Check(err, tc.errChecker)\n\t\tc.Check(pod, tc.podChecker)\n\t\tc.Check(pvc, tc.pvcChecker)\n\t}\n}\n\nfunc (s *CSITestSuite) TestSnapshotApplication(c *C) {\n\tctx := context.Background()\n\tsnapshotter := &fakeSnapshotter{name: \"snapshotter\"}\n\tgroupversion := &metav1.GroupVersionForDiscovery{\n\t\tGroupVersion: \"gv\",\n\t\tVersion:      \"v\",\n\t}\n\ttype fields struct {\n\t\tsnapshotOps *mocks.MockSnapshotCreator\n\t}\n\tfor _, tc := range []struct {\n\t\targs         *types.CSISnapshotRestoreArgs\n\t\tpvc          *v1.PersistentVolumeClaim\n\t\tsnapshotName string\n\t\tprepare      func(f *fields)\n\t\terrChecker   Checker\n\t\tsnapChecker  Checker\n\t}{\n\t\t{\n\t\t\targs: &types.CSISnapshotRestoreArgs{\n\t\t\t\tNamespace:           \"ns\",\n\t\t\t\tVolumeSnapshotClass: \"vsc\",\n\t\t\t},\n\t\t\tpvc: &v1.PersistentVolumeClaim{\n\t\t\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\t\t\tName: \"pvc1\",\n\t\t\t\t},\n\t\t\t},\n\t\t\tsnapshotName: \"snap1\",\n\t\t\tprepare: func(f *fields) {\n\t\t\t\tgomock.InOrder(\n\t\t\t\t\tf.snapshotOps.EXPECT().NewSnapshotter().Return(snapshotter, nil),\n\t\t\t\t\tf.snapshotOps.EXPECT().CreateSnapshot(gomock.Any(), snapshotter, &types.CreateSnapshotArgs{\n\t\t\t\t\t\tNamespace:           \"ns\",\n\t\t\t\t\t\tPVCName:             \"pvc1\",\n\t\t\t\t\t\tVolumeSnapshotClass: \"vsc\",\n\t\t\t\t\t\tSnapshotName:        \"snap1\",\n\t\t\t\t\t}).Return(&snapv1.VolumeSnapshot{\n\t\t\t\t\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\t\t\t\t\tName: \"createdName\",\n\t\t\t\t\t\t},\n\t\t\t\t\t}, nil),\n\t\t\t\t\tf.snapshotOps.EXPECT().CreateFromSourceCheck(gomock.Any(), snapshotter, &types.CreateFromSourceCheckArgs{\n\t\t\t\t\t\tVolumeSnapshotClass: \"vsc\",\n\t\t\t\t\t\tSnapshotName:        \"createdName\",\n\t\t\t\t\t\tNamespace:           \"ns\",\n\t\t\t\t\t}, groupversion).Return(nil),\n\t\t\t\t)\n\t\t\t},\n\t\t\terrChecker:  IsNil,\n\t\t\tsnapChecker: NotNil,\n\t\t},\n\t\t{\n\t\t\targs: 
&types.CSISnapshotRestoreArgs{\n\t\t\t\tNamespace:           \"ns\",\n\t\t\t\tVolumeSnapshotClass: \"vsc\",\n\t\t\t\tSkipCFSCheck:        true,\n\t\t\t},\n\t\t\tpvc: &v1.PersistentVolumeClaim{\n\t\t\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\t\t\tName: \"pvc1\",\n\t\t\t\t},\n\t\t\t},\n\t\t\tsnapshotName: \"snap1\",\n\t\t\tprepare: func(f *fields) {\n\t\t\t\tgomock.InOrder(\n\t\t\t\t\tf.snapshotOps.EXPECT().NewSnapshotter().Return(snapshotter, nil),\n\t\t\t\t\tf.snapshotOps.EXPECT().CreateSnapshot(gomock.Any(), snapshotter, &types.CreateSnapshotArgs{\n\t\t\t\t\t\tNamespace:           \"ns\",\n\t\t\t\t\t\tPVCName:             \"pvc1\",\n\t\t\t\t\t\tVolumeSnapshotClass: \"vsc\",\n\t\t\t\t\t\tSnapshotName:        \"snap1\",\n\t\t\t\t\t}).Return(&snapv1.VolumeSnapshot{\n\t\t\t\t\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\t\t\t\t\tName: \"createdName\",\n\t\t\t\t\t\t},\n\t\t\t\t\t}, nil),\n\t\t\t\t)\n\t\t\t},\n\t\t\terrChecker:  IsNil,\n\t\t\tsnapChecker: NotNil,\n\t\t},\n\t\t{\n\t\t\targs: &types.CSISnapshotRestoreArgs{\n\t\t\t\tNamespace:           \"ns\",\n\t\t\t\tVolumeSnapshotClass: \"vsc\",\n\t\t\t},\n\t\t\tpvc: &v1.PersistentVolumeClaim{\n\t\t\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\t\t\tName: \"pvc1\",\n\t\t\t\t},\n\t\t\t},\n\t\t\tsnapshotName: \"snap1\",\n\t\t\tprepare: func(f *fields) {\n\t\t\t\tgomock.InOrder(\n\t\t\t\t\tf.snapshotOps.EXPECT().NewSnapshotter().Return(snapshotter, nil),\n\t\t\t\t\tf.snapshotOps.EXPECT().CreateSnapshot(gomock.Any(), snapshotter, &types.CreateSnapshotArgs{\n\t\t\t\t\t\tNamespace:           \"ns\",\n\t\t\t\t\t\tPVCName:             \"pvc1\",\n\t\t\t\t\t\tVolumeSnapshotClass: \"vsc\",\n\t\t\t\t\t\tSnapshotName:        \"snap1\",\n\t\t\t\t\t}).Return(&snapv1.VolumeSnapshot{\n\t\t\t\t\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\t\t\t\t\tName: \"createdName\",\n\t\t\t\t\t\t},\n\t\t\t\t\t}, nil),\n\t\t\t\t\tf.snapshotOps.EXPECT().CreateFromSourceCheck(gomock.Any(), snapshotter, &types.CreateFromSourceCheckArgs{\n\t\t\t\t\t\tVolumeSnapshotClass: 
\"vsc\",\n\t\t\t\t\t\tSnapshotName:        \"createdName\",\n\t\t\t\t\t\tNamespace:           \"ns\",\n\t\t\t\t\t}, groupversion).Return(fmt.Errorf(\"cfs error\")),\n\t\t\t\t)\n\t\t\t},\n\t\t\terrChecker:  NotNil,\n\t\t\tsnapChecker: NotNil,\n\t\t},\n\t\t{\n\t\t\targs: &types.CSISnapshotRestoreArgs{\n\t\t\t\tNamespace:           \"ns\",\n\t\t\t\tVolumeSnapshotClass: \"vsc\",\n\t\t\t},\n\t\t\tpvc: &v1.PersistentVolumeClaim{\n\t\t\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\t\t\tName: \"pvc1\",\n\t\t\t\t},\n\t\t\t},\n\t\t\tsnapshotName: \"snap1\",\n\t\t\tprepare: func(f *fields) {\n\t\t\t\tgomock.InOrder(\n\t\t\t\t\tf.snapshotOps.EXPECT().NewSnapshotter().Return(snapshotter, nil),\n\t\t\t\t\tf.snapshotOps.EXPECT().CreateSnapshot(gomock.Any(), snapshotter, &types.CreateSnapshotArgs{\n\t\t\t\t\t\tNamespace:           \"ns\",\n\t\t\t\t\t\tPVCName:             \"pvc1\",\n\t\t\t\t\t\tVolumeSnapshotClass: \"vsc\",\n\t\t\t\t\t\tSnapshotName:        \"snap1\",\n\t\t\t\t\t}).Return(nil, fmt.Errorf(\"create snapshot error\")),\n\t\t\t\t)\n\t\t\t},\n\t\t\terrChecker:  NotNil,\n\t\t\tsnapChecker: IsNil,\n\t\t},\n\t\t{\n\t\t\targs: &types.CSISnapshotRestoreArgs{\n\t\t\t\tNamespace:           \"ns\",\n\t\t\t\tVolumeSnapshotClass: \"vsc\",\n\t\t\t},\n\t\t\tpvc: &v1.PersistentVolumeClaim{\n\t\t\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\t\t\tName: \"pvc1\",\n\t\t\t\t},\n\t\t\t},\n\t\t\tsnapshotName: \"snap1\",\n\t\t\tprepare: func(f *fields) {\n\t\t\t\tgomock.InOrder(\n\t\t\t\t\tf.snapshotOps.EXPECT().NewSnapshotter().Return(nil, fmt.Errorf(\"snapshotter error\")),\n\t\t\t\t)\n\t\t\t},\n\t\t\terrChecker:  NotNil,\n\t\t\tsnapChecker: IsNil,\n\t\t},\n\t} {\n\t\tctrl := gomock.NewController(c)\n\t\tdefer ctrl.Finish()\n\t\tf := fields{\n\t\t\tsnapshotOps: mocks.NewMockSnapshotCreator(ctrl),\n\t\t}\n\t\tif tc.prepare != nil {\n\t\t\ttc.prepare(&f)\n\t\t}\n\t\tstepper := &snapshotRestoreSteps{\n\t\t\tsnapshotCreateOps:    f.snapshotOps,\n\t\t\tSnapshotGroupVersion: 
groupversion,\n\t\t}\n\t\tsnapshot, err := stepper.SnapshotApplication(ctx, tc.args, tc.pvc, tc.snapshotName)\n\t\tc.Check(err, tc.errChecker)\n\t\tc.Check(snapshot, tc.snapChecker)\n\t}\n}\n\nfunc (s *CSITestSuite) TestRestoreApplication(c *C) {\n\tctx := context.Background()\n\tresourceQuantity := resource.MustParse(\"1Gi\")\n\tsnapshotAPIGroup := \"snapshot.storage.k8s.io\"\n\ttype fields struct {\n\t\tcreateAppOps *mocks.MockApplicationCreator\n\t}\n\tfor _, tc := range []struct {\n\t\targs       *types.CSISnapshotRestoreArgs\n\t\tsnapshot   *snapv1.VolumeSnapshot\n\t\tprepare    func(f *fields)\n\t\terrChecker Checker\n\t\tpodChecker Checker\n\t\tpvcChecker Checker\n\t}{\n\t\t{\n\t\t\targs: &types.CSISnapshotRestoreArgs{\n\t\t\t\tStorageClass:   \"sc\",\n\t\t\t\tNamespace:      \"ns\",\n\t\t\t\tRunAsUser:      100,\n\t\t\t\tContainerImage: \"image\",\n\t\t\t},\n\t\t\tsnapshot: &snapv1.VolumeSnapshot{\n\t\t\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\t\t\tName: \"snap1\",\n\t\t\t\t},\n\t\t\t\tStatus: &snapv1.VolumeSnapshotStatus{\n\t\t\t\t\tRestoreSize: &resourceQuantity,\n\t\t\t\t},\n\t\t\t},\n\t\t\tprepare: func(f *fields) {\n\t\t\t\tgomock.InOrder(\n\t\t\t\t\tf.createAppOps.EXPECT().CreatePVC(gomock.Any(), &types.CreatePVCArgs{\n\t\t\t\t\t\tGenerateName: clonedPVCGenerateName,\n\t\t\t\t\t\tStorageClass: \"sc\",\n\t\t\t\t\t\tNamespace:    \"ns\",\n\t\t\t\t\t\tDataSource: &v1.TypedLocalObjectReference{\n\t\t\t\t\t\t\tAPIGroup: &snapshotAPIGroup,\n\t\t\t\t\t\t\tKind:     \"VolumeSnapshot\",\n\t\t\t\t\t\t\tName:     \"snap1\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\tRestoreSize: &resourceQuantity,\n\t\t\t\t\t}).Return(&v1.PersistentVolumeClaim{\n\t\t\t\t\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\t\t\t\t\tName: \"pvc1\",\n\t\t\t\t\t\t},\n\t\t\t\t\t}, nil),\n\t\t\t\t\tf.createAppOps.EXPECT().CreatePod(gomock.Any(), &types.CreatePodArgs{\n\t\t\t\t\t\tGenerateName:   clonedPodGenerateName,\n\t\t\t\t\t\tNamespace:      \"ns\",\n\t\t\t\t\t\tCommand:        
[]string{\"/bin/sh\"},\n\t\t\t\t\t\tContainerArgs:  []string{\"-c\", \"tail -f /dev/null\"},\n\t\t\t\t\t\tRunAsUser:      100,\n\t\t\t\t\t\tContainerImage: \"image\",\n\t\t\t\t\t\tPVCMap: map[string]types.VolumePath{\n\t\t\t\t\t\t\t\"pvc1\": {\n\t\t\t\t\t\t\t\tMountPath: \"/data\",\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t}).Return(&v1.Pod{\n\t\t\t\t\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\t\t\t\t\tName: \"pod1\",\n\t\t\t\t\t\t},\n\t\t\t\t\t}, nil),\n\t\t\t\t\tf.createAppOps.EXPECT().WaitForPVCReady(gomock.Any(), \"ns\", \"pvc1\").Return(nil),\n\t\t\t\t\tf.createAppOps.EXPECT().WaitForPodReady(gomock.Any(), \"ns\", \"pod1\").Return(nil),\n\t\t\t\t)\n\t\t\t},\n\t\t\terrChecker: IsNil,\n\t\t\tpodChecker: NotNil,\n\t\t\tpvcChecker: NotNil,\n\t\t},\n\t\t{\n\t\t\targs: &types.CSISnapshotRestoreArgs{\n\t\t\t\tStorageClass:   \"sc\",\n\t\t\t\tNamespace:      \"ns\",\n\t\t\t\tRunAsUser:      100,\n\t\t\t\tContainerImage: \"image\",\n\t\t\t},\n\t\t\tsnapshot: &snapv1.VolumeSnapshot{\n\t\t\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\t\t\tName: \"snap1\",\n\t\t\t\t},\n\t\t\t\tStatus: &snapv1.VolumeSnapshotStatus{\n\t\t\t\t\tRestoreSize: &resourceQuantity,\n\t\t\t\t},\n\t\t\t},\n\t\t\tprepare: func(f *fields) {\n\t\t\t\tgomock.InOrder(\n\t\t\t\t\tf.createAppOps.EXPECT().CreatePVC(gomock.Any(), &types.CreatePVCArgs{\n\t\t\t\t\t\tGenerateName: clonedPVCGenerateName,\n\t\t\t\t\t\tStorageClass: \"sc\",\n\t\t\t\t\t\tNamespace:    \"ns\",\n\t\t\t\t\t\tDataSource: &v1.TypedLocalObjectReference{\n\t\t\t\t\t\t\tAPIGroup: &snapshotAPIGroup,\n\t\t\t\t\t\t\tKind:     \"VolumeSnapshot\",\n\t\t\t\t\t\t\tName:     \"snap1\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\tRestoreSize: &resourceQuantity,\n\t\t\t\t\t}).Return(&v1.PersistentVolumeClaim{\n\t\t\t\t\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\t\t\t\t\tName: \"pvc1\",\n\t\t\t\t\t\t},\n\t\t\t\t\t}, nil),\n\t\t\t\t\tf.createAppOps.EXPECT().CreatePod(gomock.Any(), &types.CreatePodArgs{\n\t\t\t\t\t\tGenerateName:   
clonedPodGenerateName,\n\t\t\t\t\t\tNamespace:      \"ns\",\n\t\t\t\t\t\tCommand:        []string{\"/bin/sh\"},\n\t\t\t\t\t\tContainerArgs:  []string{\"-c\", \"tail -f /dev/null\"},\n\t\t\t\t\t\tRunAsUser:      100,\n\t\t\t\t\t\tContainerImage: \"image\",\n\t\t\t\t\t\tPVCMap: map[string]types.VolumePath{\n\t\t\t\t\t\t\t\"pvc1\": {\n\t\t\t\t\t\t\t\tMountPath: \"/data\",\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t}).Return(&v1.Pod{\n\t\t\t\t\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\t\t\t\t\tName: \"pod1\",\n\t\t\t\t\t\t},\n\t\t\t\t\t}, nil),\n\t\t\t\t\tf.createAppOps.EXPECT().WaitForPVCReady(gomock.Any(), \"ns\", \"pvc1\").Return(nil),\n\t\t\t\t\tf.createAppOps.EXPECT().WaitForPodReady(gomock.Any(), \"ns\", \"pod1\").Return(fmt.Errorf(\"pod ready error\")),\n\t\t\t\t)\n\t\t\t},\n\t\t\terrChecker: NotNil,\n\t\t\tpodChecker: NotNil,\n\t\t\tpvcChecker: NotNil,\n\t\t},\n\t\t{\n\t\t\targs: &types.CSISnapshotRestoreArgs{\n\t\t\t\tStorageClass:   \"sc\",\n\t\t\t\tNamespace:      \"ns\",\n\t\t\t\tRunAsUser:      100,\n\t\t\t\tContainerImage: \"image\",\n\t\t\t},\n\t\t\tsnapshot: &snapv1.VolumeSnapshot{\n\t\t\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\t\t\tName: \"snap1\",\n\t\t\t\t},\n\t\t\t\tStatus: &snapv1.VolumeSnapshotStatus{\n\t\t\t\t\tRestoreSize: &resourceQuantity,\n\t\t\t\t},\n\t\t\t},\n\t\t\tprepare: func(f *fields) {\n\t\t\t\tgomock.InOrder(\n\t\t\t\t\tf.createAppOps.EXPECT().CreatePVC(gomock.Any(), gomock.Any()).Return(&v1.PersistentVolumeClaim{\n\t\t\t\t\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\t\t\t\t\tName: \"pvc1\",\n\t\t\t\t\t\t},\n\t\t\t\t\t}, nil),\n\t\t\t\t\tf.createAppOps.EXPECT().CreatePod(gomock.Any(), gomock.Any()).Return(nil, fmt.Errorf(\"create pod error\")),\n\t\t\t\t)\n\t\t\t},\n\t\t\terrChecker: NotNil,\n\t\t\tpodChecker: IsNil,\n\t\t\tpvcChecker: NotNil,\n\t\t},\n\t\t{\n\t\t\targs: &types.CSISnapshotRestoreArgs{\n\t\t\t\tStorageClass:   \"sc\",\n\t\t\t\tNamespace:      \"ns\",\n\t\t\t\tRunAsUser:      100,\n\t\t\t\tContainerImage: 
\"image\",\n\t\t\t},\n\t\t\tsnapshot: &snapv1.VolumeSnapshot{\n\t\t\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\t\t\tName: \"snap1\",\n\t\t\t\t},\n\t\t\t\tStatus: &snapv1.VolumeSnapshotStatus{\n\t\t\t\t\tRestoreSize: &resourceQuantity,\n\t\t\t\t},\n\t\t\t},\n\t\t\tprepare: func(f *fields) {\n\t\t\t\tgomock.InOrder(\n\t\t\t\t\tf.createAppOps.EXPECT().CreatePVC(gomock.Any(), gomock.Any()).Return(nil, fmt.Errorf(\"create pvc error\")),\n\t\t\t\t)\n\t\t\t},\n\t\t\terrChecker: NotNil,\n\t\t\tpodChecker: IsNil,\n\t\t\tpvcChecker: IsNil,\n\t\t},\n\t} {\n\t\tctrl := gomock.NewController(c)\n\t\tdefer ctrl.Finish()\n\t\tf := fields{\n\t\t\tcreateAppOps: mocks.NewMockApplicationCreator(ctrl),\n\t\t}\n\t\tif tc.prepare != nil {\n\t\t\ttc.prepare(&f)\n\t\t}\n\t\tstepper := &snapshotRestoreSteps{\n\t\t\tcreateAppOps: f.createAppOps,\n\t\t}\n\t\tpod, pvc, err := stepper.RestoreApplication(ctx, tc.args, tc.snapshot)\n\t\tc.Check(err, tc.errChecker)\n\t\tc.Check(pod, tc.podChecker)\n\t\tc.Check(pvc, tc.pvcChecker)\n\t}\n}\n\nfunc (s *CSITestSuite) TestCleanup(c *C) {\n\tctx := context.Background()\n\tgroupversion := &metav1.GroupVersionForDiscovery{\n\t\tGroupVersion: \"gv\",\n\t\tVersion:      \"v\",\n\t}\n\ttype fields struct {\n\t\tcleanerOps *mocks.MockCleaner\n\t}\n\tfor _, tc := range []struct {\n\t\tresults *types.CSISnapshotRestoreResults\n\t\tprepare func(f *fields)\n\t}{\n\t\t{\n\t\t\tresults: nil,\n\t\t},\n\t\t{\n\t\t\tresults: &types.CSISnapshotRestoreResults{\n\t\t\t\tOriginalPVC: &v1.PersistentVolumeClaim{\n\t\t\t\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\t\t\t\tName:      \"pvc1\",\n\t\t\t\t\t\tNamespace: \"ns\",\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\tOriginalPod: &v1.Pod{\n\t\t\t\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\t\t\t\tName:      \"pod1\",\n\t\t\t\t\t\tNamespace: \"ns\",\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\tClonedPVC: &v1.PersistentVolumeClaim{\n\t\t\t\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\t\t\t\tName:      \"pvc2\",\n\t\t\t\t\t\tNamespace: 
\"ns\",\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\tClonedPod: &v1.Pod{\n\t\t\t\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\t\t\t\tName:      \"pod2\",\n\t\t\t\t\t\tNamespace: \"ns\",\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\tSnapshot: &snapv1.VolumeSnapshot{\n\t\t\t\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\t\t\t\tName:      \"snapshot\",\n\t\t\t\t\t\tNamespace: \"ns\",\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\tprepare: func(f *fields) {\n\t\t\t\tgomock.InOrder(\n\t\t\t\t\tf.cleanerOps.EXPECT().DeletePVC(ctx, \"pvc1\", \"ns\").Return(nil),\n\t\t\t\t\tf.cleanerOps.EXPECT().DeletePod(ctx, \"pod1\", \"ns\").Return(nil),\n\t\t\t\t\tf.cleanerOps.EXPECT().DeletePVC(ctx, \"pvc2\", \"ns\").Return(nil),\n\t\t\t\t\tf.cleanerOps.EXPECT().DeletePod(ctx, \"pod2\", \"ns\").Return(nil),\n\t\t\t\t\tf.cleanerOps.EXPECT().DeleteSnapshot(ctx, \"snapshot\", \"ns\", groupversion).Return(nil),\n\t\t\t\t)\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tresults: &types.CSISnapshotRestoreResults{\n\t\t\t\tOriginalPVC: &v1.PersistentVolumeClaim{\n\t\t\t\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\t\t\t\tName:      \"pvc1\",\n\t\t\t\t\t\tNamespace: \"ns\",\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\tOriginalPod: &v1.Pod{\n\t\t\t\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\t\t\t\tName:      \"pod1\",\n\t\t\t\t\t\tNamespace: \"ns\",\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\tClonedPVC: &v1.PersistentVolumeClaim{\n\t\t\t\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\t\t\t\tName:      \"pvc2\",\n\t\t\t\t\t\tNamespace: \"ns\",\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\tClonedPod: &v1.Pod{\n\t\t\t\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\t\t\t\tName:      \"pod2\",\n\t\t\t\t\t\tNamespace: \"ns\",\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\tSnapshot: &snapv1.VolumeSnapshot{\n\t\t\t\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\t\t\t\tName:      \"snapshot\",\n\t\t\t\t\t\tNamespace: \"ns\",\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\tprepare: func(f *fields) {\n\t\t\t\tgomock.InOrder(\n\t\t\t\t\tf.cleanerOps.EXPECT().DeletePVC(ctx, \"pvc1\", 
\"ns\").Return(fmt.Errorf(\"err\")),\n\t\t\t\t\tf.cleanerOps.EXPECT().DeletePod(ctx, \"pod1\", \"ns\").Return(fmt.Errorf(\"err\")),\n\t\t\t\t\tf.cleanerOps.EXPECT().DeletePVC(ctx, \"pvc2\", \"ns\").Return(fmt.Errorf(\"err\")),\n\t\t\t\t\tf.cleanerOps.EXPECT().DeletePod(ctx, \"pod2\", \"ns\").Return(fmt.Errorf(\"err\")),\n\t\t\t\t\tf.cleanerOps.EXPECT().DeleteSnapshot(ctx, \"snapshot\", \"ns\", groupversion).Return(fmt.Errorf(\"err\")),\n\t\t\t\t)\n\t\t\t},\n\t\t},\n\t} {\n\t\tctrl := gomock.NewController(c)\n\t\tdefer ctrl.Finish()\n\t\tf := fields{\n\t\t\tcleanerOps: mocks.NewMockCleaner(ctrl),\n\t\t}\n\t\tif tc.prepare != nil {\n\t\t\ttc.prepare(&f)\n\t\t}\n\t\tstepper := &snapshotRestoreSteps{\n\t\t\tcleanerOps:           f.cleanerOps,\n\t\t\tSnapshotGroupVersion: groupversion,\n\t\t}\n\t\tstepper.Cleanup(ctx, tc.results)\n\t}\n}\n\nfunc (s *CSITestSuite) TestValidateData(c *C) {\n\tctx := context.Background()\n\ttype fields struct {\n\t\tvalidatorOps *mocks.MockDataValidator\n\t}\n\tfor _, tc := range []struct {\n\t\tprepare    func(f *fields)\n\t\tpod        *v1.Pod\n\t\tdata       string\n\t\terrChecker Checker\n\t}{\n\t\t{\n\t\t\tpod: &v1.Pod{\n\t\t\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\t\t\tName:      \"pod\",\n\t\t\t\t\tNamespace: \"ns\",\n\t\t\t\t},\n\t\t\t},\n\t\t\tdata: \"somedata\",\n\t\t\tprepare: func(f *fields) {\n\t\t\t\tgomock.InOrder(\n\t\t\t\t\tf.validatorOps.EXPECT().FetchPodData(context.Background(), \"pod\", \"ns\").Return(\"somedata\", nil),\n\t\t\t\t)\n\t\t\t},\n\t\t\terrChecker: IsNil,\n\t\t},\n\t\t{\n\t\t\tpod: &v1.Pod{\n\t\t\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\t\t\tName:      \"pod\",\n\t\t\t\t\tNamespace: \"ns\",\n\t\t\t\t},\n\t\t\t},\n\t\t\tdata: \"somedata\",\n\t\t\tprepare: func(f *fields) {\n\t\t\t\tgomock.InOrder(\n\t\t\t\t\tf.validatorOps.EXPECT().FetchPodData(context.Background(), \"pod\", \"ns\").Return(\"someotherdata\", nil),\n\t\t\t\t)\n\t\t\t},\n\t\t\terrChecker: NotNil,\n\t\t},\n\t\t{\n\t\t\tpod: 
&v1.Pod{\n\t\t\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\t\t\tName:      \"pod\",\n\t\t\t\t\tNamespace: \"ns\",\n\t\t\t\t},\n\t\t\t},\n\t\t\tdata: \"somedata\",\n\t\t\tprepare: func(f *fields) {\n\t\t\t\tgomock.InOrder(\n\t\t\t\t\tf.validatorOps.EXPECT().FetchPodData(context.Background(), \"pod\", \"ns\").Return(\"\", fmt.Errorf(\"error\")),\n\t\t\t\t)\n\t\t\t},\n\t\t\terrChecker: NotNil,\n\t\t},\n\t} {\n\t\tctrl := gomock.NewController(c)\n\t\tdefer ctrl.Finish()\n\t\tf := fields{\n\t\t\tvalidatorOps: mocks.NewMockDataValidator(ctrl),\n\t\t}\n\t\tif tc.prepare != nil {\n\t\t\ttc.prepare(&f)\n\t\t}\n\t\tstepper := &snapshotRestoreSteps{\n\t\t\tdataValidatorOps: f.validatorOps,\n\t\t}\n\t\terr := stepper.ValidateData(ctx, tc.pod, tc.data)\n\t\tc.Check(err, tc.errChecker)\n\t}\n}\n"
  },
  {
    "path": "pkg/csi/snapshot_restore_test.go",
    "content": "package csi\n\nimport (\n\t\"context\"\n\t\"fmt\"\n\t\"testing\"\n\n\t\"github.com/golang/mock/gomock\"\n\t\"github.com/kastenhq/kubestr/pkg/csi/mocks\"\n\t\"github.com/kastenhq/kubestr/pkg/csi/types\"\n\tsnapv1 \"github.com/kubernetes-csi/external-snapshotter/client/v4/apis/volumesnapshot/v1\"\n\t. \"gopkg.in/check.v1\"\n\tv1 \"k8s.io/api/core/v1\"\n\tmetav1 \"k8s.io/apimachinery/pkg/apis/meta/v1\"\n\t\"k8s.io/apimachinery/pkg/runtime\"\n\t\"k8s.io/client-go/dynamic\"\n\tfakedynamic \"k8s.io/client-go/dynamic/fake\"\n\t\"k8s.io/client-go/kubernetes\"\n\t\"k8s.io/client-go/kubernetes/fake\"\n)\n\nfunc Test(t *testing.T) { TestingT(t) }\n\ntype CSITestSuite struct{}\n\nvar _ = Suite(&CSITestSuite{})\n\nfunc (s *CSITestSuite) TestRunSnapshotRestoreHelper(c *C) {\n\tctx := context.Background()\n\ttype fields struct {\n\t\tstepperOps *mocks.MockSnapshotRestoreStepper\n\t}\n\tfor _, tc := range []struct {\n\t\tkubeCli    kubernetes.Interface\n\t\tdynCli     dynamic.Interface\n\t\targs       *types.CSISnapshotRestoreArgs\n\t\tprepare    func(f *fields)\n\t\tresult     *types.CSISnapshotRestoreResults\n\t\terrChecker Checker\n\t}{\n\t\t{ // success\n\t\t\tkubeCli: fake.NewSimpleClientset(),\n\t\t\tdynCli:  fakedynamic.NewSimpleDynamicClient(runtime.NewScheme()),\n\t\t\targs: &types.CSISnapshotRestoreArgs{\n\t\t\t\tCleanup: true,\n\t\t\t},\n\t\t\tprepare: func(f *fields) {\n\t\t\t\tgomock.InOrder(\n\t\t\t\t\tf.stepperOps.EXPECT().ValidateArgs(gomock.Any(), gomock.Any()).Return(nil),\n\t\t\t\t\tf.stepperOps.EXPECT().CreateApplication(gomock.Any(), gomock.Any(), gomock.Any()).Return(\n\t\t\t\t\t\t&v1.Pod{\n\t\t\t\t\t\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\t\t\t\t\t\tName:      \"pod1\",\n\t\t\t\t\t\t\t\tNamespace: \"ns\",\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t\t&v1.PersistentVolumeClaim{\n\t\t\t\t\t\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\t\t\t\t\t\tName:      \"pvc1\",\n\t\t\t\t\t\t\t\tNamespace: 
\"ns\",\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tnil,\n\t\t\t\t\t),\n\t\t\t\t\tf.stepperOps.EXPECT().ValidateData(gomock.Any(), &v1.Pod{\n\t\t\t\t\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\t\t\t\t\tName:      \"pod1\",\n\t\t\t\t\t\t\tNamespace: \"ns\",\n\t\t\t\t\t\t},\n\t\t\t\t\t}, gomock.Any()).Return(nil),\n\t\t\t\t\tf.stepperOps.EXPECT().SnapshotApplication(gomock.Any(), gomock.Any(),\n\t\t\t\t\t\t&v1.PersistentVolumeClaim{\n\t\t\t\t\t\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\t\t\t\t\t\tName:      \"pvc1\",\n\t\t\t\t\t\t\t\tNamespace: \"ns\",\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t}, gomock.Any(),\n\t\t\t\t\t).Return(\n\t\t\t\t\t\t&snapv1.VolumeSnapshot{\n\t\t\t\t\t\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\t\t\t\t\t\tName: \"snapshot\",\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tnil,\n\t\t\t\t\t),\n\t\t\t\t\tf.stepperOps.EXPECT().RestoreApplication(gomock.Any(), gomock.Any(),\n\t\t\t\t\t\t&snapv1.VolumeSnapshot{\n\t\t\t\t\t\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\t\t\t\t\t\tName: \"snapshot\",\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t).Return(\n\t\t\t\t\t\t&v1.Pod{\n\t\t\t\t\t\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\t\t\t\t\t\tName:      \"pod2\",\n\t\t\t\t\t\t\t\tNamespace: \"ns\",\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t\t&v1.PersistentVolumeClaim{\n\t\t\t\t\t\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\t\t\t\t\t\tName:      \"pvc2\",\n\t\t\t\t\t\t\t\tNamespace: \"ns\",\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tnil,\n\t\t\t\t\t),\n\t\t\t\t\tf.stepperOps.EXPECT().ValidateData(gomock.Any(), &v1.Pod{\n\t\t\t\t\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\t\t\t\t\tName:      \"pod2\",\n\t\t\t\t\t\t\tNamespace: \"ns\",\n\t\t\t\t\t\t},\n\t\t\t\t\t}, gomock.Any()).Return(nil),\n\t\t\t\t\tf.stepperOps.EXPECT().Cleanup(gomock.Any(), gomock.Any()).Return(),\n\t\t\t\t)\n\t\t\t},\n\t\t\tresult: &types.CSISnapshotRestoreResults{\n\t\t\t\tOriginalPVC: &v1.PersistentVolumeClaim{\n\t\t\t\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\t\t\t\tName:      
\"pvc1\",\n\t\t\t\t\t\tNamespace: \"ns\",\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\tOriginalPod: &v1.Pod{\n\t\t\t\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\t\t\t\tName:      \"pod1\",\n\t\t\t\t\t\tNamespace: \"ns\",\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\tClonedPVC: &v1.PersistentVolumeClaim{\n\t\t\t\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\t\t\t\tName:      \"pvc2\",\n\t\t\t\t\t\tNamespace: \"ns\",\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\tClonedPod: &v1.Pod{\n\t\t\t\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\t\t\t\tName:      \"pod2\",\n\t\t\t\t\t\tNamespace: \"ns\",\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\tSnapshot: &snapv1.VolumeSnapshot{\n\t\t\t\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\t\t\t\tName: \"snapshot\",\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\terrChecker: IsNil,\n\t\t},\n\t\t{ // no cleanup\n\t\t\tkubeCli: fake.NewSimpleClientset(),\n\t\t\tdynCli:  fakedynamic.NewSimpleDynamicClient(runtime.NewScheme()),\n\t\t\targs: &types.CSISnapshotRestoreArgs{\n\t\t\t\tCleanup: false,\n\t\t\t},\n\t\t\tprepare: func(f *fields) {\n\t\t\t\tgomock.InOrder(\n\t\t\t\t\tf.stepperOps.EXPECT().ValidateArgs(gomock.Any(), gomock.Any()).Return(nil),\n\t\t\t\t\tf.stepperOps.EXPECT().CreateApplication(gomock.Any(), gomock.Any(), gomock.Any()).Return(nil, nil, nil),\n\t\t\t\t\tf.stepperOps.EXPECT().ValidateData(gomock.Any(), gomock.Any(), gomock.Any()).Return(nil),\n\t\t\t\t\tf.stepperOps.EXPECT().SnapshotApplication(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).Return(nil, nil),\n\t\t\t\t\tf.stepperOps.EXPECT().RestoreApplication(gomock.Any(), gomock.Any(), gomock.Any()).Return(nil, nil, nil),\n\t\t\t\t\tf.stepperOps.EXPECT().ValidateData(gomock.Any(), gomock.Any(), gomock.Any()).Return(nil),\n\t\t\t\t)\n\t\t\t},\n\t\t\tresult:     &types.CSISnapshotRestoreResults{},\n\t\t\terrChecker: IsNil,\n\t\t},\n\t\t{ // restored data validation fails\n\t\t\tkubeCli: fake.NewSimpleClientset(),\n\t\t\tdynCli:  fakedynamic.NewSimpleDynamicClient(runtime.NewScheme()),\n\t\t\targs: 
&types.CSISnapshotRestoreArgs{\n\t\t\t\tCleanup: false,\n\t\t\t},\n\t\t\tprepare: func(f *fields) {\n\t\t\t\tgomock.InOrder(\n\t\t\t\t\tf.stepperOps.EXPECT().ValidateArgs(gomock.Any(), gomock.Any()).Return(nil),\n\t\t\t\t\tf.stepperOps.EXPECT().CreateApplication(gomock.Any(), gomock.Any(), gomock.Any()).Return(nil, nil, nil),\n\t\t\t\t\tf.stepperOps.EXPECT().ValidateData(gomock.Any(), gomock.Any(), gomock.Any()).Return(nil),\n\t\t\t\t\tf.stepperOps.EXPECT().SnapshotApplication(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).Return(nil, nil),\n\t\t\t\t\tf.stepperOps.EXPECT().RestoreApplication(gomock.Any(), gomock.Any(), gomock.Any()).Return(nil, nil, nil),\n\t\t\t\t\tf.stepperOps.EXPECT().ValidateData(gomock.Any(), gomock.Any(), gomock.Any()).Return(fmt.Errorf(\"validation error\")),\n\t\t\t\t)\n\t\t\t},\n\t\t\tresult:     &types.CSISnapshotRestoreResults{},\n\t\t\terrChecker: NotNil,\n\t\t},\n\t\t{ // restore error, objects still returned\n\t\t\tkubeCli: fake.NewSimpleClientset(),\n\t\t\tdynCli:  fakedynamic.NewSimpleDynamicClient(runtime.NewScheme()),\n\t\t\targs: &types.CSISnapshotRestoreArgs{\n\t\t\t\tCleanup: false,\n\t\t\t},\n\t\t\tprepare: func(f *fields) {\n\t\t\t\tgomock.InOrder(\n\t\t\t\t\tf.stepperOps.EXPECT().ValidateArgs(gomock.Any(), gomock.Any()).Return(nil),\n\t\t\t\t\tf.stepperOps.EXPECT().CreateApplication(gomock.Any(), gomock.Any(), gomock.Any()).Return(nil, nil, nil),\n\t\t\t\t\tf.stepperOps.EXPECT().ValidateData(gomock.Any(), gomock.Any(), gomock.Any()).Return(nil),\n\t\t\t\t\tf.stepperOps.EXPECT().SnapshotApplication(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).Return(nil, nil),\n\t\t\t\t\tf.stepperOps.EXPECT().RestoreApplication(gomock.Any(), gomock.Any(), gomock.Any()).Return(\n\t\t\t\t\t\t&v1.Pod{\n\t\t\t\t\t\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\t\t\t\t\t\tName:      \"pod2\",\n\t\t\t\t\t\t\t\tNamespace: \"ns\",\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t\t&v1.PersistentVolumeClaim{\n\t\t\t\t\t\t\tObjectMeta: 
metav1.ObjectMeta{\n\t\t\t\t\t\t\t\tName:      \"pvc2\",\n\t\t\t\t\t\t\t\tNamespace: \"ns\",\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tfmt.Errorf(\"restore error\"),\n\t\t\t\t\t),\n\t\t\t\t)\n\t\t\t},\n\t\t\tresult: &types.CSISnapshotRestoreResults{\n\t\t\t\tClonedPVC: &v1.PersistentVolumeClaim{\n\t\t\t\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\t\t\t\tName:      \"pvc2\",\n\t\t\t\t\t\tNamespace: \"ns\",\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\tClonedPod: &v1.Pod{\n\t\t\t\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\t\t\t\tName:      \"pod2\",\n\t\t\t\t\t\tNamespace: \"ns\",\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\terrChecker: NotNil,\n\t\t},\n\t\t{ // restore error, no objects returned\n\t\t\tkubeCli: fake.NewSimpleClientset(),\n\t\t\tdynCli:  fakedynamic.NewSimpleDynamicClient(runtime.NewScheme()),\n\t\t\targs: &types.CSISnapshotRestoreArgs{\n\t\t\t\tCleanup: false,\n\t\t\t},\n\t\t\tprepare: func(f *fields) {\n\t\t\t\tgomock.InOrder(\n\t\t\t\t\tf.stepperOps.EXPECT().ValidateArgs(gomock.Any(), gomock.Any()).Return(nil),\n\t\t\t\t\tf.stepperOps.EXPECT().CreateApplication(gomock.Any(), gomock.Any(), gomock.Any()).Return(nil, nil, nil),\n\t\t\t\t\tf.stepperOps.EXPECT().ValidateData(gomock.Any(), gomock.Any(), gomock.Any()).Return(nil),\n\t\t\t\t\tf.stepperOps.EXPECT().SnapshotApplication(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).Return(nil, nil),\n\t\t\t\t\tf.stepperOps.EXPECT().RestoreApplication(gomock.Any(), gomock.Any(), gomock.Any()).Return(nil, nil, fmt.Errorf(\"restore error\")),\n\t\t\t\t)\n\t\t\t},\n\t\t\tresult:     &types.CSISnapshotRestoreResults{},\n\t\t\terrChecker: NotNil,\n\t\t},\n\t\t{ // snapshot error, object still returned\n\t\t\tkubeCli: fake.NewSimpleClientset(),\n\t\t\tdynCli:  fakedynamic.NewSimpleDynamicClient(runtime.NewScheme()),\n\t\t\targs: &types.CSISnapshotRestoreArgs{\n\t\t\t\tCleanup: false,\n\t\t\t},\n\t\t\tprepare: func(f *fields) {\n\t\t\t\tgomock.InOrder(\n\t\t\t\t\tf.stepperOps.EXPECT().ValidateArgs(gomock.Any(), 
gomock.Any()).Return(nil),\n\t\t\t\t\tf.stepperOps.EXPECT().CreateApplication(gomock.Any(), gomock.Any(), gomock.Any()).Return(nil, nil, nil),\n\t\t\t\t\tf.stepperOps.EXPECT().ValidateData(gomock.Any(), gomock.Any(), gomock.Any()).Return(nil),\n\t\t\t\t\tf.stepperOps.EXPECT().SnapshotApplication(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).Return(\n\t\t\t\t\t\t&snapv1.VolumeSnapshot{\n\t\t\t\t\t\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\t\t\t\t\t\tName: \"snapshot\",\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tfmt.Errorf(\"snapshot error\"),\n\t\t\t\t\t),\n\t\t\t\t)\n\t\t\t},\n\t\t\tresult: &types.CSISnapshotRestoreResults{\n\t\t\t\tSnapshot: &snapv1.VolumeSnapshot{\n\t\t\t\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\t\t\t\tName: \"snapshot\",\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\terrChecker: NotNil,\n\t\t},\n\t\t{ // snapshot error, object not returned\n\t\t\tkubeCli: fake.NewSimpleClientset(),\n\t\t\tdynCli:  fakedynamic.NewSimpleDynamicClient(runtime.NewScheme()),\n\t\t\targs: &types.CSISnapshotRestoreArgs{\n\t\t\t\tCleanup: false,\n\t\t\t},\n\t\t\tprepare: func(f *fields) {\n\t\t\t\tgomock.InOrder(\n\t\t\t\t\tf.stepperOps.EXPECT().ValidateArgs(gomock.Any(), gomock.Any()).Return(nil),\n\t\t\t\t\tf.stepperOps.EXPECT().CreateApplication(gomock.Any(), gomock.Any(), gomock.Any()).Return(nil, nil, nil),\n\t\t\t\t\tf.stepperOps.EXPECT().ValidateData(gomock.Any(), gomock.Any(), gomock.Any()).Return(nil),\n\t\t\t\t\tf.stepperOps.EXPECT().SnapshotApplication(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).Return(nil, fmt.Errorf(\"snapshot error\")),\n\t\t\t\t)\n\t\t\t},\n\t\t\tresult:     &types.CSISnapshotRestoreResults{},\n\t\t\terrChecker: NotNil,\n\t\t},\n\t\t{ // created data validation error\n\t\t\tkubeCli: fake.NewSimpleClientset(),\n\t\t\tdynCli:  fakedynamic.NewSimpleDynamicClient(runtime.NewScheme()),\n\t\t\targs: &types.CSISnapshotRestoreArgs{\n\t\t\t\tCleanup: false,\n\t\t\t},\n\t\t\tprepare: func(f *fields) 
{\n\t\t\t\tgomock.InOrder(\n\t\t\t\t\tf.stepperOps.EXPECT().ValidateArgs(gomock.Any(), gomock.Any()).Return(nil),\n\t\t\t\t\tf.stepperOps.EXPECT().CreateApplication(gomock.Any(), gomock.Any(), gomock.Any()).Return(nil, nil, nil),\n\t\t\t\t\tf.stepperOps.EXPECT().ValidateData(gomock.Any(), gomock.Any(), gomock.Any()).Return(fmt.Errorf(\"validation error\")),\n\t\t\t\t)\n\t\t\t},\n\t\t\tresult:     &types.CSISnapshotRestoreResults{},\n\t\t\terrChecker: NotNil,\n\t\t},\n\t\t{ // create error, objects still returned\n\t\t\tkubeCli: fake.NewSimpleClientset(),\n\t\t\tdynCli:  fakedynamic.NewSimpleDynamicClient(runtime.NewScheme()),\n\t\t\targs: &types.CSISnapshotRestoreArgs{\n\t\t\t\tCleanup: false,\n\t\t\t},\n\t\t\tprepare: func(f *fields) {\n\t\t\t\tgomock.InOrder(\n\t\t\t\t\tf.stepperOps.EXPECT().ValidateArgs(gomock.Any(), gomock.Any()).Return(nil),\n\t\t\t\t\tf.stepperOps.EXPECT().CreateApplication(gomock.Any(), gomock.Any(), gomock.Any()).Return(\n\t\t\t\t\t\t&v1.Pod{\n\t\t\t\t\t\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\t\t\t\t\t\tName:      \"pod1\",\n\t\t\t\t\t\t\t\tNamespace: \"ns\",\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t\t&v1.PersistentVolumeClaim{\n\t\t\t\t\t\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\t\t\t\t\t\tName:      \"pvc1\",\n\t\t\t\t\t\t\t\tNamespace: \"ns\",\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tfmt.Errorf(\"create error\"),\n\t\t\t\t\t),\n\t\t\t\t)\n\t\t\t},\n\t\t\tresult: &types.CSISnapshotRestoreResults{\n\t\t\t\tOriginalPVC: &v1.PersistentVolumeClaim{\n\t\t\t\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\t\t\t\tName:      \"pvc1\",\n\t\t\t\t\t\tNamespace: \"ns\",\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\tOriginalPod: &v1.Pod{\n\t\t\t\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\t\t\t\tName:      \"pod1\",\n\t\t\t\t\t\tNamespace: \"ns\",\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\terrChecker: NotNil,\n\t\t},\n\t\t{ // create error, objects not returned\n\t\t\tkubeCli: fake.NewSimpleClientset(),\n\t\t\tdynCli:  
fakedynamic.NewSimpleDynamicClient(runtime.NewScheme()),\n\t\t\targs: &types.CSISnapshotRestoreArgs{\n\t\t\t\tCleanup: false,\n\t\t\t},\n\t\t\tprepare: func(f *fields) {\n\t\t\t\tgomock.InOrder(\n\t\t\t\t\tf.stepperOps.EXPECT().ValidateArgs(gomock.Any(), gomock.Any()).Return(nil),\n\t\t\t\t\tf.stepperOps.EXPECT().CreateApplication(gomock.Any(), gomock.Any(), gomock.Any()).Return(nil, nil, fmt.Errorf(\"create error\")),\n\t\t\t\t)\n\t\t\t},\n\t\t\tresult:     &types.CSISnapshotRestoreResults{},\n\t\t\terrChecker: NotNil,\n\t\t},\n\t\t{ // args validate error\n\t\t\tkubeCli: fake.NewSimpleClientset(),\n\t\t\tdynCli:  fakedynamic.NewSimpleDynamicClient(runtime.NewScheme()),\n\t\t\targs: &types.CSISnapshotRestoreArgs{\n\t\t\t\tCleanup: false,\n\t\t\t},\n\t\t\tprepare: func(f *fields) {\n\t\t\t\tgomock.InOrder(\n\t\t\t\t\tf.stepperOps.EXPECT().ValidateArgs(gomock.Any(), gomock.Any()).Return(fmt.Errorf(\"create error\")),\n\t\t\t\t)\n\t\t\t},\n\t\t\tresult:     &types.CSISnapshotRestoreResults{},\n\t\t\terrChecker: NotNil,\n\t\t},\n\t\t{ // empty cli\n\t\t\tkubeCli:    nil,\n\t\t\tdynCli:     fakedynamic.NewSimpleDynamicClient(runtime.NewScheme()),\n\t\t\tresult:     &types.CSISnapshotRestoreResults{},\n\t\t\terrChecker: NotNil,\n\t\t},\n\t\t{ // empty dyncli\n\t\t\tkubeCli:    fake.NewSimpleClientset(),\n\t\t\tdynCli:     nil,\n\t\t\tresult:     &types.CSISnapshotRestoreResults{},\n\t\t\terrChecker: NotNil,\n\t\t},\n\t} {\n\t\tctrl := gomock.NewController(c)\n\t\tdefer ctrl.Finish()\n\t\tf := fields{\n\t\t\tstepperOps: mocks.NewMockSnapshotRestoreStepper(ctrl),\n\t\t}\n\t\tif tc.prepare != nil {\n\t\t\ttc.prepare(&f)\n\t\t}\n\t\trunner := &SnapshotRestoreRunner{\n\t\t\tKubeCli: tc.kubeCli,\n\t\t\tDynCli:  tc.dynCli,\n\t\t\tsrSteps: f.stepperOps,\n\t\t}\n\t\tresult, err := runner.RunSnapshotRestoreHelper(ctx, tc.args)\n\t\tc.Check(err, tc.errChecker)\n\t\tc.Assert(result, DeepEquals, tc.result)\n\t}\n}\n\nfunc (s *CSITestSuite) TestRunSnapshotRestoreRunner(c *C) {\n\tctx 
:= context.Background()\n\tr := &SnapshotRestoreRunner{}\n\t_, err := r.RunSnapshotRestore(ctx, nil)\n\tc.Check(err, NotNil)\n}\n"
  },
  {
    "path": "pkg/csi/types/csi_types.go",
    "content": "package types\n\nimport (\n\t\"bytes\"\n\t\"fmt\"\n\t\"time\"\n\n\tsnapv1 \"github.com/kubernetes-csi/external-snapshotter/client/v4/apis/volumesnapshot/v1\"\n\tv1 \"k8s.io/api/core/v1\"\n\t\"k8s.io/apimachinery/pkg/api/resource\"\n\t\"k8s.io/client-go/rest\"\n)\n\ntype CSISnapshotRestoreArgs struct {\n\tStorageClass          string\n\tVolumeSnapshotClass   string\n\tNamespace             string\n\tRunAsUser             int64\n\tContainerImage        string\n\tCleanup               bool\n\tSkipCFSCheck          bool\n\tK8sObjectReadyTimeout time.Duration\n}\n\nfunc (a *CSISnapshotRestoreArgs) Validate() error {\n\tif a.StorageClass == \"\" || a.VolumeSnapshotClass == \"\" || a.Namespace == \"\" {\n\t\treturn fmt.Errorf(\"required fields are missing: (StorageClass, VolumeSnapshotClass, Namespace)\")\n\t}\n\treturn nil\n}\n\ntype CSISnapshotRestoreResults struct {\n\tOriginalPVC *v1.PersistentVolumeClaim\n\tOriginalPod *v1.Pod\n\tSnapshot    *snapv1.VolumeSnapshot\n\tClonedPVC   *v1.PersistentVolumeClaim\n\tClonedPod   *v1.Pod\n}\n\ntype CreatePVCArgs struct {\n\tName         string // Only one of Name or\n\tGenerateName string // GenerateName should be specified.\n\tStorageClass string\n\tNamespace    string\n\tDataSource   *v1.TypedLocalObjectReference\n\tRestoreSize  *resource.Quantity\n\tVolumeMode   *v1.PersistentVolumeMode // missing implies v1.PersistentVolumeFilesystem\n}\n\nfunc (c *CreatePVCArgs) Validate() error {\n\tif (c.GenerateName == \"\" && c.Name == \"\") ||\n\t\t(c.GenerateName != \"\" && c.Name != \"\") ||\n\t\tc.StorageClass == \"\" || c.Namespace == \"\" {\n\t\treturn fmt.Errorf(\"invalid CreatePVCArgs (%#v)\", c)\n\t}\n\treturn nil\n}\n\ntype VolumePath struct {\n\tMountPath  string // Only one of MountPath or\n\tDevicePath string // DevicePath should be specified.\n}\n\ntype CreatePodArgs struct {\n\tName           string // Only one of Name or\n\tGenerateName   string // GenerateName should be specified.\n\tPVCMap         
map[string]VolumePath\n\tNamespace      string\n\tRunAsUser      int64\n\tContainerImage string\n\tCommand        []string\n\tContainerArgs  []string\n}\n\nfunc (c *CreatePodArgs) Validate() error {\n\tif (c.GenerateName == \"\" && c.Name == \"\") ||\n\t\t(c.GenerateName != \"\" && c.Name != \"\") ||\n\t\t(c.Namespace == \"\") || (c.PVCMap == nil) {\n\t\treturn fmt.Errorf(\"invalid CreatePodArgs (%#v)\", c)\n\t}\n\tfor pvcName, path := range c.PVCMap {\n\t\tif pvcName == \"\" {\n\t\t\treturn fmt.Errorf(\"name for PVC is not set\")\n\t\t}\n\t\tif path.DevicePath == \"\" && path.MountPath == \"\" {\n\t\t\treturn fmt.Errorf(\"neither DevicePath nor MountPath are set, one is required\")\n\t\t}\n\t\tif path.DevicePath != \"\" && path.MountPath != \"\" {\n\t\t\treturn fmt.Errorf(\"both MountPath and DevicePath are set, only one must be set\")\n\t\t}\n\t}\n\treturn nil\n}\n\ntype CreateSnapshotArgs struct {\n\tNamespace           string\n\tPVCName             string\n\tVolumeSnapshotClass string\n\tSnapshotName        string\n}\n\nfunc (c *CreateSnapshotArgs) Validate() error {\n\tif c.Namespace == \"\" || c.PVCName == \"\" || c.VolumeSnapshotClass == \"\" || c.SnapshotName == \"\" {\n\t\treturn fmt.Errorf(\"invalid CreateSnapshotArgs (%v)\", c)\n\t}\n\treturn nil\n}\n\ntype FetchSnapshotArgs struct {\n\tNamespace    string\n\tSnapshotName string\n}\n\nfunc (c *FetchSnapshotArgs) Validate() error {\n\tif c.Namespace == \"\" || c.SnapshotName == \"\" {\n\t\treturn fmt.Errorf(\"invalid FetchSnapshotArgs (%v)\", c)\n\t}\n\treturn nil\n}\n\ntype CreateFromSourceCheckArgs struct {\n\tVolumeSnapshotClass string\n\tSnapshotName        string\n\tNamespace           string\n}\n\nfunc (c *CreateFromSourceCheckArgs) Validate() error {\n\tif c.VolumeSnapshotClass == \"\" || c.SnapshotName == \"\" || c.Namespace == \"\" {\n\t\treturn fmt.Errorf(\"invalid CreateFromSourceCheckArgs (%v)\", c)\n\t}\n\treturn nil\n}\n\ntype PVCBrowseArgs struct {\n\tPVCName             string\n\tNamespace 
          string\n\tVolumeSnapshotClass string\n\tRunAsUser           int64\n\tLocalPort           int\n\tShowTree            bool\n}\n\nfunc (p *PVCBrowseArgs) Validate() error {\n\tif p.PVCName == \"\" || p.Namespace == \"\" || p.VolumeSnapshotClass == \"\" {\n\t\treturn fmt.Errorf(\"invalid PVCBrowseArgs (%v)\", p)\n\t}\n\treturn nil\n}\n\ntype SnapshotBrowseArgs struct {\n\tSnapshotName string\n\tNamespace    string\n\tRunAsUser    int64\n\tLocalPort    int\n\tShowTree     bool\n}\n\nfunc (p *SnapshotBrowseArgs) Validate() error {\n\tif p.SnapshotName == \"\" || p.Namespace == \"\" {\n\t\treturn fmt.Errorf(\"invalid SnapshotBrowseArgs (%v)\", p)\n\t}\n\treturn nil\n}\n\ntype FileRestoreArgs struct {\n\tFromSnapshotName string\n\tFromPVCName      string\n\tToPVCName        string\n\tNamespace        string\n\tRunAsUser        int64\n\tLocalPort        int\n\tPath             string\n}\n\nfunc (f *FileRestoreArgs) Validate() error {\n\tif (f.FromSnapshotName == \"\" && f.FromPVCName == \"\") || (f.FromSnapshotName != \"\" && f.FromPVCName != \"\") {\n\t\treturn fmt.Errorf(\"either --fromSnapshot or --fromPVC argument must be specified. 
Both cannot be specified together\")\n\t}\n\tif f.FromPVCName != \"\" && f.ToPVCName == \"\" {\n\t\treturn fmt.Errorf(\"--toPVC argument must be specified if using --fromPVC\")\n\t}\n\tif f.Namespace == \"\" {\n\t\treturn fmt.Errorf(\"invalid FileRestoreArgs (%v)\", f)\n\t}\n\treturn nil\n}\n\ntype PortForwardAPodRequest struct {\n\t// RestConfig is the kubernetes config\n\tRestConfig *rest.Config\n\t// Pod is the selected pod for this port forwarding\n\tPod *v1.Pod\n\t// LocalPort is the local port that will be selected to expose the PodPort\n\tLocalPort int\n\t// PodPort is the target port for the pod\n\tPodPort int\n\t// Streams configures where to write or read input from\n\tOutStream    bytes.Buffer\n\tErrOutStream bytes.Buffer\n\t// StopCh is the channel used to manage the port forward lifecycle\n\tStopCh <-chan struct{}\n\t// ReadyCh communicates when the tunnel is ready to receive traffic\n\tReadyCh chan struct{}\n}\n"
  },
  {
    "path": "pkg/fio/_config.yml",
    "content": "baseurl: \"/fio\"\n"
  },
  {
    "path": "pkg/fio/dbench_license",
    "content": "MIT License\n\nCopyright (c) 2018 LogDNA\n\nPermission is hereby granted, free of charge, to any person obtaining a copy\nof this software and associated documentation files (the \"Software\"), to deal\nin the Software without restriction, including without limitation the rights\nto use, copy, modify, merge, publish, distribute, sublicense, and/or sell\ncopies of the Software, and to permit persons to whom the Software is\nfurnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in all\ncopies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\nSOFTWARE.\n"
  },
  {
    "path": "pkg/fio/fio.go",
    "content": "package fio\n\nimport (\n\t\"context\"\n\t\"encoding/json\"\n\t\"fmt\"\n\t\"os\"\n\t\"path/filepath\"\n\t\"time\"\n\n\t\"github.com/briandowns/spinner\"\n\tkankube \"github.com/kanisterio/kanister/pkg/kube\"\n\t\"github.com/kastenhq/kubestr/pkg/common\"\n\t\"github.com/pkg/errors\"\n\tv1 \"k8s.io/api/core/v1\"\n\tsv1 \"k8s.io/api/storage/v1\"\n\t\"k8s.io/apimachinery/pkg/api/resource\"\n\tmetav1 \"k8s.io/apimachinery/pkg/apis/meta/v1\"\n\t\"k8s.io/apimachinery/pkg/labels\"\n\t\"k8s.io/client-go/kubernetes\"\n)\n\nconst (\n\t// DefaultNS describes the default namespace\n\tDefaultNS = \"default\"\n\t// PodNamespaceEnvKey describes the pod namespace env variable\n\tPodNamespaceEnvKey = \"POD_NAMESPACE\"\n\t// DefaultFIOJob describes the default FIO job\n\tDefaultFIOJob = \"default-fio\"\n\t// KubestrFIOJobGenName describes the generate name\n\tKubestrFIOJobGenName = \"kubestr-fio\"\n\t// ConfigMapJobKey is the default fio job key\n\tConfigMapJobKey = \"fiojob\"\n\t// DefaultPVCSize is the default PVC size\n\tDefaultPVCSize = \"100Gi\"\n\t// PVCGenerateName is the name to generate for the PVC\n\tPVCGenerateName = \"kubestr-fio-pvc-\"\n\t// PodGenerateName is the name to generate for the POD\n\tPodGenerateName = \"kubestr-fio-pod-\"\n\t// ContainerName is the name of the container that runs the job\n\tContainerName = \"kubestr-fio\"\n\t// PodNameEnvKey is the name of the variable used to get the current pod name\n\tPodNameEnvKey = \"HOSTNAME\"\n\t// ConfigMapMountPath is the path where we mount the configmap\n\tConfigMapMountPath = \"/etc/fio-config\"\n\t// VolumeMountPath is the path where we mount the volume\n\tVolumeMountPath = \"/dataset\"\n\t// CreatedByFIOLabel is the key that describes the label used to mark configmaps\n\tCreatedByFIOLabel = \"createdbyfio\"\n)\n\n// FIO is an interface that represents FIO related commands\ntype FIO interface {\n\tRunFio(ctx context.Context, args *RunFIOArgs) (*RunFIOResult, error) // , test config\n}\n\n// 
FIOrunner implements FIO\ntype FIOrunner struct {\n\tCli      kubernetes.Interface\n\tfioSteps fioSteps\n}\n\ntype RunFIOArgs struct {\n\tStorageClass   string\n\tSize           string\n\tNamespace      string\n\tNodeSelector   map[string]string\n\tFIOJobFilepath string\n\tFIOJobName     string\n\tImage          string\n}\n\nfunc (a *RunFIOArgs) Validate() error {\n\tif a.StorageClass == \"\" || a.Size == \"\" || a.Namespace == \"\" {\n\t\treturn fmt.Errorf(\"required fields are missing: (StorageClass, Size, Namespace)\")\n\t}\n\treturn nil\n}\n\ntype RunFIOResult struct {\n\tSize         string            `json:\"size,omitempty\"`\n\tStorageClass *sv1.StorageClass `json:\"storageClass,omitempty\"`\n\tFioConfig    string            `json:\"fioConfig,omitempty\"`\n\tResult       FioResult         `json:\"result,omitempty\"`\n}\n\nfunc (f *FIOrunner) RunFio(ctx context.Context, args *RunFIOArgs) (*RunFIOResult, error) {\n\tf.fioSteps = &fioStepper{\n\t\tcli:          f.Cli,\n\t\tpodReady:     &podReadyChecker{cli: f.Cli},\n\t\tkubeExecutor: &kubeExecutor{cli: f.Cli},\n\t}\n\treturn f.RunFioHelper(ctx, args)\n\n}\n\nfunc (f *FIOrunner) RunFioHelper(ctx context.Context, args *RunFIOArgs) (*RunFIOResult, error) {\n\t// create a configmap with test parameters\n\tif f.Cli == nil { // for UT purposes\n\t\treturn nil, fmt.Errorf(\"cli uninitialized\")\n\t}\n\n\tif err := args.Validate(); err != nil {\n\t\treturn nil, err\n\t}\n\n\tif err := f.fioSteps.validateNamespace(ctx, args.Namespace); err != nil {\n\t\treturn nil, errors.Wrapf(err, \"unable to find namespace (%s)\", args.Namespace)\n\t}\n\n\tif err := f.fioSteps.validateNodeSelector(ctx, args.NodeSelector); err != nil {\n\t\treturn nil, errors.Wrapf(err, \"unable to find nodes satisfying node selector (%v)\", args.NodeSelector)\n\t}\n\n\tsc, err := f.fioSteps.storageClassExists(ctx, args.StorageClass)\n\tif err != nil {\n\t\treturn nil, errors.Wrap(err, \"cannot find StorageClass\")\n\t}\n\n\tconfigMap, err := 
f.fioSteps.loadConfigMap(ctx, args)\n\tif err != nil {\n\t\treturn nil, errors.Wrap(err, \"unable to create a ConfigMap\")\n\t}\n\tdefer func() {\n\t\t_ = f.fioSteps.deleteConfigMap(context.TODO(), configMap, args.Namespace)\n\t}()\n\n\ttestFileName, err := fioTestFilename(configMap.Data)\n\tif err != nil {\n\t\treturn nil, errors.Wrap(err, \"failed to get test file name\")\n\t}\n\n\tpvc, err := f.fioSteps.createPVC(ctx, args.StorageClass, args.Size, args.Namespace)\n\tif err != nil {\n\t\treturn nil, errors.Wrap(err, \"failed to create PVC\")\n\t}\n\tdefer func() {\n\t\t_ = f.fioSteps.deletePVC(context.TODO(), pvc.Name, args.Namespace)\n\t}()\n\tfmt.Println(\"PVC created\", pvc.Name)\n\n\tpod, err := f.fioSteps.createPod(ctx, pvc.Name, configMap.Name, testFileName, args.Namespace, args.NodeSelector, args.Image)\n\tif err != nil {\n\t\treturn nil, errors.Wrap(err, \"failed to create POD\")\n\t}\n\tdefer func() {\n\t\t_ = f.fioSteps.deletePod(context.TODO(), pod.Name, args.Namespace)\n\t}()\n\tfmt.Println(\"Pod created\", pod.Name)\n\tfmt.Printf(\"Running FIO test (%s) on StorageClass (%s) with a PVC of Size (%s)\\n\", testFileName, args.StorageClass, args.Size)\n\tfioOutput, err := f.fioSteps.runFIOCommand(ctx, pod.Name, ContainerName, testFileName, args.Namespace)\n\tif err != nil {\n\t\treturn nil, errors.Wrap(err, \"failed while running FIO test\")\n\t}\n\treturn &RunFIOResult{\n\t\tSize:         args.Size,\n\t\tStorageClass: sc,\n\t\tFioConfig:    configMap.Data[testFileName],\n\t\tResult:       fioOutput,\n\t}, nil\n}\n\ntype fioSteps interface {\n\tvalidateNamespace(ctx context.Context, namespace string) error\n\tvalidateNodeSelector(ctx context.Context, selector map[string]string) error\n\tstorageClassExists(ctx context.Context, storageClass string) (*sv1.StorageClass, error)\n\tloadConfigMap(ctx context.Context, args *RunFIOArgs) (*v1.ConfigMap, error)\n\tcreatePVC(ctx context.Context, storageclass, size, namespace string) (*v1.PersistentVolumeClaim, 
error)\n\tdeletePVC(ctx context.Context, pvcName, namespace string) error\n\tcreatePod(ctx context.Context, pvcName, configMapName, testFileName, namespace string, nodeSelector map[string]string, image string) (*v1.Pod, error)\n\tdeletePod(ctx context.Context, podName, namespace string) error\n\trunFIOCommand(ctx context.Context, podName, containerName, testFileName, namespace string) (FioResult, error)\n\tdeleteConfigMap(ctx context.Context, configMap *v1.ConfigMap, namespace string) error\n}\n\ntype fioStepper struct {\n\tcli          kubernetes.Interface\n\tpodReady     waitForPodReadyInterface\n\tkubeExecutor kubeExecInterface\n}\n\nfunc (s *fioStepper) validateNamespace(ctx context.Context, namespace string) error {\n\tif _, err := s.cli.CoreV1().Namespaces().Get(ctx, namespace, metav1.GetOptions{}); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}\n\nfunc (s *fioStepper) validateNodeSelector(ctx context.Context, selector map[string]string) error {\n\tnodes, err := s.cli.CoreV1().Nodes().List(ctx, metav1.ListOptions{\n\t\tLabelSelector: labels.SelectorFromSet(selector).String(),\n\t})\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif len(nodes.Items) == 0 {\n\t\treturn fmt.Errorf(\"no nodes match selector\")\n\t}\n\n\treturn nil\n}\n\nfunc (s *fioStepper) storageClassExists(ctx context.Context, storageClass string) (*sv1.StorageClass, error) {\n\treturn s.cli.StorageV1().StorageClasses().Get(ctx, storageClass, metav1.GetOptions{})\n}\n\nfunc (s *fioStepper) loadConfigMap(ctx context.Context, args *RunFIOArgs) (*v1.ConfigMap, error) {\n\tconfigMap := &v1.ConfigMap{\n\t\tData: make(map[string]string),\n\t}\n\tswitch {\n\tcase args.FIOJobFilepath != \"\":\n\t\tdata, err := os.ReadFile(args.FIOJobFilepath)\n\t\tif err != nil {\n\t\t\treturn nil, errors.Wrap(err, \"file reading error\")\n\t\t}\n\t\tconfigMap.Data[filepath.Base(args.FIOJobFilepath)] = string(data)\n\tcase args.FIOJobName != \"\":\n\t\tif _, ok := fioJobs[args.FIOJobName]; !ok {\n\t\t\treturn nil, 
fmt.Errorf(\"did not find FIO job (%s)\", args.FIOJobName)\n\t\t}\n\t\tconfigMap.Data[args.FIOJobName] = fioJobs[args.FIOJobName]\n\tdefault:\n\t\tconfigMap.Data[DefaultFIOJob] = fioJobs[DefaultFIOJob]\n\t}\n\t// create\n\tconfigMap.GenerateName = KubestrFIOJobGenName\n\tconfigMap.Labels = map[string]string{CreatedByFIOLabel: \"true\"}\n\tcm, err := s.cli.CoreV1().ConfigMaps(args.Namespace).Create(ctx, configMap, metav1.CreateOptions{})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn cm, nil\n}\n\nfunc (s *fioStepper) createPVC(ctx context.Context, storageclass, size, namespace string) (*v1.PersistentVolumeClaim, error) {\n\tsizeResource, err := resource.ParseQuantity(size)\n\tif err != nil {\n\t\treturn nil, errors.Wrapf(err, \"unable to parse PVC size (%s)\", size)\n\t}\n\tpvc := &v1.PersistentVolumeClaim{\n\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\tGenerateName: PVCGenerateName,\n\t\t},\n\t\tSpec: v1.PersistentVolumeClaimSpec{\n\t\t\tStorageClassName: &storageclass,\n\t\t\tAccessModes:      []v1.PersistentVolumeAccessMode{v1.ReadWriteOnce},\n\t\t\tResources: v1.VolumeResourceRequirements{\n\t\t\t\tRequests: v1.ResourceList{\n\t\t\t\t\tv1.ResourceName(v1.ResourceStorage): sizeResource,\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t}\n\tcm, err := s.cli.CoreV1().PersistentVolumeClaims(namespace).Create(ctx, pvc, metav1.CreateOptions{})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn cm, nil\n}\n\nfunc (s *fioStepper) deletePVC(ctx context.Context, pvcName, namespace string) error {\n\treturn s.cli.CoreV1().PersistentVolumeClaims(namespace).Delete(ctx, pvcName, metav1.DeleteOptions{})\n}\n\nfunc (s *fioStepper) createPod(ctx context.Context, pvcName, configMapName, testFileName, namespace string, nodeSelector map[string]string, image string) (*v1.Pod, error) {\n\tif pvcName == \"\" || configMapName == \"\" || testFileName == \"\" {\n\t\treturn nil, fmt.Errorf(\"create pod missing required arguments\")\n\t}\n\n\tif image == \"\" {\n\t\timage = 
common.DefaultPodImage\n\t}\n\n\tpod := &v1.Pod{\n\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\tGenerateName: PodGenerateName,\n\t\t\tNamespace:    namespace,\n\t\t},\n\t\tSpec: v1.PodSpec{\n\t\t\tContainers: []v1.Container{{\n\t\t\t\tName:    ContainerName,\n\t\t\t\tCommand: []string{\"/bin/sh\"},\n\t\t\t\tArgs:    []string{\"-c\", \"tail -f /dev/null\"},\n\t\t\t\tVolumeMounts: []v1.VolumeMount{\n\t\t\t\t\t{Name: \"persistent-storage\", MountPath: VolumeMountPath},\n\t\t\t\t\t{Name: \"config-map\", MountPath: ConfigMapMountPath},\n\t\t\t\t},\n\t\t\t\tImage: image,\n\t\t\t}},\n\t\t\tVolumes: []v1.Volume{\n\t\t\t\t{\n\t\t\t\t\tName: \"persistent-storage\",\n\t\t\t\t\tVolumeSource: v1.VolumeSource{\n\t\t\t\t\t\tPersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{ClaimName: pvcName},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tName: \"config-map\",\n\t\t\t\t\tVolumeSource: v1.VolumeSource{\n\t\t\t\t\t\tConfigMap: &v1.ConfigMapVolumeSource{\n\t\t\t\t\t\t\tLocalObjectReference: v1.LocalObjectReference{\n\t\t\t\t\t\t\t\tName: configMapName,\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\tNodeSelector: nodeSelector,\n\t\t},\n\t}\n\tpodRes, err := s.cli.CoreV1().Pods(namespace).Create(ctx, pod, metav1.CreateOptions{})\n\tif err != nil {\n\t\treturn podRes, err\n\t}\n\n\terr = s.podReady.waitForPodReady(ctx, namespace, podRes.Name)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tpodRes, err = s.cli.CoreV1().Pods(namespace).Get(ctx, podRes.Name, metav1.GetOptions{})\n\tif err != nil {\n\t\treturn podRes, err\n\t}\n\n\treturn podRes, nil\n}\n\nfunc (s *fioStepper) deletePod(ctx context.Context, podName, namespace string) error {\n\treturn s.cli.CoreV1().Pods(namespace).Delete(ctx, podName, metav1.DeleteOptions{})\n}\n\nfunc (s *fioStepper) runFIOCommand(ctx context.Context, podName, containerName, testFileName, namespace string) (FioResult, error) {\n\tjobFilePath := fmt.Sprintf(\"%s/%s\", ConfigMapMountPath, testFileName)\n\tcommand := 
[]string{\"fio\", \"--directory\", VolumeMountPath, jobFilePath, \"--output-format=json\"}\n\tdone := make(chan bool, 1)\n\tvar fioOut FioResult\n\tvar stdout string\n\tvar stderr string\n\tvar err error\n\ttimestart := time.Now()\n\tgo func() {\n\t\tstdout, stderr, err = s.kubeExecutor.exec(ctx, namespace, podName, containerName, command)\n\t\tif err != nil || stderr != \"\" {\n\t\t\tif err == nil {\n\t\t\t\terr = fmt.Errorf(\"stderr when running FIO\")\n\t\t\t}\n\t\t\terr = errors.Wrapf(err, \"error running command:(%v), stderr:(%s)\", command, stderr)\n\t\t}\n\t\tdone <- true\n\t}()\n\tspin := spinner.New(spinner.CharSets[9], 100*time.Millisecond)\n\tspin.Start()\n\t<-done\n\tspin.Stop()\n\telapsed := time.Since(timestart)\n\tfmt.Println(\"Elapsed time-\", elapsed)\n\tif err != nil {\n\t\treturn fioOut, err\n\t}\n\n\terr = json.Unmarshal([]byte(stdout), &fioOut)\n\tif err != nil {\n\t\treturn fioOut, errors.Wrapf(err, \"unable to parse fio output into JSON\")\n\t}\n\n\treturn fioOut, nil\n}\n\n// deleteConfigMap only deletes a config map if it has the label\nfunc (s *fioStepper) deleteConfigMap(ctx context.Context, configMap *v1.ConfigMap, namespace string) error {\n\tif val, ok := configMap.Labels[CreatedByFIOLabel]; ok && val == \"true\" {\n\t\treturn s.cli.CoreV1().ConfigMaps(namespace).Delete(ctx, configMap.Name, metav1.DeleteOptions{})\n\t}\n\treturn nil\n}\n\nfunc fioTestFilename(configMap map[string]string) (string, error) {\n\tif len(configMap) != 1 {\n\t\treturn \"\", fmt.Errorf(\"unable to find fio file in configmap/more than one found %v\", configMap)\n\t}\n\tvar fileName string\n\tfor key := range configMap {\n\t\tfileName = key\n\t}\n\treturn fileName, nil\n}\n\ntype waitForPodReadyInterface interface {\n\twaitForPodReady(ctx context.Context, namespace string, name string) error\n}\n\ntype podReadyChecker struct {\n\tcli kubernetes.Interface\n}\n\nfunc (p *podReadyChecker) waitForPodReady(ctx context.Context, namespace, name string) error 
{\n\treturn kankube.WaitForPodReady(ctx, p.cli, namespace, name)\n}\n\ntype kubeExecInterface interface {\n\texec(ctx context.Context, namespace, podName, containerName string, command []string) (string, string, error)\n}\n\ntype kubeExecutor struct {\n\tcli kubernetes.Interface\n}\n\nfunc (k *kubeExecutor) exec(ctx context.Context, namespace, podName, containerName string, command []string) (string, string, error) {\n\treturn kankube.Exec(ctx, k.cli, namespace, podName, containerName, command, nil)\n}\n"
  },
  {
    "path": "pkg/fio/fio_jobs.go",
    "content": "package fio\n\nvar fioJobs = map[string]string{\n\tDefaultFIOJob: testJob1,\n\t\"randrw\":      randReadWrite,\n}\n\nvar testJob1 = `[global]\nrandrepeat=0\nverify=0\nioengine=libaio\ndirect=1\ngtod_reduce=1\n[job1]\nname=read_iops\nbs=4K\niodepth=64\nsize=2G\nreadwrite=randread\ntime_based\nramp_time=2s\nruntime=15s\n[job2]\nname=write_iops\nbs=4K\niodepth=64\nsize=2G\nreadwrite=randwrite\ntime_based\nramp_time=2s\nruntime=15s\n[job3]\nname=read_bw\nbs=128K\niodepth=64\nsize=2G\nreadwrite=randread\ntime_based\nramp_time=2s\nruntime=15s\n[job4]\nname=write_bw\nbs=128k\niodepth=64\nsize=2G\nreadwrite=randwrite\ntime_based\nramp_time=2s\nruntime=15s\n`\n\nvar randReadWrite = `[global]\nrandrepeat=0\nverify=0\nioengine=libaio\ndirect=1\ngtod_reduce=1\n[job1]\nname=rand_readwrite\nbs=4K\niodepth=64\nsize=4G\nreadwrite=randrw\nrwmixread=75\ntime_based\nramp_time=2s\nruntime=15s\n`\n"
  },
  {
    "path": "pkg/fio/fio_test.go",
    "content": "package fio\n\nimport (\n\t\"context\"\n\t\"encoding/json\"\n\t\"fmt\"\n\t\"os\"\n\t\"testing\"\n\n\t\"github.com/kastenhq/kubestr/pkg/common\"\n\t\"github.com/pkg/errors\"\n\t. \"gopkg.in/check.v1\"\n\tv1 \"k8s.io/api/core/v1\"\n\tstoragev1 \"k8s.io/api/storage/v1\"\n\tmetav1 \"k8s.io/apimachinery/pkg/apis/meta/v1\"\n\t\"k8s.io/apimachinery/pkg/runtime\"\n\t\"k8s.io/client-go/kubernetes\"\n\t\"k8s.io/client-go/kubernetes/fake\"\n\tk8stesting \"k8s.io/client-go/testing\"\n)\n\nfunc Test(t *testing.T) { TestingT(t) }\n\ntype FIOTestSuite struct{}\n\nvar _ = Suite(&FIOTestSuite{})\n\nfunc (s *FIOTestSuite) TestRunner(c *C) {\n\tctx := context.Background()\n\trunner := &FIOrunner{\n\t\tCli: nil,\n\t}\n\t_, err := runner.RunFio(ctx, nil)\n\tc.Check(err, NotNil)\n}\n\nfunc (s *FIOTestSuite) TestRunFioHelper(c *C) {\n\tctx := context.Background()\n\tfor i, tc := range []struct {\n\t\tcli           kubernetes.Interface\n\t\tstepper       *fakeFioStepper\n\t\targs          *RunFIOArgs\n\t\texpectedSteps []string\n\t\tchecker       Checker\n\t\texpectedCM    string\n\t\texpectedSC    string\n\t\texpectedSize  string\n\t\texpectedTFN   string\n\t\texpectedPVC   string\n\t}{\n\t\t{ // invalid args (storageclass)\n\t\t\tcli:     fake.NewSimpleClientset(),\n\t\t\tstepper: &fakeFioStepper{},\n\t\t\targs:    &RunFIOArgs{},\n\t\t\tchecker: NotNil,\n\t\t},\n\t\t{ // invalid args (size)\n\t\t\tcli:     fake.NewSimpleClientset(),\n\t\t\tstepper: &fakeFioStepper{},\n\t\t\targs: &RunFIOArgs{\n\t\t\t\tStorageClass: \"sc\",\n\t\t\t},\n\t\t\tchecker: NotNil,\n\t\t},\n\t\t{ // invalid args (namespace)\n\t\t\tcli:     fake.NewSimpleClientset(),\n\t\t\tstepper: &fakeFioStepper{},\n\t\t\targs: &RunFIOArgs{\n\t\t\t\tStorageClass: \"sc\",\n\t\t\t\tSize:         \"100Gi\",\n\t\t\t},\n\t\t\tchecker: NotNil,\n\t\t},\n\t\t{ // namespace doesn't exist\n\t\t\tcli: fake.NewSimpleClientset(),\n\t\t\tstepper: &fakeFioStepper{\n\t\t\t\tvnErr: fmt.Errorf(\"namespace 
Err\"),\n\t\t\t},\n\t\t\targs: &RunFIOArgs{\n\t\t\t\tStorageClass: \"sc\",\n\t\t\t\tSize:         \"100Gi\",\n\t\t\t\tNamespace:    \"foo\",\n\t\t\t},\n\t\t\tchecker:       NotNil,\n\t\t\texpectedSteps: []string{\"VN\"},\n\t\t},\n\t\t{ // no node satisfies selector\n\t\t\tcli: fake.NewSimpleClientset(),\n\t\t\tstepper: &fakeFioStepper{\n\t\t\t\tvnsErr: fmt.Errorf(\"node selector Err\"),\n\t\t\t},\n\t\t\targs: &RunFIOArgs{\n\t\t\t\tStorageClass: \"sc\",\n\t\t\t\tSize:         \"100Gi\",\n\t\t\t\tNamespace:    \"foo\",\n\t\t\t},\n\t\t\tchecker:       NotNil,\n\t\t\texpectedSteps: []string{\"VN\", \"VNS\"},\n\t\t},\n\t\t{ // storageclass not found\n\t\t\tcli: fake.NewSimpleClientset(),\n\t\t\tstepper: &fakeFioStepper{\n\t\t\t\tsceErr: fmt.Errorf(\"storageclass Err\"),\n\t\t\t},\n\t\t\targs: &RunFIOArgs{\n\t\t\t\tStorageClass: \"sc\",\n\t\t\t\tSize:         \"100Gi\",\n\t\t\t\tNamespace:    \"foo\",\n\t\t\t},\n\t\t\tchecker:       NotNil,\n\t\t\texpectedSteps: []string{\"VN\", \"VNS\", \"SCE\"},\n\t\t},\n\t\t{ // success\n\t\t\tcli: fake.NewSimpleClientset(),\n\t\t\tstepper: &fakeFioStepper{\n\t\t\t\tlcmConfigMap: &v1.ConfigMap{\n\t\t\t\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\t\t\t\tName: \"CM1\",\n\t\t\t\t\t},\n\t\t\t\t\tData: map[string]string{\n\t\t\t\t\t\t\"testfile.fio\": \"testfiledata\",\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\tcPVC: &v1.PersistentVolumeClaim{\n\t\t\t\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\t\t\t\tName: \"PVC\",\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\tcPod: &v1.Pod{\n\t\t\t\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\t\t\t\tName: \"Pod\",\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\targs: &RunFIOArgs{\n\t\t\t\tStorageClass: \"sc\",\n\t\t\t\tSize:         \"100Gi\",\n\t\t\t\tNamespace:    \"foo\",\n\t\t\t},\n\t\t\tchecker:       IsNil,\n\t\t\texpectedSteps: []string{\"VN\", \"VNS\", \"SCE\", \"LCM\", \"CPVC\", \"CPOD\", \"RFIOC\", \"DPOD\", \"DPVC\", \"DCM\"},\n\t\t\texpectedSC:    \"sc\",\n\t\t\texpectedSize:  DefaultPVCSize,\n\t\t\texpectedTFN:   
\"testfile.fio\",\n\t\t\texpectedCM:    \"CM1\",\n\t\t\texpectedPVC:   \"PVC\",\n\t\t},\n\t\t{ // fio test error\n\t\t\tcli: fake.NewSimpleClientset(),\n\t\t\tstepper: &fakeFioStepper{\n\t\t\t\tlcmConfigMap: &v1.ConfigMap{\n\t\t\t\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\t\t\t\tName: \"CM1\",\n\t\t\t\t\t},\n\t\t\t\t\tData: map[string]string{\n\t\t\t\t\t\t\"testfile.fio\": \"testfiledata\",\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\tcPVC: &v1.PersistentVolumeClaim{\n\t\t\t\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\t\t\t\tName: \"PVC\",\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\tcPod: &v1.Pod{\n\t\t\t\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\t\t\t\tName: \"Pod\",\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\trFIOErr: fmt.Errorf(\"run fio error\"),\n\t\t\t},\n\t\t\targs: &RunFIOArgs{\n\t\t\t\tStorageClass: \"sc\",\n\t\t\t\tSize:         \"100Gi\",\n\t\t\t\tNamespace:    \"foo\",\n\t\t\t},\n\t\t\tchecker:       NotNil,\n\t\t\texpectedSteps: []string{\"VN\", \"VNS\", \"SCE\", \"LCM\", \"CPVC\", \"CPOD\", \"RFIOC\", \"DPOD\", \"DPVC\", \"DCM\"},\n\t\t},\n\t\t{ // create pod error\n\t\t\tcli: fake.NewSimpleClientset(),\n\t\t\tstepper: &fakeFioStepper{\n\t\t\t\tlcmConfigMap: &v1.ConfigMap{\n\t\t\t\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\t\t\t\tName: \"CM1\",\n\t\t\t\t\t},\n\t\t\t\t\tData: map[string]string{\n\t\t\t\t\t\t\"testfile.fio\": \"testfiledata\",\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\tcPVC: &v1.PersistentVolumeClaim{\n\t\t\t\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\t\t\t\tName: \"PVC\",\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\tcPod: &v1.Pod{\n\t\t\t\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\t\t\t\tName: \"Pod\",\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\tcPodErr: fmt.Errorf(\"pod create error\"),\n\t\t\t},\n\t\t\targs: &RunFIOArgs{\n\t\t\t\tStorageClass: \"sc\",\n\t\t\t\tSize:         \"100Gi\",\n\t\t\t\tNamespace:    \"foo\",\n\t\t\t},\n\t\t\tchecker:       NotNil,\n\t\t\texpectedSteps: []string{\"VN\", \"VNS\", \"SCE\", \"LCM\", \"CPVC\", \"CPOD\", \"DPVC\", \"DCM\"},\n\t\t},\n\t\t{ // create PVC 
error\n\t\t\tcli: fake.NewSimpleClientset(),\n\t\t\tstepper: &fakeFioStepper{\n\t\t\t\tlcmConfigMap: &v1.ConfigMap{\n\t\t\t\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\t\t\t\tName: \"CM1\",\n\t\t\t\t\t},\n\t\t\t\t\tData: map[string]string{\n\t\t\t\t\t\t\"testfile.fio\": \"testfiledata\",\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\tcPVCErr: fmt.Errorf(\"pvc create error\"),\n\t\t\t},\n\t\t\targs: &RunFIOArgs{\n\t\t\t\tStorageClass: \"sc\",\n\t\t\t\tSize:         \"100Gi\",\n\t\t\t\tNamespace:    \"foo\",\n\t\t\t},\n\t\t\tchecker:       NotNil,\n\t\t\texpectedSteps: []string{\"VN\", \"VNS\", \"SCE\", \"LCM\", \"CPVC\", \"DCM\"},\n\t\t},\n\t\t{ // testfilename retrieval error, more than one provided\n\t\t\tcli: fake.NewSimpleClientset(),\n\t\t\tstepper: &fakeFioStepper{\n\t\t\t\tlcmConfigMap: &v1.ConfigMap{\n\t\t\t\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\t\t\t\tName: \"CM1\",\n\t\t\t\t\t},\n\t\t\t\t\tData: map[string]string{\n\t\t\t\t\t\t\"testfile.fio\":  \"testfiledata\",\n\t\t\t\t\t\t\"testfile.fio2\": \"testfiledata\",\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\targs: &RunFIOArgs{\n\t\t\t\tStorageClass: \"sc\",\n\t\t\t\tSize:         \"100Gi\",\n\t\t\t\tNamespace:    \"foo\",\n\t\t\t},\n\t\t\tchecker:       NotNil,\n\t\t\texpectedSteps: []string{\"VN\", \"VNS\", \"SCE\", \"LCM\", \"DCM\"},\n\t\t},\n\t\t{ // load configmap error\n\t\t\tcli: fake.NewSimpleClientset(),\n\t\t\tstepper: &fakeFioStepper{\n\t\t\t\tlcmErr: fmt.Errorf(\"failed to load configmap\"),\n\t\t\t},\n\t\t\targs: &RunFIOArgs{\n\t\t\t\tStorageClass: \"sc\",\n\t\t\t\tSize:         \"100Gi\",\n\t\t\t\tNamespace:    \"foo\",\n\t\t\t},\n\t\t\tchecker:       NotNil,\n\t\t\texpectedSteps: []string{\"VN\", \"VNS\", \"SCE\", \"LCM\"},\n\t\t},\n\t} {\n\t\tc.Log(i)\n\t\tfio := &FIOrunner{\n\t\t\tCli:      tc.cli,\n\t\t\tfioSteps: tc.stepper,\n\t\t}\n\t\t_, err := fio.RunFioHelper(ctx, tc.args)\n\t\tc.Check(err, tc.checker)\n\t\tc.Assert(tc.stepper.steps, DeepEquals, tc.expectedSteps)\n\t\tif err == nil 
{\n\t\t\tc.Assert(tc.expectedSC, Equals, tc.stepper.cPVCExpSC)\n\t\t\tc.Assert(tc.expectedSize, Equals, tc.stepper.cPVCExpSize)\n\t\t\tc.Assert(tc.expectedTFN, Equals, tc.stepper.cPodExpFN)\n\t\t\tc.Assert(tc.expectedCM, Equals, tc.stepper.cPodExpCM)\n\t\t\tc.Assert(tc.expectedPVC, Equals, tc.stepper.cPodExpPVC)\n\t\t}\n\t}\n}\n\ntype fakeFioStepper struct {\n\tsteps []string\n\n\tvnErr error\n\n\tvnsErr error\n\n\tsceSC  *storagev1.StorageClass\n\tsceErr error\n\n\tlcmConfigMap *v1.ConfigMap\n\tlcmErr       error\n\n\tcPVCExpSC   string\n\tcPVCExpSize string\n\tcPVC        *v1.PersistentVolumeClaim\n\tcPVCErr     error\n\n\tdPVCErr error\n\n\tcPodExpFN  string\n\tcPodExpCM  string\n\tcPodExpPVC string\n\tcPod       *v1.Pod\n\tcPodErr    error\n\n\tdPodErr error\n\n\trFIOout FioResult\n\trFIOErr error\n}\n\nfunc (f *fakeFioStepper) validateNamespace(ctx context.Context, namespace string) error {\n\tf.steps = append(f.steps, \"VN\")\n\treturn f.vnErr\n}\nfunc (f *fakeFioStepper) validateNodeSelector(ctx context.Context, selector map[string]string) error {\n\tf.steps = append(f.steps, \"VNS\")\n\treturn f.vnsErr\n}\nfunc (f *fakeFioStepper) storageClassExists(ctx context.Context, storageClass string) (*storagev1.StorageClass, error) {\n\tf.steps = append(f.steps, \"SCE\")\n\treturn f.sceSC, f.sceErr\n}\nfunc (f *fakeFioStepper) loadConfigMap(ctx context.Context, args *RunFIOArgs) (*v1.ConfigMap, error) {\n\tf.steps = append(f.steps, \"LCM\")\n\treturn f.lcmConfigMap, f.lcmErr\n}\nfunc (f *fakeFioStepper) createPVC(ctx context.Context, storageclass, size, namespace string) (*v1.PersistentVolumeClaim, error) {\n\tf.steps = append(f.steps, \"CPVC\")\n\tf.cPVCExpSC = storageclass\n\tf.cPVCExpSize = size\n\treturn f.cPVC, f.cPVCErr\n}\nfunc (f *fakeFioStepper) deletePVC(ctx context.Context, pvcName, namespace string) error {\n\tf.steps = append(f.steps, \"DPVC\")\n\treturn f.dPVCErr\n}\nfunc (f *fakeFioStepper) createPod(ctx context.Context, pvcName, configMapName, 
testFileName, namespace string, nodeSelector map[string]string, image string) (*v1.Pod, error) {\n\tf.steps = append(f.steps, \"CPOD\")\n\tf.cPodExpCM = configMapName\n\tf.cPodExpFN = testFileName\n\tf.cPodExpPVC = pvcName\n\treturn f.cPod, f.cPodErr\n}\nfunc (f *fakeFioStepper) deletePod(ctx context.Context, podName, namespace string) error {\n\tf.steps = append(f.steps, \"DPOD\")\n\treturn f.dPodErr\n}\nfunc (f *fakeFioStepper) runFIOCommand(ctx context.Context, podName, containerName, testFileName, namespace string) (FioResult, error) {\n\tf.steps = append(f.steps, \"RFIOC\")\n\treturn f.rFIOout, f.rFIOErr\n}\nfunc (f *fakeFioStepper) deleteConfigMap(ctx context.Context, configMap *v1.ConfigMap, namespace string) error {\n\tf.steps = append(f.steps, \"DCM\")\n\treturn nil\n}\n\nfunc (s *FIOTestSuite) TestStorageClassExists(c *C) {\n\tctx := context.Background()\n\tfor _, tc := range []struct {\n\t\tcli          kubernetes.Interface\n\t\tstorageClass string\n\t\tchecker      Checker\n\t}{\n\t\t{\n\t\t\tcli:          fake.NewSimpleClientset(),\n\t\t\tstorageClass: \"sc\",\n\t\t\tchecker:      NotNil,\n\t\t},\n\t\t{\n\t\t\tcli:          fake.NewSimpleClientset(&storagev1.StorageClass{ObjectMeta: metav1.ObjectMeta{Name: \"sc\"}}),\n\t\t\tstorageClass: \"sc\",\n\t\t\tchecker:      IsNil,\n\t\t},\n\t} {\n\t\tstepper := &fioStepper{cli: tc.cli}\n\t\t_, err := stepper.storageClassExists(ctx, tc.storageClass)\n\t\tc.Check(err, tc.checker)\n\t}\n}\n\nfunc (s *FIOTestSuite) TestValidateNamespace(c *C) {\n\tctx := context.Background()\n\tstepper := &fioStepper{cli: fake.NewSimpleClientset()}\n\terr := stepper.validateNamespace(ctx, \"ns\")\n\tc.Assert(err, NotNil)\n\tstepper = &fioStepper{cli: fake.NewSimpleClientset(&v1.Namespace{\n\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\tName: \"ns\",\n\t\t},\n\t})}\n\terr = stepper.validateNamespace(ctx, \"ns\")\n\tc.Assert(err, IsNil)\n}\n\nfunc (s *FIOTestSuite) TestValidateNodeSelector(c *C) {\n\tctx := 
context.Background()\n\tstepper := &fioStepper{cli: fake.NewSimpleClientset(\n\t\t&v1.Node{\n\t\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\t\tName: \"a\",\n\t\t\t\tLabels: map[string]string{\n\t\t\t\t\t\"key\": \"value\",\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t&v1.Node{\n\t\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\t\tName: \"b\",\n\t\t\t\tLabels: map[string]string{\n\t\t\t\t\t\"key\": \"value\",\n\t\t\t\t\t\"foo\": \"bar\",\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t)}\n\tfor _, tc := range []struct {\n\t\tnodeSelector map[string]string\n\t\tchecker      Checker\n\t}{\n\t\t{ // 0 nodes satisfy\n\t\t\tnodeSelector: map[string]string{\n\t\t\t\t\"not\": \"present\",\n\t\t\t},\n\t\t\tchecker: NotNil,\n\t\t},\n\t\t{ // 1 node satisfies\n\t\t\tnodeSelector: map[string]string{\n\t\t\t\t\"key\": \"value\",\n\t\t\t\t\"foo\": \"bar\",\n\t\t\t},\n\t\t\tchecker: IsNil,\n\t\t},\n\t\t{ // 2 nodes satisfy\n\t\t\tnodeSelector: map[string]string{\n\t\t\t\t\"key\": \"value\",\n\t\t\t},\n\t\t\tchecker: IsNil,\n\t\t},\n\t} {\n\t\terr := stepper.validateNodeSelector(ctx, tc.nodeSelector)\n\t\tc.Check(err, tc.checker)\n\t}\n}\n\nfunc (s *FIOTestSuite) TestLoadConfigMap(c *C) {\n\tctx := context.Background()\n\tfile, err := os.CreateTemp(\"\", \"tempTLCfile\")\n\tc.Check(err, IsNil)\n\tdefer func() {\n\t\tc.Check(os.Remove(file.Name()), IsNil)\n\t}()\n\n\tfor i, tc := range []struct {\n\t\tcli           kubernetes.Interface\n\t\tconfigMapName string\n\t\tjobName       string\n\t\targs          *RunFIOArgs\n\t\tcmChecker     Checker\n\t\terrChecker    Checker\n\t\tfailCreates   bool\n\t\thasLabel      bool\n\t}{\n\t\t{ // provided file name not found\n\t\t\tcli: fake.NewSimpleClientset(),\n\t\t\targs: &RunFIOArgs{\n\t\t\t\tFIOJobFilepath: \"nonexistantfile\",\n\t\t\t},\n\t\t\tcmChecker:  IsNil,\n\t\t\terrChecker: NotNil,\n\t\t},\n\t\t{ // specified config map found\n\t\t\tcli: fake.NewSimpleClientset(),\n\t\t\targs: &RunFIOArgs{\n\t\t\t\tFIOJobFilepath: file.Name(),\n\t\t\t\tFIOJobName:     \"random\", // 
won't use this case\n\t\t\t},\n\t\t\tcmChecker:  NotNil,\n\t\t\terrChecker: IsNil,\n\t\t},\n\t\t{ // specified job name, not found\n\t\t\tcli: fake.NewSimpleClientset(),\n\t\t\targs: &RunFIOArgs{\n\t\t\t\tFIOJobName: \"random\",\n\t\t\t},\n\t\t\tcmChecker:  IsNil,\n\t\t\terrChecker: NotNil,\n\t\t},\n\t\t{ // specified job name, found\n\t\t\tcli: fake.NewSimpleClientset(),\n\t\t\targs: &RunFIOArgs{\n\t\t\t\tFIOJobName: DefaultFIOJob,\n\t\t\t},\n\t\t\tcmChecker:  NotNil,\n\t\t\terrChecker: IsNil,\n\t\t},\n\t\t{ // use default job\n\t\t\tcli:        fake.NewSimpleClientset(),\n\t\t\targs:       &RunFIOArgs{},\n\t\t\tcmChecker:  NotNil,\n\t\t\terrChecker: IsNil,\n\t\t},\n\t\t{ // Fails to create configMap\n\t\t\tcli:         fake.NewSimpleClientset(),\n\t\t\tcmChecker:   IsNil,\n\t\t\terrChecker:  NotNil,\n\t\t\targs:        &RunFIOArgs{},\n\t\t\tfailCreates: true,\n\t\t},\n\t} {\n\t\tc.Log(i)\n\t\tstepper := &fioStepper{cli: tc.cli}\n\t\tif tc.failCreates {\n\t\t\tstepper.cli.(*fake.Clientset).PrependReactor(\"create\", \"configmaps\", func(action k8stesting.Action) (handled bool, ret runtime.Object, err error) {\n\t\t\t\treturn true, nil, errors.New(\"Error creating object\")\n\t\t\t})\n\t\t}\n\t\tcm, err := stepper.loadConfigMap(ctx, tc.args)\n\t\tc.Check(err, tc.errChecker)\n\t\tc.Check(cm, tc.cmChecker)\n\t\tif cm != nil {\n\t\t\t_, ok := cm.Labels[CreatedByFIOLabel]\n\t\t\tc.Assert(ok, Equals, true)\n\t\t}\n\t}\n}\n\nfunc (s *FIOTestSuite) TestCreatePVC(c *C) {\n\tctx := context.Background()\n\tfor _, tc := range []struct {\n\t\tcli          kubernetes.Interface\n\t\tstorageclass string\n\t\tsize         string\n\t\terrChecker   Checker\n\t\tpvcChecker   Checker\n\t\tfailCreates  bool\n\t}{\n\t\t{\n\t\t\tcli:          fake.NewSimpleClientset(),\n\t\t\tstorageclass: \"fakesc\",\n\t\t\tsize:         \"20Gi\",\n\t\t\terrChecker:   IsNil,\n\t\t\tpvcChecker:   NotNil,\n\t\t},\n\t\t{ // Fails to create pvc\n\t\t\tcli:          
fake.NewSimpleClientset(),\n\t\t\tstorageclass: \"fakesc\",\n\t\t\tsize:         \"10Gi\",\n\t\t\tpvcChecker:   IsNil,\n\t\t\terrChecker:   NotNil,\n\t\t\tfailCreates:  true,\n\t\t},\n\t\t{ // parse error\n\t\t\tcli:          fake.NewSimpleClientset(),\n\t\t\tstorageclass: \"fakesc\",\n\t\t\tsize:         \"Not a quantity\",\n\t\t\tpvcChecker:   IsNil,\n\t\t\terrChecker:   NotNil,\n\t\t},\n\t} {\n\t\tstepper := &fioStepper{cli: tc.cli}\n\t\tif tc.failCreates {\n\t\t\tstepper.cli.(*fake.Clientset).PrependReactor(\"create\", \"*\", func(action k8stesting.Action) (handled bool, ret runtime.Object, err error) {\n\t\t\t\treturn true, nil, errors.New(\"Error creating object\")\n\t\t\t})\n\t\t}\n\t\tpvc, err := stepper.createPVC(ctx, tc.storageclass, tc.size, DefaultNS)\n\t\tc.Check(err, tc.errChecker)\n\t\tc.Check(pvc, tc.pvcChecker)\n\t\tif pvc != nil {\n\t\t\tc.Assert(pvc.GenerateName, Equals, PVCGenerateName)\n\t\t\tc.Assert(*pvc.Spec.StorageClassName, Equals, tc.storageclass)\n\t\t\tvalue, ok := pvc.Spec.Resources.Requests.Storage().AsInt64()\n\t\t\tc.Assert(ok, Equals, true)\n\t\t\tc.Assert(value, Equals, int64(21474836480))\n\t\t}\n\t}\n}\n\nfunc (s *FIOTestSuite) TestDeletePVC(c *C) {\n\tctx := context.Background()\n\tstepper := &fioStepper{cli: fake.NewSimpleClientset(&v1.PersistentVolumeClaim{\n\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\tName:      \"pvc\",\n\t\t\tNamespace: DefaultNS,\n\t\t}})}\n\terr := stepper.deletePVC(ctx, \"pvc\", DefaultNS)\n\tc.Assert(err, IsNil)\n\terr = stepper.deletePVC(ctx, \"pvc\", DefaultNS)\n\tc.Assert(err, NotNil)\n}\n\nfunc (s *FIOTestSuite) TestCreatPod(c *C) {\n\tctx := context.Background()\n\tfor _, tc := range []struct {\n\t\tpvcName       string\n\t\tconfigMapName string\n\t\ttestFileName  string\n\t\tnodeSelector  map[string]string\n\t\timage         string\n\t\treactor       []k8stesting.Reactor\n\t\tpodReadyErr   error\n\t\terrChecker    Checker\n\t}{\n\t\t{\n\t\t\tpvcName:       \"pvc\",\n\t\t\tconfigMapName: 
\"cm\",\n\t\t\ttestFileName:  \"testfile\",\n\t\t\tnodeSelector: map[string]string{\n\t\t\t\t\"key\": \"\",\n\t\t\t\t\"foo\": \"bar\",\n\t\t\t},\n\t\t\terrChecker: IsNil,\n\t\t},\n\t\t{\n\t\t\tpvcName:       \"pvc\",\n\t\t\tconfigMapName: \"cm\",\n\t\t\ttestFileName:  \"testfile\",\n\t\t\terrChecker:    NotNil,\n\t\t\treactor: []k8stesting.Reactor{\n\t\t\t\t&k8stesting.SimpleReactor{\n\t\t\t\t\tVerb:     \"create\",\n\t\t\t\t\tResource: \"*\",\n\t\t\t\t\tReaction: func(action k8stesting.Action) (handled bool, ret runtime.Object, err error) {\n\t\t\t\t\t\treturn true, &v1.Pod{ObjectMeta: metav1.ObjectMeta{Name: \"pod\"}}, nil\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t&k8stesting.SimpleReactor{\n\t\t\t\t\tVerb:     \"get\",\n\t\t\t\t\tResource: \"*\",\n\t\t\t\t\tReaction: func(action k8stesting.Action) (handled bool, ret runtime.Object, err error) {\n\t\t\t\t\t\treturn true, nil, errors.New(\"Error getting object\")\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tpvcName:       \"pvc\",\n\t\t\tconfigMapName: \"cm\",\n\t\t\ttestFileName:  \"testfile\",\n\t\t\terrChecker:    NotNil,\n\t\t\treactor: []k8stesting.Reactor{\n\t\t\t\t&k8stesting.SimpleReactor{\n\t\t\t\t\tVerb:     \"create\",\n\t\t\t\t\tResource: \"*\",\n\t\t\t\t\tReaction: func(action k8stesting.Action) (handled bool, ret runtime.Object, err error) {\n\t\t\t\t\t\treturn true, &v1.Pod{ObjectMeta: metav1.ObjectMeta{Name: \"pod\"}}, nil\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\tpodReadyErr: fmt.Errorf(\"pod ready error\"),\n\t\t},\n\t\t{\n\t\t\tpvcName:       \"pvc\",\n\t\t\tconfigMapName: \"cm\",\n\t\t\ttestFileName:  \"testfile\",\n\t\t\terrChecker:    NotNil,\n\t\t\treactor: []k8stesting.Reactor{\n\t\t\t\t&k8stesting.SimpleReactor{\n\t\t\t\t\tVerb:     \"create\",\n\t\t\t\t\tResource: \"*\",\n\t\t\t\t\tReaction: func(action k8stesting.Action) (handled bool, ret runtime.Object, err error) {\n\t\t\t\t\t\treturn true, nil, fmt.Errorf(\"pod create 
error\")\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tpvcName:       \"pvc\",\n\t\t\tconfigMapName: \"cm\",\n\t\t\ttestFileName:  \"\",\n\t\t\timage:         \"someotherimage\",\n\t\t\terrChecker:    NotNil,\n\t\t},\n\t\t{\n\t\t\tpvcName:       \"\",\n\t\t\tconfigMapName: \"cm\",\n\t\t\ttestFileName:  \"asdf\",\n\t\t\terrChecker:    NotNil,\n\t\t},\n\t\t{\n\t\t\tpvcName:       \"pvc\",\n\t\t\tconfigMapName: \"\",\n\t\t\ttestFileName:  \"asd\",\n\t\t\terrChecker:    NotNil,\n\t\t},\n\t} {\n\t\tstepper := &fioStepper{\n\t\t\tcli:      fake.NewSimpleClientset(),\n\t\t\tpodReady: &fakePodReadyChecker{prcErr: tc.podReadyErr},\n\t\t}\n\t\tif tc.reactor != nil {\n\t\t\tstepper.cli.(*fake.Clientset).ReactionChain = tc.reactor\n\t\t}\n\t\tpod, err := stepper.createPod(ctx, tc.pvcName, tc.configMapName, tc.testFileName, DefaultNS, tc.nodeSelector, tc.image)\n\t\tc.Check(err, tc.errChecker)\n\t\tif err == nil {\n\t\t\tc.Assert(pod.GenerateName, Equals, PodGenerateName)\n\t\t\tc.Assert(len(pod.Spec.Volumes), Equals, 2)\n\t\t\tfor _, vol := range pod.Spec.Volumes {\n\t\t\t\tswitch vol.Name {\n\t\t\t\tcase \"persistent-storage\":\n\t\t\t\t\tc.Assert(vol.PersistentVolumeClaim.ClaimName, Equals, tc.pvcName)\n\t\t\t\tcase \"config-map\":\n\t\t\t\t\tc.Assert(vol.ConfigMap.Name, Equals, tc.configMapName)\n\t\t\t\t}\n\t\t\t}\n\t\t\tc.Assert(len(pod.Spec.Containers), Equals, 1)\n\t\t\tc.Assert(pod.Spec.Containers[0].Name, Equals, ContainerName)\n\t\t\tc.Assert(pod.Spec.Containers[0].Command, DeepEquals, []string{\"/bin/sh\"})\n\t\t\tc.Assert(pod.Spec.Containers[0].Args, DeepEquals, []string{\"-c\", \"tail -f /dev/null\"})\n\t\t\tc.Assert(pod.Spec.Containers[0].VolumeMounts, DeepEquals, []v1.VolumeMount{\n\t\t\t\t{Name: \"persistent-storage\", MountPath: VolumeMountPath},\n\t\t\t\t{Name: \"config-map\", MountPath: ConfigMapMountPath},\n\t\t\t})\n\t\t\tif tc.image == \"\" {\n\t\t\t\tc.Assert(pod.Spec.Containers[0].Image, Equals, common.DefaultPodImage)\n\t\t\t} else 
{\n\t\t\t\tc.Assert(pod.Spec.Containers[0].Image, Equals, tc.image)\n\t\t\t}\n\t\t\tc.Assert(pod.Spec.NodeSelector, DeepEquals, tc.nodeSelector)\n\t\t}\n\t}\n}\n\nfunc (s *FIOTestSuite) TestDeletePod(c *C) {\n\tctx := context.Background()\n\tstepper := &fioStepper{cli: fake.NewSimpleClientset(&v1.Pod{\n\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\tName:      \"pod\",\n\t\t\tNamespace: DefaultNS,\n\t\t}})}\n\terr := stepper.deletePod(ctx, \"pod\", DefaultNS)\n\tc.Assert(err, IsNil)\n\terr = stepper.deletePod(ctx, \"pod\", DefaultNS)\n\tc.Assert(err, NotNil)\n}\n\nfunc (s *FIOTestSuite) TestFioTestFileName(c *C) {\n\tfor _, tc := range []struct {\n\t\tconfigMap  map[string]string\n\t\tretVal     string\n\t\terrChecker Checker\n\t}{\n\t\t{\n\t\t\tconfigMap: map[string]string{\n\t\t\t\t\"testfile.fio\": \"some test data\",\n\t\t\t},\n\t\t\tretVal:     \"testfile.fio\",\n\t\t\terrChecker: IsNil,\n\t\t},\n\t\t{\n\t\t\tconfigMap: map[string]string{\n\t\t\t\t\"ConfigMapSCKey\":   \"storageclass\",\n\t\t\t\t\"ConfigMapSizeKey\": \"10Gi\",\n\t\t\t\t\"testfile.fio\":     \"some test data\",\n\t\t\t},\n\t\t\tretVal:     \"\",\n\t\t\terrChecker: NotNil,\n\t\t},\n\t} {\n\t\tret, err := fioTestFilename(tc.configMap)\n\t\tc.Check(err, tc.errChecker)\n\t\tc.Assert(ret, Equals, tc.retVal)\n\t}\n}\n\nfunc (s *FIOTestSuite) TestRunFioCommand(c *C) {\n\tvar parsedout FioResult\n\terr := json.Unmarshal([]byte(parsableFioOutput), &parsedout)\n\tc.Assert(err, IsNil)\n\n\tctx := context.Background()\n\tfor _, tc := range []struct {\n\t\texecutor      *fakeKubeExecutor\n\t\terrChecker    Checker\n\t\tpodName       string\n\t\tcontainerName string\n\t\ttestFileName  string\n\t\tout           FioResult\n\t}{\n\t\t{\n\t\t\texecutor: &fakeKubeExecutor{\n\t\t\t\tkeErr:    nil,\n\t\t\t\tkeStrErr: \"\",\n\t\t\t\tkeStdOut: parsableFioOutput,\n\t\t\t},\n\t\t\terrChecker:    IsNil,\n\t\t\tpodName:       \"pod\",\n\t\t\tcontainerName: \"container\",\n\t\t\ttestFileName:  \"tfName\",\n\t\t\tout:           
parsedout,\n\t\t},\n\t\t{\n\t\t\texecutor: &fakeKubeExecutor{\n\t\t\t\tkeErr:    nil,\n\t\t\t\tkeStrErr: \"\",\n\t\t\t\tkeStdOut: \"unparsable string\",\n\t\t\t},\n\t\t\terrChecker:    NotNil,\n\t\t\tpodName:       \"pod\",\n\t\t\tcontainerName: \"container\",\n\t\t\ttestFileName:  \"tfName\",\n\t\t\tout:           FioResult{},\n\t\t},\n\t\t{\n\t\t\texecutor: &fakeKubeExecutor{\n\t\t\t\tkeErr:    fmt.Errorf(\"kubeexec err\"),\n\t\t\t\tkeStrErr: \"\",\n\t\t\t\tkeStdOut: \"unparsable string\",\n\t\t\t},\n\t\t\terrChecker:    NotNil,\n\t\t\tpodName:       \"pod\",\n\t\t\tcontainerName: \"container\",\n\t\t\ttestFileName:  \"tfName\",\n\t\t\tout:           FioResult{},\n\t\t},\n\t\t{\n\t\t\texecutor: &fakeKubeExecutor{\n\t\t\t\tkeErr:    nil,\n\t\t\t\tkeStrErr: \"execution error\",\n\t\t\t\tkeStdOut: \"unparsable string\",\n\t\t\t},\n\t\t\terrChecker:    NotNil,\n\t\t\tpodName:       \"pod\",\n\t\t\tcontainerName: \"container\",\n\t\t\ttestFileName:  \"tfName\",\n\t\t\tout:           FioResult{},\n\t\t},\n\t} {\n\t\tstepper := &fioStepper{\n\t\t\tkubeExecutor: tc.executor,\n\t\t}\n\t\tout, err := stepper.runFIOCommand(ctx, tc.podName, tc.containerName, tc.testFileName, DefaultNS)\n\t\tc.Check(err, tc.errChecker)\n\t\tc.Assert(out, DeepEquals, tc.out)\n\t\tc.Assert(tc.executor.keInPodName, Equals, tc.podName)\n\t\tc.Assert(tc.executor.keInContainerName, Equals, tc.containerName)\n\t\tc.Assert(len(tc.executor.keInCommand), Equals, 5)\n\t\tc.Assert(tc.executor.keInCommand[0], Equals, \"fio\")\n\t\tc.Assert(tc.executor.keInCommand[1], Equals, \"--directory\")\n\t\tc.Assert(tc.executor.keInCommand[2], Equals, VolumeMountPath)\n\t\tjobFilePath := fmt.Sprintf(\"%s/%s\", ConfigMapMountPath, tc.testFileName)\n\t\tc.Assert(tc.executor.keInCommand[3], Equals, jobFilePath)\n\t}\n}\n\nfunc (s *FIOTestSuite) TestDeleteConfigMap(c *C) {\n\tctx := context.Background()\n\tdefaultNS := \"default\"\n\tc.Check(os.Setenv(PodNamespaceEnvKey, defaultNS), IsNil)\n\tfor _, tc := range []struct 
{\n\t\tcli        kubernetes.Interface\n\t\tcm         *v1.ConfigMap\n\t\terrChecker Checker\n\t\tlenCMList  int\n\t}{\n\t\t{ // Don't delete it unless it has the label\n\t\t\tcli: fake.NewSimpleClientset(&v1.ConfigMap{\n\t\t\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\t\t\tName:      \"cm\",\n\t\t\t\t\tNamespace: defaultNS,\n\t\t\t\t},\n\t\t\t}),\n\t\t\tcm: &v1.ConfigMap{\n\t\t\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\t\t\tName:      \"cm\",\n\t\t\t\t\tNamespace: defaultNS,\n\t\t\t\t},\n\t\t\t},\n\t\t\terrChecker: IsNil,\n\t\t\tlenCMList:  1,\n\t\t},\n\t\t{ // Has label delete\n\t\t\tcli: fake.NewSimpleClientset(&v1.ConfigMap{\n\t\t\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\t\t\tName:      \"cm\",\n\t\t\t\t\tNamespace: defaultNS,\n\t\t\t\t},\n\t\t\t}),\n\t\t\tcm: &v1.ConfigMap{\n\t\t\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\t\t\tName:      \"cm\",\n\t\t\t\t\tNamespace: defaultNS,\n\t\t\t\t\tLabels: map[string]string{\n\t\t\t\t\t\tCreatedByFIOLabel: \"true\",\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\terrChecker: IsNil,\n\t\t\tlenCMList:  0,\n\t\t},\n\t\t{ // No cm exists\n\t\t\tcli: fake.NewSimpleClientset(),\n\t\t\tcm: &v1.ConfigMap{\n\t\t\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\t\t\tName:      \"cm\",\n\t\t\t\t\tNamespace: defaultNS,\n\t\t\t\t\tLabels: map[string]string{\n\t\t\t\t\t\tCreatedByFIOLabel: \"true\",\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\terrChecker: NotNil,\n\t\t},\n\t} {\n\t\tstepper := &fioStepper{cli: tc.cli}\n\t\terr := stepper.deleteConfigMap(ctx, tc.cm, DefaultNS)\n\t\tc.Check(err, tc.errChecker)\n\t\tif err == nil {\n\t\t\tlist, err := stepper.cli.CoreV1().ConfigMaps(defaultNS).List(ctx, metav1.ListOptions{})\n\t\t\tc.Check(err, IsNil)\n\t\t\tc.Assert(len(list.Items), Equals, tc.lenCMList)\n\t\t}\n\t}\n\tc.Check(os.Unsetenv(PodNamespaceEnvKey), IsNil)\n}\n\nfunc (s *FIOTestSuite) TestWaitForPodReady(c *C) {\n\tctx := context.Background()\n\tprChecker := &podReadyChecker{\n\t\tcli: fake.NewSimpleClientset(),\n\t}\n\terr := 
prChecker.waitForPodReady(ctx, \"somens\", \"somePod\")\n\tc.Check(err, NotNil)\n\tprChecker.cli = fake.NewSimpleClientset(&v1.Pod{\n\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\tName:      \"somePod\",\n\t\t\tNamespace: \"somens\",\n\t\t},\n\t\tStatus: v1.PodStatus{\n\t\t\tPhase: v1.PodRunning,\n\t\t},\n\t})\n}\n\ntype fakePodReadyChecker struct {\n\tprcErr error\n}\n\nfunc (f *fakePodReadyChecker) waitForPodReady(ctx context.Context, namespace, name string) error {\n\treturn f.prcErr\n}\n\ntype fakeKubeExecutor struct {\n\tkeErr             error\n\tkeStdOut          string\n\tkeStrErr          string\n\tkeInNS            string\n\tkeInPodName       string\n\tkeInContainerName string\n\tkeInCommand       []string\n}\n\nfunc (fk *fakeKubeExecutor) exec(_ context.Context, namespace, podName, containerName string, command []string) (string, string, error) {\n\tfk.keInNS = namespace\n\tfk.keInPodName = podName\n\tfk.keInContainerName = containerName\n\tfk.keInCommand = command\n\treturn fk.keStdOut, fk.keStrErr, fk.keErr\n}\n"
  },
  {
    "path": "pkg/fio/fio_types.go",
    "content": "package fio\n\nimport \"fmt\"\n\ntype FioResult struct {\n\tFioVersion    string           `json:\"fio version,omitempty\"`\n\tTimestamp     int64            `json:\"timestamp,omitempty\"`\n\tTimestampMS   int64            `json:\"timestamp_ms,omitempty\"`\n\tTime          string           `json:\"time,omitempty\"`\n\tGlobalOptions FioGlobalOptions `json:\"global options,omitempty\"`\n\tJobs          []FioJobs        `json:\"jobs,omitempty\"`\n\tDiskUtil      []FioDiskUtil    `json:\"disk_util,omitempty\"`\n}\n\nfunc (f FioResult) Print() string {\n\tvar res string\n\tres += fmt.Sprintf(\"FIO version - %s\\n\", f.FioVersion)\n\tres += fmt.Sprintf(\"Global options - %s\\n\\n\", f.GlobalOptions.Print())\n\tfor _, job := range f.Jobs {\n\t\tres += fmt.Sprintf(\"%s\\n\", job.Print())\n\t}\n\tres += \"Disk stats (read/write):\\n\"\n\tfor _, du := range f.DiskUtil {\n\t\tres += fmt.Sprintf(\"%s\\n\", du.Print())\n\t}\n\n\treturn res\n}\n\ntype FioGlobalOptions struct {\n\tDirectory  string `json:\"directory,omitempty\"`\n\tRandRepeat string `json:\"randrepeat,omitempty\"`\n\tVerify     string `json:\"verify,omitempty\"`\n\tIOEngine   string `json:\"ioengine,omitempty\"`\n\tDirect     string `json:\"direct,omitempty\"`\n\tGtodReduce string `json:\"gtod_reduce,omitempty\"`\n}\n\nfunc (g FioGlobalOptions) Print() string {\n\treturn fmt.Sprintf(\"ioengine=%s verify=%s direct=%s gtod_reduce=%s\", g.IOEngine, g.Verify, g.Direct, g.GtodReduce)\n}\n\ntype FioJobs struct {\n\tJobName           string        `json:\"jobname,omitempty\"`\n\tGroupID           int           `json:\"groupid,omitempty\"`\n\tError             int           `json:\"error,omitempty\"`\n\tEta               int           `json:\"eta,omitempty\"`\n\tElapsed           int           `json:\"elapsed,omitempty\"`\n\tJobOptions        FioJobOptions `json:\"job options,omitempty\"`\n\tRead              FioStats      `json:\"read,omitempty\"`\n\tWrite             FioStats      
`json:\"write,omitempty\"`\n\tTrim              FioStats      `json:\"trim,omitempty\"`\n\tSync              FioStats      `json:\"sync,omitempty\"`\n\tJobRuntime        int32         `json:\"job_runtime,omitempty\"`\n\tUsrCpu            float32       `json:\"usr_cpu,omitempty\"`\n\tSysCpu            float32       `json:\"sys_cpu,omitempty\"`\n\tCtx               int32         `json:\"ctx,omitempty\"`\n\tMajF              int32         `json:\"majf,omitempty\"`\n\tMinF              int32         `json:\"minf,omitempty\"`\n\tIoDepthLevel      FioDepth      `json:\"iodepth_level,omitempty\"`\n\tIoDepthSubmit     FioDepth      `json:\"iodepth_submit,omitempty\"`\n\tIoDepthComplete   FioDepth      `json:\"iodepth_complete,omitempty\"`\n\tLatencyNs         FioLatency    `json:\"latency_ns,omitempty\"`\n\tLatencyUs         FioLatency    `json:\"latency_us,omitempty\"`\n\tLatencyMs         FioLatency    `json:\"latency_ms,omitempty\"`\n\tLatencyDepth      int32         `json:\"latency_depth,omitempty\"`\n\tLatencyTarget     int32         `json:\"latency_target,omitempty\"`\n\tLatencyPercentile float32       `json:\"latency_percentile,omitempty\"`\n\tLatencyWindow     int32         `json:\"latency_window,omitempty\"`\n}\n\nfunc (j FioJobs) Print() string {\n\tvar job string\n\tjob += fmt.Sprintf(\"%s\\n\", j.JobOptions.Print())\n\tif j.Read.Iops != 0 || j.Read.BW != 0 {\n\t\tjob += fmt.Sprintf(\"read:\\n%s\\n\", j.Read.Print())\n\t}\n\tif j.Write.Iops != 0 || j.Write.BW != 0 {\n\t\tjob += fmt.Sprintf(\"write:\\n%s\\n\", j.Write.Print())\n\t}\n\treturn job\n}\n\ntype FioJobOptions struct {\n\tName     string `json:\"name,omitempty\"`\n\tBS       string `json:\"bs,omitempty\"`\n\tIoDepth  string `json:\"iodepth,omitempty\"`\n\tSize     string `json:\"size,omitempty\"`\n\tRW       string `json:\"rw,omitempty\"`\n\tRampTime string `json:\"ramp_time,omitempty\"`\n\tRunTime  string `json:\"runtime,omitempty\"`\n}\n\nfunc (o FioJobOptions) Print() string {\n\treturn 
fmt.Sprintf(\"JobName: %s\\n  blocksize=%s filesize=%s iodepth=%s rw=%s\", o.Name, o.BS, o.Size, o.IoDepth, o.RW)\n}\n\ntype FioStats struct {\n\tIOBytes     int64   `json:\"io_bytes,omitempty\"`\n\tIOKBytes    int64   `json:\"io_kbytes,omitempty\"`\n\tBWBytes     int64   `json:\"bw_bytes,omitempty\"`\n\tBW          int64   `json:\"bw,omitempty\"`\n\tIops        float32 `json:\"iops,omitempty\"`\n\tRuntime     int64   `json:\"runtime,omitempty\"`\n\tTotalIos    int64   `json:\"total_ios,omitempty\"`\n\tShortIos    int64   `json:\"short_ios,omitempty\"`\n\tDropIos     int64   `json:\"drop_ios,omitempty\"`\n\tSlatNs      FioNS   `json:\"slat_ns,omitempty\"`\n\tClatNs      FioNS   `json:\"clat_ns,omitempty\"`\n\tLatNs       FioNS   `json:\"lat_ns,omitempty\"`\n\tBwMin       int64   `json:\"bw_min,omitempty\"`\n\tBwMax       int64   `json:\"bw_max,omitempty\"`\n\tBwAgg       float32 `json:\"bw_agg,omitempty\"`\n\tBwMean      float32 `json:\"bw_mean,omitempty\"`\n\tBwDev       float32 `json:\"bw_dev,omitempty\"`\n\tBwSamples   int32   `json:\"bw_samples,omitempty\"`\n\tIopsMin     int32   `json:\"iops_min,omitempty\"`\n\tIopsMax     int32   `json:\"iops_max,omitempty\"`\n\tIopsMean    float32 `json:\"iops_mean,omitempty\"`\n\tIopsStdDev  float32 `json:\"iops_stddev,omitempty\"`\n\tIopsSamples int32   `json:\"iops_samples,omitempty\"`\n}\n\nfunc (s FioStats) Print() string {\n\tvar stats string\n\tstats += fmt.Sprintf(\"  IOPS=%f BW(KiB/s)=%d\\n\", s.Iops, s.BW)\n\tstats += fmt.Sprintf(\"  iops: min=%d max=%d avg=%f\\n\", s.IopsMin, s.IopsMax, s.IopsMean)\n\tstats += fmt.Sprintf(\"  bw(KiB/s): min=%d max=%d avg=%f\", s.BwMin, s.BwMax, s.BwMean)\n\treturn stats\n}\n\ntype FioNS struct {\n\tMin    int64   `json:\"min,omitempty\"`\n\tMax    int64   `json:\"max,omitempty\"`\n\tMean   float32 `json:\"mean,omitempty\"`\n\tStdDev float32 `json:\"stddev,omitempty\"`\n\tN      int64   `json:\"N,omitempty\"`\n}\n\ntype FioDepth struct {\n\tFioDepth0    float32 
`json:\"0,omitempty\"`\n\tFioDepth1    float32 `json:\"1,omitempty\"`\n\tFioDepth2    float32 `json:\"2,omitempty\"`\n\tFioDepth4    float32 `json:\"4,omitempty\"`\n\tFioDepth8    float32 `json:\"8,omitempty\"`\n\tFioDepth16   float32 `json:\"16,omitempty\"`\n\tFioDepth32   float32 `json:\"32,omitempty\"`\n\tFioDepth64   float32 `json:\"64,omitempty\"`\n\tFioDepthGE64 float32 `json:\">=64,omitempty\"`\n}\n\ntype FioLatency struct {\n\tFioLat2      float32 `json:\"2,omitempty\"`\n\tFioLat4      float32 `json:\"4,omitempty\"`\n\tFioLat10     float32 `json:\"10,omitempty\"`\n\tFioLat20     float32 `json:\"20,omitempty\"`\n\tFioLat50     float32 `json:\"50,omitempty\"`\n\tFioLat100    float32 `json:\"100,omitempty\"`\n\tFioLat250    float32 `json:\"250,omitempty\"`\n\tFioLat500    float32 `json:\"500,omitempty\"`\n\tFioLat750    float32 `json:\"750,omitempty\"`\n\tFioLat1000   float32 `json:\"1000,omitempty\"`\n\tFioLat2000   float32 `json:\"2000,omitempty\"`\n\tFioLatGE2000 float32 `json:\">=2000,omitempty\"`\n}\n\ntype FioDiskUtil struct {\n\tName        string  `json:\"name,omitempty\"`\n\tReadIos     int64   `json:\"read_ios,omitempty\"`\n\tWriteIos    int64   `json:\"write_ios,omitempty\"`\n\tReadMerges  int64   `json:\"read_merges,omitempty\"`\n\tWriteMerges int64   `json:\"write_merges,omitempty\"`\n\tReadTicks   int64   `json:\"read_ticks,omitempty\"`\n\tWriteTicks  int64   `json:\"write_ticks,omitempty\"`\n\tInQueue     int64   `json:\"in_queue,omitempty\"`\n\tUtil        float32 `json:\"util,omitempty\"`\n}\n\nfunc (d FioDiskUtil) Print() string {\n\t//Disk stats (read/write):\n\t//rbd4: ios=30022/11982, merge=0/313, ticks=1028675/1022768, in_queue=2063740, util=99.67%\n\tvar du string\n\tdu += fmt.Sprintf(\"  %s: ios=%d/%d merge=%d/%d ticks=%d/%d in_queue=%d, util=%f%%\", d.Name, d.ReadIos,\n\t\td.WriteIos, d.ReadMerges, d.WriteMerges, d.ReadTicks, d.WriteTicks, d.InQueue, d.Util)\n\treturn du\n}\n"
  },
  {
    "path": "pkg/fio/parsable_fio_output.go",
    "content": "package fio\n\nconst parsableFioOutput = `{\n\t\"fio version\" : \"fio-3.20\",\n\t\"timestamp\" : 1611952282,\n\t\"timestamp_ms\" : 1611952282240,\n\t\"time\" : \"Fri Jan 29 20:31:22 2021\",\n\t\"global options\" : {\n\t  \"directory\" : \"/dataset\",\n\t  \"randrepeat\" : \"0\",\n\t  \"verify\" : \"0\",\n\t  \"ioengine\" : \"libaio\",\n\t  \"direct\" : \"1\",\n\t  \"gtod_reduce\" : \"1\"\n\t},\n\t\"jobs\" : [\n\t  {\n\t\t\"jobname\" : \"read_iops\",\n\t\t\"groupid\" : 0,\n\t\t\"error\" : 0,\n\t\t\"eta\" : 0,\n\t\t\"elapsed\" : 18,\n\t\t\"job options\" : {\n\t\t  \"name\" : \"read_iops\",\n\t\t  \"bs\" : \"4K\",\n\t\t  \"iodepth\" : \"64\",\n\t\t  \"size\" : \"2G\",\n\t\t  \"rw\" : \"randread\",\n\t\t  \"ramp_time\" : \"2s\",\n\t\t  \"runtime\" : \"15s\"\n\t\t},\n\t\t\"read\" : {\n\t\t  \"io_bytes\" : 61886464,\n\t\t  \"io_kbytes\" : 60436,\n\t\t  \"bw_bytes\" : 4039322,\n\t\t  \"bw\" : 3944,\n\t\t  \"iops\" : 982.050780,\n\t\t  \"runtime\" : 15321,\n\t\t  \"total_ios\" : 15046,\n\t\t  \"short_ios\" : 0,\n\t\t  \"drop_ios\" : 0,\n\t\t  \"slat_ns\" : {\n\t\t\t\"min\" : 0,\n\t\t\t\"max\" : 0,\n\t\t\t\"mean\" : 0.000000,\n\t\t\t\"stddev\" : 0.000000,\n\t\t\t\"N\" : 0\n\t\t  },\n\t\t  \"clat_ns\" : {\n\t\t\t\"min\" : 0,\n\t\t\t\"max\" : 0,\n\t\t\t\"mean\" : 0.000000,\n\t\t\t\"stddev\" : 0.000000,\n\t\t\t\"N\" : 0\n\t\t  },\n\t\t  \"lat_ns\" : {\n\t\t\t\"min\" : 0,\n\t\t\t\"max\" : 0,\n\t\t\t\"mean\" : 0.000000,\n\t\t\t\"stddev\" : 0.000000,\n\t\t\t\"N\" : 0\n\t\t  },\n\t\t  \"bw_min\" : 1919,\n\t\t  \"bw_max\" : 7664,\n\t\t  \"bw_agg\" : 100.000000,\n\t\t  \"bw_mean\" : 3995.000000,\n\t\t  \"bw_dev\" : 1200.820783,\n\t\t  \"bw_samples\" : 30,\n\t\t  \"iops_min\" : 479,\n\t\t  \"iops_max\" : 1916,\n\t\t  \"iops_mean\" : 998.566667,\n\t\t  \"iops_stddev\" : 300.247677,\n\t\t  \"iops_samples\" : 30\n\t\t},\n\t\t\"write\" : {\n\t\t  \"io_bytes\" : 0,\n\t\t  \"io_kbytes\" : 0,\n\t\t  \"bw_bytes\" : 0,\n\t\t  \"bw\" : 0,\n\t\t  \"iops\" : 0.000000,\n\t\t  
\"runtime\" : 0,\n\t\t  \"total_ios\" : 0,\n\t\t  \"short_ios\" : 0,\n\t\t  \"drop_ios\" : 0,\n\t\t  \"slat_ns\" : {\n\t\t\t\"min\" : 0,\n\t\t\t\"max\" : 0,\n\t\t\t\"mean\" : 0.000000,\n\t\t\t\"stddev\" : 0.000000,\n\t\t\t\"N\" : 0\n\t\t  },\n\t\t  \"clat_ns\" : {\n\t\t\t\"min\" : 0,\n\t\t\t\"max\" : 0,\n\t\t\t\"mean\" : 0.000000,\n\t\t\t\"stddev\" : 0.000000,\n\t\t\t\"N\" : 0\n\t\t  },\n\t\t  \"lat_ns\" : {\n\t\t\t\"min\" : 0,\n\t\t\t\"max\" : 0,\n\t\t\t\"mean\" : 0.000000,\n\t\t\t\"stddev\" : 0.000000,\n\t\t\t\"N\" : 0\n\t\t  },\n\t\t  \"bw_min\" : 0,\n\t\t  \"bw_max\" : 0,\n\t\t  \"bw_agg\" : 0.000000,\n\t\t  \"bw_mean\" : 0.000000,\n\t\t  \"bw_dev\" : 0.000000,\n\t\t  \"bw_samples\" : 0,\n\t\t  \"iops_min\" : 0,\n\t\t  \"iops_max\" : 0,\n\t\t  \"iops_mean\" : 0.000000,\n\t\t  \"iops_stddev\" : 0.000000,\n\t\t  \"iops_samples\" : 0\n\t\t},\n\t\t\"trim\" : {\n\t\t  \"io_bytes\" : 0,\n\t\t  \"io_kbytes\" : 0,\n\t\t  \"bw_bytes\" : 0,\n\t\t  \"bw\" : 0,\n\t\t  \"iops\" : 0.000000,\n\t\t  \"runtime\" : 0,\n\t\t  \"total_ios\" : 0,\n\t\t  \"short_ios\" : 0,\n\t\t  \"drop_ios\" : 0,\n\t\t  \"slat_ns\" : {\n\t\t\t\"min\" : 0,\n\t\t\t\"max\" : 0,\n\t\t\t\"mean\" : 0.000000,\n\t\t\t\"stddev\" : 0.000000,\n\t\t\t\"N\" : 0\n\t\t  },\n\t\t  \"clat_ns\" : {\n\t\t\t\"min\" : 0,\n\t\t\t\"max\" : 0,\n\t\t\t\"mean\" : 0.000000,\n\t\t\t\"stddev\" : 0.000000,\n\t\t\t\"N\" : 0\n\t\t  },\n\t\t  \"lat_ns\" : {\n\t\t\t\"min\" : 0,\n\t\t\t\"max\" : 0,\n\t\t\t\"mean\" : 0.000000,\n\t\t\t\"stddev\" : 0.000000,\n\t\t\t\"N\" : 0\n\t\t  },\n\t\t  \"bw_min\" : 0,\n\t\t  \"bw_max\" : 0,\n\t\t  \"bw_agg\" : 0.000000,\n\t\t  \"bw_mean\" : 0.000000,\n\t\t  \"bw_dev\" : 0.000000,\n\t\t  \"bw_samples\" : 0,\n\t\t  \"iops_min\" : 0,\n\t\t  \"iops_max\" : 0,\n\t\t  \"iops_mean\" : 0.000000,\n\t\t  \"iops_stddev\" : 0.000000,\n\t\t  \"iops_samples\" : 0\n\t\t},\n\t\t\"sync\" : {\n\t\t  \"total_ios\" : 0,\n\t\t  \"lat_ns\" : {\n\t\t\t\"min\" : 0,\n\t\t\t\"max\" : 0,\n\t\t\t\"mean\" : 
0.000000,\n\t\t\t\"stddev\" : 0.000000,\n\t\t\t\"N\" : 0\n\t\t  }\n\t\t},\n\t\t\"job_runtime\" : 15322,\n\t\t\"usr_cpu\" : 1.109516,\n\t\t\"sys_cpu\" : 3.648349,\n\t\t\"ctx\" : 17991,\n\t\t\"majf\" : 1,\n\t\t\"minf\" : 62,\n\t\t\"iodepth_level\" : {\n\t\t  \"1\" : 0.000000,\n\t\t  \"2\" : 0.000000,\n\t\t  \"4\" : 0.000000,\n\t\t  \"8\" : 0.000000,\n\t\t  \"16\" : 0.000000,\n\t\t  \"32\" : 0.000000,\n\t\t  \">=64\" : 100.000000\n\t\t},\n\t\t\"iodepth_submit\" : {\n\t\t  \"0\" : 0.000000,\n\t\t  \"4\" : 100.000000,\n\t\t  \"8\" : 0.000000,\n\t\t  \"16\" : 0.000000,\n\t\t  \"32\" : 0.000000,\n\t\t  \"64\" : 0.000000,\n\t\t  \">=64\" : 0.000000\n\t\t},\n\t\t\"iodepth_complete\" : {\n\t\t  \"0\" : 0.000000,\n\t\t  \"4\" : 99.993354,\n\t\t  \"8\" : 0.000000,\n\t\t  \"16\" : 0.000000,\n\t\t  \"32\" : 0.000000,\n\t\t  \"64\" : 0.100000,\n\t\t  \">=64\" : 0.000000\n\t\t},\n\t\t\"latency_ns\" : {\n\t\t  \"2\" : 0.000000,\n\t\t  \"4\" : 0.000000,\n\t\t  \"10\" : 0.000000,\n\t\t  \"20\" : 0.000000,\n\t\t  \"50\" : 0.000000,\n\t\t  \"100\" : 0.000000,\n\t\t  \"250\" : 0.000000,\n\t\t  \"500\" : 0.000000,\n\t\t  \"750\" : 0.000000,\n\t\t  \"1000\" : 0.000000\n\t\t},\n\t\t\"latency_us\" : {\n\t\t  \"2\" : 0.000000,\n\t\t  \"4\" : 0.000000,\n\t\t  \"10\" : 0.000000,\n\t\t  \"20\" : 0.000000,\n\t\t  \"50\" : 0.000000,\n\t\t  \"100\" : 0.000000,\n\t\t  \"250\" : 0.000000,\n\t\t  \"500\" : 0.000000,\n\t\t  \"750\" : 0.000000,\n\t\t  \"1000\" : 0.000000\n\t\t},\n\t\t\"latency_ms\" : {\n\t\t  \"2\" : 0.000000,\n\t\t  \"4\" : 0.000000,\n\t\t  \"10\" : 0.000000,\n\t\t  \"20\" : 0.000000,\n\t\t  \"50\" : 0.000000,\n\t\t  \"100\" : 0.000000,\n\t\t  \"250\" : 0.000000,\n\t\t  \"500\" : 0.000000,\n\t\t  \"750\" : 0.000000,\n\t\t  \"1000\" : 0.000000,\n\t\t  \"2000\" : 0.000000,\n\t\t  \">=2000\" : 0.000000\n\t\t},\n\t\t\"latency_depth\" : 64,\n\t\t\"latency_target\" : 0,\n\t\t\"latency_percentile\" : 100.000000,\n\t\t\"latency_window\" : 0\n\t  },\n\t  {\n\t\t\"jobname\" : 
\"write_iops\",\n\t\t\"groupid\" : 0,\n\t\t\"error\" : 0,\n\t\t\"eta\" : 0,\n\t\t\"elapsed\" : 18,\n\t\t\"job options\" : {\n\t\t  \"name\" : \"write_iops\",\n\t\t  \"bs\" : \"4K\",\n\t\t  \"iodepth\" : \"64\",\n\t\t  \"size\" : \"2G\",\n\t\t  \"rw\" : \"randwrite\",\n\t\t  \"ramp_time\" : \"2s\",\n\t\t  \"runtime\" : \"15s\"\n\t\t},\n\t\t\"read\" : {\n\t\t  \"io_bytes\" : 0,\n\t\t  \"io_kbytes\" : 0,\n\t\t  \"bw_bytes\" : 0,\n\t\t  \"bw\" : 0,\n\t\t  \"iops\" : 0.000000,\n\t\t  \"runtime\" : 0,\n\t\t  \"total_ios\" : 0,\n\t\t  \"short_ios\" : 0,\n\t\t  \"drop_ios\" : 0,\n\t\t  \"slat_ns\" : {\n\t\t\t\"min\" : 0,\n\t\t\t\"max\" : 0,\n\t\t\t\"mean\" : 0.000000,\n\t\t\t\"stddev\" : 0.000000,\n\t\t\t\"N\" : 0\n\t\t  },\n\t\t  \"clat_ns\" : {\n\t\t\t\"min\" : 0,\n\t\t\t\"max\" : 0,\n\t\t\t\"mean\" : 0.000000,\n\t\t\t\"stddev\" : 0.000000,\n\t\t\t\"N\" : 0\n\t\t  },\n\t\t  \"lat_ns\" : {\n\t\t\t\"min\" : 0,\n\t\t\t\"max\" : 0,\n\t\t\t\"mean\" : 0.000000,\n\t\t\t\"stddev\" : 0.000000,\n\t\t\t\"N\" : 0\n\t\t  },\n\t\t  \"bw_min\" : 0,\n\t\t  \"bw_max\" : 0,\n\t\t  \"bw_agg\" : 0.000000,\n\t\t  \"bw_mean\" : 0.000000,\n\t\t  \"bw_dev\" : 0.000000,\n\t\t  \"bw_samples\" : 0,\n\t\t  \"iops_min\" : 0,\n\t\t  \"iops_max\" : 0,\n\t\t  \"iops_mean\" : 0.000000,\n\t\t  \"iops_stddev\" : 0.000000,\n\t\t  \"iops_samples\" : 0\n\t\t},\n\t\t\"write\" : {\n\t\t  \"io_bytes\" : 24805376,\n\t\t  \"io_kbytes\" : 24224,\n\t\t  \"bw_bytes\" : 1616406,\n\t\t  \"bw\" : 1578,\n\t\t  \"iops\" : 390.525218,\n\t\t  \"runtime\" : 15346,\n\t\t  \"total_ios\" : 5993,\n\t\t  \"short_ios\" : 0,\n\t\t  \"drop_ios\" : 0,\n\t\t  \"slat_ns\" : {\n\t\t\t\"min\" : 0,\n\t\t\t\"max\" : 0,\n\t\t\t\"mean\" : 0.000000,\n\t\t\t\"stddev\" : 0.000000,\n\t\t\t\"N\" : 0\n\t\t  },\n\t\t  \"clat_ns\" : {\n\t\t\t\"min\" : 0,\n\t\t\t\"max\" : 0,\n\t\t\t\"mean\" : 0.000000,\n\t\t\t\"stddev\" : 0.000000,\n\t\t\t\"N\" : 0\n\t\t  },\n\t\t  \"lat_ns\" : {\n\t\t\t\"min\" : 0,\n\t\t\t\"max\" : 0,\n\t\t\t\"mean\" : 
0.000000,\n\t\t\t\"stddev\" : 0.000000,\n\t\t\t\"N\" : 0\n\t\t  },\n\t\t  \"bw_min\" : 512,\n\t\t  \"bw_max\" : 2706,\n\t\t  \"bw_agg\" : 100.000000,\n\t\t  \"bw_mean\" : 1581.066667,\n\t\t  \"bw_dev\" : 476.641189,\n\t\t  \"bw_samples\" : 30,\n\t\t  \"iops_min\" : 128,\n\t\t  \"iops_max\" : 676,\n\t\t  \"iops_mean\" : 395.033333,\n\t\t  \"iops_stddev\" : 119.151738,\n\t\t  \"iops_samples\" : 30\n\t\t},\n\t\t\"trim\" : {\n\t\t  \"io_bytes\" : 0,\n\t\t  \"io_kbytes\" : 0,\n\t\t  \"bw_bytes\" : 0,\n\t\t  \"bw\" : 0,\n\t\t  \"iops\" : 0.000000,\n\t\t  \"runtime\" : 0,\n\t\t  \"total_ios\" : 0,\n\t\t  \"short_ios\" : 0,\n\t\t  \"drop_ios\" : 0,\n\t\t  \"slat_ns\" : {\n\t\t\t\"min\" : 0,\n\t\t\t\"max\" : 0,\n\t\t\t\"mean\" : 0.000000,\n\t\t\t\"stddev\" : 0.000000,\n\t\t\t\"N\" : 0\n\t\t  },\n\t\t  \"clat_ns\" : {\n\t\t\t\"min\" : 0,\n\t\t\t\"max\" : 0,\n\t\t\t\"mean\" : 0.000000,\n\t\t\t\"stddev\" : 0.000000,\n\t\t\t\"N\" : 0\n\t\t  },\n\t\t  \"lat_ns\" : {\n\t\t\t\"min\" : 0,\n\t\t\t\"max\" : 0,\n\t\t\t\"mean\" : 0.000000,\n\t\t\t\"stddev\" : 0.000000,\n\t\t\t\"N\" : 0\n\t\t  },\n\t\t  \"bw_min\" : 0,\n\t\t  \"bw_max\" : 0,\n\t\t  \"bw_agg\" : 0.000000,\n\t\t  \"bw_mean\" : 0.000000,\n\t\t  \"bw_dev\" : 0.000000,\n\t\t  \"bw_samples\" : 0,\n\t\t  \"iops_min\" : 0,\n\t\t  \"iops_max\" : 0,\n\t\t  \"iops_mean\" : 0.000000,\n\t\t  \"iops_stddev\" : 0.000000,\n\t\t  \"iops_samples\" : 0\n\t\t},\n\t\t\"sync\" : {\n\t\t  \"total_ios\" : 0,\n\t\t  \"lat_ns\" : {\n\t\t\t\"min\" : 0,\n\t\t\t\"max\" : 0,\n\t\t\t\"mean\" : 0.000000,\n\t\t\t\"stddev\" : 0.000000,\n\t\t\t\"N\" : 0\n\t\t  }\n\t\t},\n\t\t\"job_runtime\" : 15345,\n\t\t\"usr_cpu\" : 0.508309,\n\t\t\"sys_cpu\" : 2.280873,\n\t\t\"ctx\" : 7411,\n\t\t\"majf\" : 1,\n\t\t\"minf\" : 63,\n\t\t\"iodepth_level\" : {\n\t\t  \"1\" : 0.000000,\n\t\t  \"2\" : 0.000000,\n\t\t  \"4\" : 0.000000,\n\t\t  \"8\" : 0.000000,\n\t\t  \"16\" : 0.000000,\n\t\t  \"32\" : 0.000000,\n\t\t  \">=64\" : 100.000000\n\t\t},\n\t\t\"iodepth_submit\" : 
{\n\t\t  \"0\" : 0.000000,\n\t\t  \"4\" : 100.000000,\n\t\t  \"8\" : 0.000000,\n\t\t  \"16\" : 0.000000,\n\t\t  \"32\" : 0.000000,\n\t\t  \"64\" : 0.000000,\n\t\t  \">=64\" : 0.000000\n\t\t},\n\t\t\"iodepth_complete\" : {\n\t\t  \"0\" : 0.000000,\n\t\t  \"4\" : 99.983317,\n\t\t  \"8\" : 0.000000,\n\t\t  \"16\" : 0.000000,\n\t\t  \"32\" : 0.000000,\n\t\t  \"64\" : 0.100000,\n\t\t  \">=64\" : 0.000000\n\t\t},\n\t\t\"latency_ns\" : {\n\t\t  \"2\" : 0.000000,\n\t\t  \"4\" : 0.000000,\n\t\t  \"10\" : 0.000000,\n\t\t  \"20\" : 0.000000,\n\t\t  \"50\" : 0.000000,\n\t\t  \"100\" : 0.000000,\n\t\t  \"250\" : 0.000000,\n\t\t  \"500\" : 0.000000,\n\t\t  \"750\" : 0.000000,\n\t\t  \"1000\" : 0.000000\n\t\t},\n\t\t\"latency_us\" : {\n\t\t  \"2\" : 0.000000,\n\t\t  \"4\" : 0.000000,\n\t\t  \"10\" : 0.000000,\n\t\t  \"20\" : 0.000000,\n\t\t  \"50\" : 0.000000,\n\t\t  \"100\" : 0.000000,\n\t\t  \"250\" : 0.000000,\n\t\t  \"500\" : 0.000000,\n\t\t  \"750\" : 0.000000,\n\t\t  \"1000\" : 0.000000\n\t\t},\n\t\t\"latency_ms\" : {\n\t\t  \"2\" : 0.000000,\n\t\t  \"4\" : 0.000000,\n\t\t  \"10\" : 0.000000,\n\t\t  \"20\" : 0.000000,\n\t\t  \"50\" : 0.000000,\n\t\t  \"100\" : 0.000000,\n\t\t  \"250\" : 0.000000,\n\t\t  \"500\" : 0.000000,\n\t\t  \"750\" : 0.000000,\n\t\t  \"1000\" : 0.000000,\n\t\t  \"2000\" : 0.000000,\n\t\t  \">=2000\" : 0.000000\n\t\t},\n\t\t\"latency_depth\" : 64,\n\t\t\"latency_target\" : 0,\n\t\t\"latency_percentile\" : 100.000000,\n\t\t\"latency_window\" : 0\n\t  }\n\t],\n\t\"disk_util\" : [\n\t  {\n\t\t\"name\" : \"rbd4\",\n\t\t\"read_ios\" : 16957,\n\t\t\"write_ios\" : 6896,\n\t\t\"read_merges\" : 0,\n\t\t\"write_merges\" : 207,\n\t\t\"read_ticks\" : 1072290,\n\t\t\"write_ticks\" : 1043421,\n\t\t\"in_queue\" : 2119036,\n\t\t\"util\" : 99.712875\n\t  }\n\t]\n  }`\n"
  },
  {
    "path": "pkg/kubestr/csi-drivers.go",
    "content": "package kubestr\n\n// THIS FILE IS AUTO_GENERATED.\n// To generate file run \"go generate\" at the top level\n// This file must be checked in.\n\nvar CSIDriverList = []*CSIDriver{\n\t{NameUrl: \"[Alicloud Disk](https://github.com/AliyunContainerService/csi-plugin)\", DriverName: \"diskplugin.csi.alibabacloud.com\", Versions: \"v1.0\", Description: \"A Container Storage Interface (CSI) Driver for Alicloud Disk\", Persistence: \"Persistent\", AccessModes: \"Read/Write Single Pod\", DynamicProvisioning: \"Yes\", Features: \"Raw Block, Snapshot\"},\n\t{NameUrl: \"[Alicloud NAS](https://github.com/AliyunContainerService/csi-plugin)\", DriverName: \"nasplugin.csi.alibabacloud.com\", Versions: \"v1.0\", Description: \"A Container Storage Interface (CSI) Driver for Alicloud Network Attached Storage (NAS)\", Persistence: \"Persistent\", AccessModes: \"Read/Write Multiple Pods\", DynamicProvisioning: \"No\", Features: \"\"},\n\t{NameUrl: \"[Alicloud OSS](https://github.com/AliyunContainerService/csi-plugin)\", DriverName: \"ossplugin.csi.alibabacloud.com\", Versions: \"v1.0\", Description: \"A Container Storage Interface (CSI) Driver for Alicloud Object Storage Service (OSS)\", Persistence: \"Persistent\", AccessModes: \"Read/Write Multiple Pods\", DynamicProvisioning: \"No\", Features: \"\"},\n\t{NameUrl: \"[ArStor CSI](https://github.com/huayun-docs/csi-driver-arstor)\", DriverName: \"arstor.csi.huayun.io\", Versions: \"v1.0\", Description: \"A Container Storage Interface (CSI) Driver for Huayun Storage Service (ArStor)\", Persistence: \"Persistent and Ephemeral\", AccessModes: \"Read/Write Single Pod\", DynamicProvisioning: \"Yes\", Features: \"Raw Block, Snapshot, Expansion, Cloning\"},\n\t{NameUrl: \"[AWS Elastic Block Storage](https://github.com/kubernetes-sigs/aws-ebs-csi-driver)\", DriverName: \"ebs.csi.aws.com\", Versions: \"v0.3, v1.0\", Description: \"A Container Storage Interface (CSI) Driver for AWS Elastic Block Storage (EBS)\", Persistence: 
\"Persistent\", AccessModes: \"Read/Write Single Pod\", DynamicProvisioning: \"Yes\", Features: \"Raw Block, Snapshot, Expansion\"},\n\t{NameUrl: \"[AWS Elastic File System](https://github.com/aws/aws-efs-csi-driver)\", DriverName: \"efs.csi.aws.com\", Versions: \"v0.3, v1.0\", Description: \"A Container Storage Interface (CSI) Driver for AWS Elastic File System (EFS)\", Persistence: \"Persistent\", AccessModes: \"Read/Write Multiple Pods\", DynamicProvisioning: \"No\", Features: \"\"},\n\t{NameUrl: \"[AWS FSx for Lustre](https://github.com/aws/aws-fsx-csi-driver)\", DriverName: \"fsx.csi.aws.com\", Versions: \"v0.3, v1.0\", Description: \"A Container Storage Interface (CSI) Driver for AWS FSx for Lustre (EBS)\", Persistence: \"Persistent\", AccessModes: \"Read/Write Multiple Pods\", DynamicProvisioning: \"Yes\", Features: \"\"},\n\t{NameUrl: \"[Azure disk](https://github.com/kubernetes-sigs/azuredisk-csi-driver)\", DriverName: \"disk.csi.azure.com\", Versions: \"v0.3, v1.0\", Description: \"A Container Storage Interface (CSI) Driver for Azure disk\", Persistence: \"Persistent\", AccessModes: \"Read/Write Single Pod\", DynamicProvisioning: \"Yes\", Features: \"\"},\n\t{NameUrl: \"[Azure file](https://github.com/kubernetes-sigs/azurefile-csi-driver)\", DriverName: \"file.csi.azure.com\", Versions: \"v0.3, v1.0\", Description: \"A Container Storage Interface (CSI) Driver for Azure file\", Persistence: \"Persistent\", AccessModes: \"Read/Write Multiple Pods\", DynamicProvisioning: \"Yes\", Features: \"\"},\n\t{NameUrl: \"[BeeGFS](https://github.com/NetApp/beegfs-csi-driver)\", DriverName: \"beegfs.csi.netapp.com\", Versions: \"v1.3\", Description: \"A Container Storage Interface (CSI) Driver for the [BeeGFS](https://www.beegfs.io/) Parallel File System\", Persistence: \"Persistent\", AccessModes: \"Read/Write Multiple Pods\", DynamicProvisioning: \"Yes\", Features: \"\"},\n\t{NameUrl: \"[Bigtera VirtualStor (block)](https://github.com/bigtera-ce/ceph-csi)\", 
DriverName: \"csi.block.bigtera.com\", Versions: \"v0.3, v1.0.0, v1.1.0\", Description: \"A Container Storage Interface (CSI) Driver for Bigtera VirtualStor block storage\", Persistence: \"Persistent\", AccessModes: \"Read/Write Single Pod\", DynamicProvisioning: \"Yes\", Features: \"Raw Block, Snapshot, Expansion\"},\n\t{NameUrl: \"[Bigtera VirtualStor (filesystem)](https://github.com/bigtera-ce/ceph-csi)\", DriverName: \"csi.fs.bigtera.com\", Versions: \"v0.3, v1.0.0, v1.1.0\", Description: \"A Container Storage Interface (CSI)  Driver for Bigtera VirtualStor filesystem\", Persistence: \"Persistent\", AccessModes: \"Read/Write Multiple Pods\", DynamicProvisioning: \"Yes\", Features: \"Expansion\"},\n\t{NameUrl: \"[BizFlyCloud Block Storage](https://github.com/bizflycloud/csi-bizflycloud)\", DriverName: \"volume.csi.bizflycloud.vn\", Versions: \"v1.2\", Description: \"A Container Storage Interface (CSI) Driver for BizFly Cloud block storage\", Persistence: \"Persistent\", AccessModes: \"Read/Write Single Pod\", DynamicProvisioning: \"Yes\", Features: \"Raw Block, Snapshot, Expansion\"},\n\t{NameUrl: \"[CephFS](https://github.com/ceph/ceph-csi)\", DriverName: \"cephfs.csi.ceph.com\", Versions: \"v0.3, >=v1.0.0\", Description: \"A Container Storage Interface (CSI) Driver for CephFS\", Persistence: \"Persistent\", AccessModes: \"Read/Write Multiple Pods\", DynamicProvisioning: \"Yes\", Features: \"Expansion, Snapshot, Cloning\"},\n\t{NameUrl: \"[Ceph RBD](https://github.com/ceph/ceph-csi)\", DriverName: \"rbd.csi.ceph.com\", Versions: \"v0.3, >=v1.0.0\", Description: \"A Container Storage Interface (CSI)  Driver for Ceph RBD\", Persistence: \"Persistent\", AccessModes: \"Read/Write Single Pod\", DynamicProvisioning: \"Yes\", Features: \"Raw Block, Snapshot, Expansion, Topology, Cloning\"},\n\t{NameUrl: \"[ChubaoFS](https://github.com/chubaofs/chubaofs-csi)\", DriverName: \"csi.chubaofs.com\", Versions: \"v1.0.0\", Description: \"A Container Storage Interface (CSI) 
Driver for ChubaoFS Storage\", Persistence: \"Persistent\", AccessModes: \"Read/Write Multiple Pods\", DynamicProvisioning: \"Yes\", Features: \"\"},\n\t{NameUrl: \"[Cinder](https://github.com/kubernetes/cloud-provider-openstack/tree/master/pkg/csi/cinder)\", DriverName: \"cinder.csi.openstack.org\", Versions: \"v0.3, v1.0, v1.1.0, v1.2.0, v1.3.0\", Description: \"A Container Storage Interface (CSI) Driver for OpenStack Cinder\", Persistence: \"Persistent and Ephemeral\", AccessModes: \"Depends on the storage backend used\", DynamicProvisioning: \"Yes, if storage backend supports it\", Features: \"Raw Block, Snapshot, Expansion, Cloning, Topology\"},\n\t{NameUrl: \"[cloudscale.ch](https://github.com/cloudscale-ch/csi-cloudscale)\", DriverName: \"csi.cloudscale.ch\", Versions: \"v1.0\", Description: \"A Container Storage Interface (CSI) Driver for the [cloudscale.ch](https://www.cloudscale.ch/) IaaS platform\", Persistence: \"Persistent\", AccessModes: \"Read/Write Single Pod\", DynamicProvisioning: \"Yes\", Features: \"Snapshot\"},\n\t{NameUrl: \"[Datatom-InfinityCSI](https://github.com/datatom-infinity/infinity-csi)\", DriverName: \"csi-infiblock-plugin\", Versions: \"v0.3, v1.0.0, v1.1.0\", Description: \"A Container Storage Interface (CSI)  Driver for DATATOM Infinity storage\", Persistence: \"Persistent\", AccessModes: \"Read/Write Single Pod\", DynamicProvisioning: \"Yes\", Features: \"Raw Block, Snapshot, Expansion, Topology\"},\n\t{NameUrl: \"[Datatom-InfinityCSI (filesystem)](https://github.com/datatom-infinity/infinity-csi)\", DriverName: \"csi-infifs-plugin\", Versions: \"v0.3, v1.0.0, v1.1.0\", Description: \"A Container Storage Interface (CSI)  Driver for DATATOM Infinity filesystem storage\", Persistence: \"Persistent\", AccessModes: \"Read/Write Multiple Pods\", DynamicProvisioning: \"Yes\", Features: \"Expansion\"},\n\t{NameUrl: \"[Datera](https://github.com/Datera/datera-csi)\", DriverName: \"dsp.csi.daterainc.io\", Versions: \"v1.0\", Description: 
\"A Container Storage Interface (CSI) Driver for Datera Data Services Platform (DSP)\", Persistence: \"Persistent\", AccessModes: \"Read/Write Single Pod\", DynamicProvisioning: \"Yes\", Features: \"Snapshot\"},\n\t{NameUrl: \"[DDN EXAScaler](https://github.com/DDNStorage/exa-csi-driver)\", DriverName: \"exa.csi.ddn.com\", Versions: \"v1.0, v1.1\", Description: \"A Container Storage Interface (CSI) Driver for DDN EXAScaler filesystems\", Persistence: \"Persistent\", AccessModes: \"Read/Write Multiple Pods\", DynamicProvisioning: \"Yes\", Features: \"Expansion\"},\n\t{NameUrl: \"[Dell EMC PowerMax](https://github.com/dell/csi-powermax)\", DriverName: \"csi-powermax.dellemc.com\", Versions: \"v1.1\", Description: \"A Container Storage Interface (CSI) Driver for [Dell EMC PowerMax](https://www.delltechnologies.com/en-us/storage/powermax.htm)\", Persistence: \"Persistent\", AccessModes: \"Read/Write Single Pod\", DynamicProvisioning: \"Yes\", Features: \"Raw Block, Snapshot, Expansion, Cloning, Topology\"},\n\t{NameUrl: \"[Dell EMC PowerScale](https://github.com/dell/csi-powerscale)\", DriverName: \"csi-isilon.dellemc.com\", Versions: \"v1.1\", Description: \"A Container Storage Interface (CSI) Driver for [Dell EMC PowerScale](https://www.delltechnologies.com/en-us/storage/powerscale.htm)\", Persistence: \"Persistent and Ephemeral\", AccessModes: \"Read/Write Multiple Pods\", DynamicProvisioning: \"Yes\", Features: \"Snapshot, Expansion, Cloning, Topology\"},\n\t{NameUrl: \"[Dell EMC PowerStore](https://github.com/dell/csi-powerstore)\", DriverName: \"csi-powerstore.dellemc.com\", Versions: \"v1.1\", Description: \"A Container Storage Interface (CSI) Driver for [Dell EMC PowerStore](https://www.delltechnologies.com/en-us/storage/powerstore-storage-appliance.htm)\", Persistence: \"Persistent and Ephemeral\", AccessModes: \"Read/Write Single Pod\", DynamicProvisioning: \"Yes\", Features: \"Raw Block, Snapshot, Expansion, Cloning, Topology\"},\n\t{NameUrl: \"[Dell EMC 
Unity](https://github.com/dell/csi-unity)\", DriverName: \"csi-unity.dellemc.com\", Versions: \"v1.1\", Description: \"A Container Storage Interface (CSI) Driver for [Dell EMC Unity](https://www.delltechnologies.com/en-us/storage/unity.htm)\", Persistence: \"Persistent and Ephemeral\", AccessModes: \"Read/Write Single Pod\", DynamicProvisioning: \"Yes\", Features: \"Raw Block, Snapshot, Expansion, Cloning, Topology\"},\n\t{NameUrl: \"[Dell EMC VxFlexOS](https://github.com/dell/csi-vxflexos)\", DriverName: \"csi-vxflexos.dellemc.com\", Versions: \"v1.1\", Description: \"A Container Storage Interface (CSI) Driver for [Dell EMC VxFlexOS](https://www.delltechnologies.com/en-us/hyperconverged-infrastructure/vxflex.htm)\", Persistence: \"Persistent\", AccessModes: \"Read/Write Single Pod\", DynamicProvisioning: \"Yes\", Features: \"Raw Block, Snapshot, Expansion, Cloning, Topology\"},\n\t{NameUrl: \"[democratic-csi](https://github.com/democratic-csi/democratic-csi)\", DriverName: \"org.democratic-csi\", Versions: \"v1.0,v1.1,v1.2,v1.3,v1.4,v1.5\", Description: \"Generic CSI plugin supporting zfs based solutions ([FreeNAS](https://www.freenas.org/) / [TrueNAS](https://www.truenas.com/) and [ZoL](https://zfsonlinux.org/) solutions such as [Ubuntu](https://ubuntu.com/)), [Synology](https://www.synology.com/), and more\", Persistence: \"Persistent and Ephemeral\", AccessModes: \"Read/Write Single Pod (Block Volume) <br/><br/> Read/Write Multiple Pods (File Volume)\", DynamicProvisioning: \"Yes\", Features: \"Raw Block, Snapshot, Expansion, Cloning\"},\n\t{NameUrl: \"[Diamanti-CSI](https://diamanti.com/use-cases/io-acceleration/#csi)\", DriverName: \"dcx.csi.diamanti.com\", Versions: \"v1.0\", Description: \"A Container Storage Interface (CSI) Driver for Diamanti DCX Platform\", Persistence: \"Persistent\", AccessModes: \"Read/Write Single Pod\", DynamicProvisioning: \"Yes\", Features: \"Raw Block, Snapshot, Expansion\"},\n\t{NameUrl: \"[DigitalOcean Block 
Storage](https://github.com/digitalocean/csi-digitalocean)\", DriverName: \"dobs.csi.digitalocean.com\", Versions: \"v0.3, v1.0\", Description: \"A Container Storage Interface (CSI) Driver for DigitalOcean Block Storage\", Persistence: \"Persistent\", AccessModes: \"Read/Write Single Pod\", DynamicProvisioning: \"Yes\", Features: \"Raw Block, Snapshot, Expansion\"},\n\t{NameUrl: \"[Dothill-CSI](https://github.com/enix/dothill-csi)\", DriverName: \"dothill.csi.enix.io\", Versions: \"v1.3\", Description: \"Generic CSI plugin supporting [Seagate AssuredSan](https://www.seagate.com/fr/fr/support/dothill-san/assuredsan-pro-5000-series/) appliances such as [HPE MSA](https://www.hpe.com/us/en/storage/flash-hybrid.html), [Dell EMC PowerVault ME4](https://www.dell.com/fr-fr/work/shop/productdetailstxn/powervault-me4-series) and others ...\", Persistence: \"Persistent\", AccessModes: \"Read/Write Single Node\", DynamicProvisioning: \"Yes\", Features: \"Snapshot, Expansion\"},\n\t{NameUrl: \"[Ember CSI](https://ember-csi.io)\", DriverName: \"ember-csi.io\", Versions: \"v0.2, v0.3, v1.0\", Description: \"Multi-vendor CSI plugin supporting over 80 Drivers to provide block and mount storage to Container Orchestration systems.\", Persistence: \"Persistent\", AccessModes: \"Read/Write Single Pod\", DynamicProvisioning: \"Yes\", Features: \"Raw Block, Snapshot\"},\n\t{NameUrl: \"[Excelero NVMesh](https://github.com/Excelero/nvmesh-csi-driver)\", DriverName: \"nvmesh-csi.excelero.com\", Versions: \"v1.0, v1.1\", Description: \"A Container Storage Interface (CSI) Driver for Excelero NVMesh\", Persistence: \"Persistent\", AccessModes: \"Read/Write Multiple Pods\", DynamicProvisioning: \"Yes\", Features: \"Raw Block, Expansion\"},\n\t{NameUrl: \"[GCE Persistent Disk](https://github.com/kubernetes-sigs/gcp-compute-persistent-disk-csi-driver)\", DriverName: \"pd.csi.storage.gke.io\", Versions: \"v0.3, v1.0\", Description: \"A Container Storage Interface (CSI) Driver for Google Compute 
Engine Persistent Disk (GCE PD)\", Persistence: \"Persistent\", AccessModes: \"Read/Write Single Pod\", DynamicProvisioning: \"Yes\", Features: \"Raw Block, Snapshot, Expansion, Topology\"},\n\t{NameUrl: \"[Google Cloud Filestore](https://github.com/kubernetes-sigs/gcp-filestore-csi-driver)\", DriverName: \"com.google.csi.filestore\", Versions: \"v0.3\", Description: \"A Container Storage Interface (CSI) Driver for Google Cloud Filestore\", Persistence: \"Persistent\", AccessModes: \"Read/Write Multiple Pods\", DynamicProvisioning: \"Yes\", Features: \"\"},\n\t{NameUrl: \"[Google Cloud Storage](https://github.com/ofek/csi-gcs)\", DriverName: \"gcs.csi.ofek.dev\", Versions: \"v1.0\", Description: \"A Container Storage Interface (CSI) Driver for Google Cloud Storage\", Persistence: \"Persistent and Ephemeral\", AccessModes: \"Read/Write Multiple Pods\", DynamicProvisioning: \"Yes\", Features: \"Expansion\"},\n\t{NameUrl: \"[GlusterFS](https://github.com/gluster/gluster-csi-driver)\", DriverName: \"org.gluster.glusterfs\", Versions: \"v0.3, v1.0\", Description: \"A Container Storage Interface (CSI) Driver for GlusterFS\", Persistence: \"Persistent\", AccessModes: \"Read/Write Multiple Pods\", DynamicProvisioning: \"Yes\", Features: \"Snapshot\"},\n\t{NameUrl: \"[Gluster VirtBlock](https://github.com/gluster/gluster-csi-driver)\", DriverName: \"org.gluster.glustervirtblock\", Versions: \"v0.3, v1.0\", Description: \"A Container Storage Interface (CSI) Driver for Gluster Virtual Block volumes\", Persistence: \"Persistent\", AccessModes: \"Read/Write Single Pod\", DynamicProvisioning: \"Yes\", Features: \"\"},\n\t{NameUrl: \"[Hammerspace CSI](https://github.com/hammer-space/csi-plugin)\", DriverName: \"com.hammerspace.csi\", Versions: \"v0.3, v1.0\", Description: \"A Container Storage Interface (CSI) Driver for Hammerspace Storage\", Persistence: \"Persistent\", AccessModes: \"Read/Write Multiple Pods\", DynamicProvisioning: \"Yes\", Features: \"Raw Block, 
Snapshot\"},\n\t{NameUrl: \"[Hedvig](https://documentation.commvault.com/commvault/hedvig/others/pdf/Hedvig_CSI_User_Guide.pdf)\", DriverName: \"io.hedvig.csi\", Versions: \"v1.0\", Description: \"A Container Storage Interface (CSI) Driver for Hedvig\", Persistence: \"Persistent\", AccessModes: \"Read/Write Multiple Pods\", DynamicProvisioning: \"Yes\", Features: \"Raw Block, Snapshot, Expansion\"},\n\t{NameUrl: \"[Hetzner Cloud Volumes CSI](https://github.com/hetznercloud/csi-driver)\", DriverName: \"csi.hetzner.cloud\", Versions: \"v0.3, v1.0\", Description: \"A Container Storage Interface (CSI) Driver for Hetzner Cloud Volumes\", Persistence: \"Persistent\", AccessModes: \"Read/Write Single Pod\", DynamicProvisioning: \"Yes\", Features: \"Raw Block, Expansion\"},\n\t{NameUrl: \"[Hitachi Vantara](https://knowledge.hitachivantara.com/Documents/Adapters_and_Drivers/Storage_Adapters_and_Drivers/Containers)\", DriverName: \"hspc.csi.hitachi.com\", Versions: \"v1.2\", Description: \"A Container Storage Interface (CSI) Driver for VSP series Storage\", Persistence: \"Persistent\", AccessModes: \"Read/Write Single Pod\", DynamicProvisioning: \"Yes\", Features: \"Raw Block, Snapshot, Expansion, Cloning\"},\n\t{NameUrl: \"[HPE](https://github.com/hpe-storage/csi-driver)\", DriverName: \"csi.hpe.com\", Versions: \"v1.3\", Description: \"A [multi-platform](https://scod.hpedev.io/csi_driver) Container Storage Interface (CSI) driver. 
Supports [HPE Alletra](https://hpe.com/storage/alletra), [Nimble Storage](https://hpe.com/storage/nimble), [Primera](https://hpe.com/storage/primera) and [3PAR](https://hpe.com/storage/3par)\", Persistence: \"Persistent and Ephemeral\", AccessModes: \"Read/Write Multiple Pods\", DynamicProvisioning: \"Yes\", Features: \"Raw Block, Snapshot, Expansion, Cloning\"},\n\t{NameUrl: \"[HPE Ezmeral (MapR)](https://github.com/mapr/mapr-csi)\", DriverName: \"com.mapr.csi-kdf\", Versions: \"v1.3\", Description: \"A Container Storage Interface (CSI) Driver for HPE Ezmeral Data Fabric\", Persistence: \"Persistent\", AccessModes: \"Read/Write Multiple Pods\", DynamicProvisioning: \"Yes\", Features: \"Raw Block, Snapshot, Expansion, Cloning\"},\n\t{NameUrl: \"[Huawei Storage CSI](https://github.com/Huawei/eSDK_K8S_Plugin)\", DriverName: \"csi.huawei.com\", Versions: \"v1.0, v1.1, v1.2\", Description: \"A Container Storage Interface (CSI) Driver for FusionStorage, OceanStor 100D, OceanStor Pacific, OceanStor Dorado V3, OceanStor Dorado V6, OceanStor V3, OceanStor V5\", Persistence: \"Persistent\", AccessModes: \"Read/Write Multiple Pod\", DynamicProvisioning: \"Yes\", Features: \"Snapshot, Expansion, Cloning\"},\n\t{NameUrl: \"[HyperV CSI](https://github.com/Zetanova/hyperv-csi-driver)\", DriverName: \"eu.zetanova.csi.hyperv\", Versions: \"v1.0, v1.1\", Description: \"A Container Storage Interface (CSI) driver to manage hyperv hosts\", Persistence: \"Persistent\", AccessModes: \"Read/Write Multiple Pods\", DynamicProvisioning: \"Yes\", Features: \"\"},\n\t{NameUrl: \"[IBM Block Storage](https://github.com/ibm/ibm-block-csi-driver)\", DriverName: \"block.csi.ibm.com\", Versions: \"v1.0, v1.1, v1.2\", Description: \"A Container Storage Interface (CSI) [Driver](https://www.ibm.com/docs/en/stg-block-csi-driver) for IBM Spectrum Virtualize Family, IBM FlashSystem A9000 and A9000R, IBM DS8000 Family 8.x and higher.\", Persistence: \"Persistent\", AccessModes: \"Read/Write Single Pod\", 
DynamicProvisioning: \"Yes\", Features: \"Raw Block, Snapshot, Expansion, Cloning, Topology\"},\n\t{NameUrl: \"[IBM Spectrum Scale](https://github.com/IBM/ibm-spectrum-scale-csi)\", DriverName: \"spectrumscale.csi.ibm.com\", Versions: \"v1.0, v1.1\", Description: \"A Container Storage Interface (CSI) [Driver](https://www.ibm.com/docs/en/spectrum-scale-csi) for the IBM Spectrum Scale File System\", Persistence: \"Persistent\", AccessModes: \"Read/Write Multiple Pod\", DynamicProvisioning: \"Yes\", Features: \"Snapshot\"},\n\t{NameUrl: \"[IBM Cloud Block Storage VPC CSI Driver](https://cloud.ibm.com/docs/containers?topic=containers-vpc-block)\", DriverName: \"vpc.block.csi.ibm.io\", Versions: \"v1.0\", Description: \"A Container Storage Interface (CSI) [Driver](https://cloud.ibm.com/docs/containers?topic=containers-vpc-block) for IBM Cloud Kubernetes Service and Red Hat OpenShift on IBM Cloud\", Persistence: \"Persistent\", AccessModes: \"Read/Write Single Pod\", DynamicProvisioning: \"Yes\", Features: \"Raw Block\"},\n\t{NameUrl: \"[Infinidat](https://github.com/Infinidat/infinibox-csi-driver)\", DriverName: \"infinibox-csi-driver\", Versions: \"v1.0, v1.1\", Description: \"A Container Storage Interface (CSI) Driver for Infinidat [InfiniBox](https://infinidat.com/en/products-technology/infinibox)\", Persistence: \"Persistent\", AccessModes: \"Read/Write Multiple Pods\", DynamicProvisioning: \"Yes\", Features: \"Raw Block, Snapshot, Expansion, Cloning\"},\n\t{NameUrl: \"[Inspur InStorage CSI](https://github.com/OpenInspur/instorage-k8s)\", DriverName: \"csi-instorage\", Versions: \"v1.0\", Description: \"A Container Storage Interface (CSI) Driver for inspur AS/HF/CS/CF Series Primary Storage, inspur AS13000 Series SDS Storage\", Persistence: \"Persistent\", AccessModes: \"Read/Write Single Pod\", DynamicProvisioning: \"Yes\", Features: \"Raw Block, Snapshot, Expansion, Cloning\"},\n\t{NameUrl: \"[Intel PMEM-CSI](https://github.com/intel/pmem-csi)\", DriverName: 
\"pmem-csi.intel.com\", Versions: \"v1.0\", Description: \"A Container Storage Interface (CSI) driver for [PMEM](https://pmem.io/) from Intel\", Persistence: \"Persistent and Ephemeral\", AccessModes: \"Read/Write Single Pod\", DynamicProvisioning: \"Yes\", Features: \"Raw Block\"},\n\t{NameUrl: \"[Intelliflash Block Storage](https://github.com/DDNStorage/intelliflash-csi-block-driver)\", DriverName: \"intelliflash-csi-block-driver.intelliflash.com\", Versions: \"v1.0, v1.1, v1.2\", Description: \"A Container Storage Interface (CSI) Driver for Intelliflash  Block Storage\", Persistence: \"Persistent\", AccessModes: \"Read/Write Multiple Pods\", DynamicProvisioning: \"Yes\", Features: \"Snapshot, Expansion, Cloning, Topology\"},\n\t{NameUrl: \"[Intelliflash File Storage](https://github.com/DDNStorage/intelliflash-csi-file-driver)\", DriverName: \"intelliflash-csi-file-driver.intelliflash.com\", Versions: \"v1.0, v1.1, v1.2\", Description: \"A Container Storage Interface (CSI) Driver for Intelliflash  File Storage\", Persistence: \"Persistent\", AccessModes: \"Read/Write Multiple Pods\", DynamicProvisioning: \"Yes\", Features: \"Snapshot, Expansion, Cloning, Topology\"},\n\t{NameUrl: \"[ionir ](https://github.com/ionir-cloud)\", DriverName: \"ionir\", Versions: \"v1.2\", Description: \"A Container Storage Interface (CSI) Driver for [ionir](https://www.ionir.com/) Kubernetes-Native Storage\", Persistence: \"Persistent\", AccessModes: \"Read/Write Single Pod\", DynamicProvisioning: \"Yes\", Features: \"Raw Block, Cloning\"},\n\t{NameUrl: \"[JuiceFS](https://github.com/juicedata/juicefs-csi-driver)\", DriverName: \"csi.juicefs.com\", Versions: \"v0.3, v1.0\", Description: \"A Container Storage Interface (CSI) Driver for JuiceFS File System\", Persistence: \"Persistent\", AccessModes: \"Read/Write Multiple Pod\", DynamicProvisioning: \"Yes\", Features: \"\"},\n\t{NameUrl: \"[kaDalu](https://github.com/kadalu/kadalu)\", DriverName: \"org.kadalu.gluster\", Versions: 
\"v0.3\", Description: \"A CSI Driver (and operator) for GlusterFS\", Persistence: \"Persistent\", AccessModes: \"Read/Write Multiple Pods\", DynamicProvisioning: \"Yes\", Features: \"\"},\n\t{NameUrl: \"[KumoScale Block Storage](https://github.com/KioxiaAmerica/kumoscale-csi)\", DriverName: \"kumoscale.kioxia.com\", Versions: \"v1.0\", Description: \"A Container Storage Interface (CSI) Driver for KumoScale Block Storage\", Persistence: \"Persistent\", AccessModes: \"Read/Write Single Pod\", DynamicProvisioning: \"Yes\", Features: \"Raw Block, Snapshot, Expansion, Topology\"},\n\t{NameUrl: \"[Linode Block Storage](https://github.com/linode/linode-blockstorage-csi-driver)\", DriverName: \"linodebs.csi.linode.com\", Versions: \"v1.0\", Description: \"A Container Storage Interface (CSI) Driver for Linode Block Storage\", Persistence: \"Persistent\", AccessModes: \"Read/Write Single Pod\", DynamicProvisioning: \"Yes\", Features: \"\"},\n\t{NameUrl: \"[LINSTOR](https://github.com/piraeusdatastore/linstor-csi)\", DriverName: \"linstor.csi.linbit.com\", Versions: \"v1.2\", Description: \"A Container Storage Interface (CSI) Driver for [LINSTOR](https://www.linbit.com/en/linstor/) volumes\", Persistence: \"Persistent\", AccessModes: \"Read/Write Single Pod\", DynamicProvisioning: \"Yes\", Features: \"Raw Block, Snapshot, Expansion, Cloning, Topology\"},\n\t{NameUrl: \"[Longhorn](https://github.com/longhorn/longhorn)\", DriverName: \"driver.longhorn.io\", Versions: \"v1.2\", Description: \"A Container Storage Interface (CSI) Driver for [Longhorn](https://longhorn.io/) volumes\", Persistence: \"Persistent\", AccessModes: \"Read/Write Single Node\", DynamicProvisioning: \"Yes\", Features: \"Raw Block\"},\n\t{NameUrl: \"[MacroSAN](https://github.com/macrosan-csi/macrosan-csi-driver)\", DriverName: \"csi-macrosan\", Versions: \"v1.0\", Description: \"A Container Storage Interface (CSI) Driver for MacroSAN Block Storage\", Persistence: \"Persistent\", AccessModes: \"Read/Write 
Single Pod\", DynamicProvisioning: \"Yes\", Features: \"\"},\n\t{NameUrl: \"[Manila](https://github.com/kubernetes/cloud-provider-openstack/tree/master/pkg/csi/manila)\", DriverName: \"manila.csi.openstack.org\", Versions: \"v1.1, v1.2\", Description: \"A Container Storage Interface (CSI) Driver for OpenStack Shared File System Service (Manila)\", Persistence: \"Persistent\", AccessModes: \"Read/Write Multiple Pods\", DynamicProvisioning: \"Yes\", Features: \"Snapshot, Topology\"},\n\t{NameUrl: \"[MooseFS](https://github.com/moosefs/moosefs-csi)\", DriverName: \"com.tuxera.csi.moosefs\", Versions: \"v1.0\", Description: \"A Container Storage Interface (CSI) Driver for [MooseFS](https://moosefs.com/) clusters.\", Persistence: \"Persistent\", AccessModes: \"Read/Write Multiple Pods\", DynamicProvisioning: \"Yes\", Features: \"\"},\n\t{NameUrl: \"[NetApp](https://github.com/NetApp/trident)\", DriverName: \"csi.trident.netapp.io\", Versions: \"v1.0, v1.1, v1.2, v1.3\", Description: \"A Container Storage Interface (CSI) Driver for NetApp's [Trident](https://netapp-trident.readthedocs.io/) container storage orchestrator\", Persistence: \"Persistent\", AccessModes: \"Read/Write Multiple Pods\", DynamicProvisioning: \"Yes\", Features: \"Raw Block, Snapshot, Expansion, Cloning, Topology\"},\n\t{NameUrl: \"[NexentaStor File Storage](https://github.com/Nexenta/nexentastor-csi-driver)\", DriverName: \"nexentastor-csi-driver.nexenta.com\", Versions: \"v1.0, v1.1, v1.2\", Description: \"A Container Storage Interface (CSI) Driver for NexentaStor  File Storage\", Persistence: \"Persistent\", AccessModes: \"Read/Write Multiple Pods\", DynamicProvisioning: \"Yes\", Features: \"Snapshot, Expansion, Cloning, Topology\"},\n\t{NameUrl: \"[NexentaStor Block Storage](https://github.com/Nexenta/nexentastor-csi-driver-block)\", DriverName: \"nexentastor-block-csi-driver.nexenta.com\", Versions: \"v1.0, v1.1, v1.2\", Description: \"A Container Storage Interface (CSI) Driver for NexentaStor 
over iSCSI protocol\", Persistence: \"Persistent\", AccessModes: \"Read/Write Multiple Pods\", DynamicProvisioning: \"Yes\", Features: \"Snapshot, Expansion, Cloning, Topology, Raw block\"},\n\t{NameUrl: \"[Nutanix](https://github.com/nutanix/csi-plugin)\", DriverName: \"csi.nutanix.com\", Versions: \"v0.3, v1.0, v1.2\", Description: \"A Container Storage Interface (CSI) Driver for Nutanix\", Persistence: \"Persistent\", AccessModes: \"Read/Write Single Pod with Nutanix Volumes and Read/Write Multiple Pods with Nutanix Files\", DynamicProvisioning: \"Yes\", Features: \"Raw Block, Snapshot, Expansion, Cloning\"},\n\t{NameUrl: \"[OpenEBS](https://github.com/openebs/csi)\", DriverName: \"cstor.csi.openebs.io\", Versions: \"v1.0\", Description: \"A Container Storage Interface (CSI) Driver for  [OpenEBS](https://www.openebs.io/)\", Persistence: \"Persistent\", AccessModes: \"Read/Write Single Pod\", DynamicProvisioning: \"Yes\", Features: \"Expansion, Snapshot, Cloning\"},\n\t{NameUrl: \"[Open-E](https://github.com/open-e/JovianDSS-KubernetesCSI)\", DriverName: \"com.open-e.joviandss.csi\", Versions: \"v1.0\", Description: \"A Container Storage Interface (CSI) Driver for Open-E JovianDSS Storage\", Persistence: \"Persistent\", AccessModes: \"Read/Write Single Pod\", DynamicProvisioning: \"Yes\", Features: \"Snapshot, Cloning\"},\n\t{NameUrl: \"[Open-Local](https://github.com/alibaba/open-local)\", DriverName: \"local.csi.alibaba.com\", Versions: \"v1.0\", Description: \"A Container Storage Interface (CSI) Driver for Local Storage\", Persistence: \"Persistent\", AccessModes: \"Read/Write Single Pod\", DynamicProvisioning: \"Yes\", Features: \"Raw Block, Expansion, Snapshot\"},\n\t{NameUrl: \"[Oracle Cloud Infrastructure(OCI) Block Storage](https://github.com/oracle/oci-cloud-controller-manager/blob/master/container-storage-interface.md)\", DriverName: \"blockvolume.csi.oraclecloud.com\", Versions: \"v1.1\", Description: \"A Container Storage Interface (CSI) Driver for 
Oracle Cloud Infrastructure (OCI) Block Storage\", Persistence: \"Persistent\", AccessModes: \"Read/Write Single Pod\", DynamicProvisioning: \"Yes\", Features: \"Topology\"},\n\t{NameUrl: \"[oVirt](https://github.com/openshift/ovirt-csi-driver)\", DriverName: \"csi.ovirt.org\", Versions: \"v1.0\", Description: \"A Container Storage Interface (CSI) Driver for [oVirt](https://ovirt.org)\", Persistence: \"Persistent\", AccessModes: \"Read/Write Single Pod\", DynamicProvisioning: \"Yes\", Features: \"Block, File Storage\"},\n\t{NameUrl: \"[Portworx](https://github.com/libopenstorage/openstorage/tree/master/csi)\", DriverName: \"pxd.portworx.com\", Versions: \"v1.4\", Description: \"A Container Storage Interface (CSI) Driver for [Portworx](https://docs.portworx.com/portworx-install-with-kubernetes/storage-operations/csi/)\", Persistence: \"Persistent and Ephemeral\", AccessModes: \"Read/Write Multiple Pods\", DynamicProvisioning: \"Yes\", Features: \"Snapshot, Expansion, Raw Block, Cloning\"},\n\t{NameUrl: \"[Pure Storage CSI](https://github.com/purestorage/pso-csi)\", DriverName: \"pure-csi\", Versions: \"v1.0, v1.1, v1.2, v1.3\", Description: \"A Container Storage Interface (CSI) Driver for Pure Storage's [Pure Service Orchestrator](https://purestorage.com/containers)\", Persistence: \"Persistent and Ephemeral\", AccessModes: \"Read/Write Multiple Pods\", DynamicProvisioning: \"Yes\", Features: \"Snapshot, Cloning, Raw Block, Topology, Expansion\"},\n\t{NameUrl: \"[QingCloud CSI](https://github.com/yunify/qingcloud-csi)\", DriverName: \"disk.csi.qingcloud.com\", Versions: \"v1.1\", Description: \"A Container Storage Interface (CSI) Driver for QingCloud Block Storage\", Persistence: \"Persistent\", AccessModes: \"Read/Write Single Pod\", DynamicProvisioning: \"Yes\", Features: \"Raw Block, Snapshot, Expansion, Cloning\"},\n\t{NameUrl: \"[QingStor CSI](https://github.com/yunify/qingstor-csi)\", DriverName: \"neonsan.csi.qingstor.com\", Versions: \"v0.3, v1.1\", 
Description: \"A Container Storage Interface (CSI) Driver for NeonSAN storage system\", Persistence: \"Persistent\", AccessModes: \"Read/Write Multiple Pods\", DynamicProvisioning: \"Yes\", Features: \"Raw Block, Snapshot, Expansion, Cloning\"},\n\t{NameUrl: \"[Quobyte](https://github.com/quobyte/quobyte-csi)\", DriverName: \"quobyte-csi\", Versions: \"v0.2\", Description: \"A Container Storage Interface (CSI) Driver for Quobyte\", Persistence: \"Persistent\", AccessModes: \"Read/Write Multiple Pods\", DynamicProvisioning: \"Yes\", Features: \"\"},\n\t{NameUrl: \"[ROBIN](https://get.robin.io/)\", DriverName: \"robin\", Versions: \"v0.3, v1.0\", Description: \"A Container Storage Interface (CSI) Driver for [ROBIN](https://docs.robin.io)\", Persistence: \"Persistent\", AccessModes: \"Read/Write Multiple Pods\", DynamicProvisioning: \"Yes\", Features: \"Raw Block, Snapshot, Expansion, Cloning\"},\n\t{NameUrl: \"[SandStone](https://github.com/sandstone-storage/sandstone-csi-driver)\", DriverName: \"csi-sandstone-plugin\", Versions: \"v1.0\", Description: \"A Container Storage Interface (CSI) Driver for SandStone USP\", Persistence: \"Persistent\", AccessModes: \"Read/Write Multiple Pods\", DynamicProvisioning: \"Yes\", Features: \"Raw Block, Snapshot, Expansion, Cloning\"},\n\t{NameUrl: \"[Sangfor-EDS-File-Storage](https://github.com/evan37717/sangfor-eds-csi)\", DriverName: \"eds.csi.file.sangfor.com\", Versions: \"v1.0\", Description: \"A Container Storage Interface (CSI) Driver for Sangfor Distributed File Storage(EDS)\", Persistence: \"Persistent\", AccessModes: \"Read/Write Multiple Pods\", DynamicProvisioning: \"Yes\", Features: \"Raw Block, Snapshot, Expansion, Cloning\"},\n\t{NameUrl: \"[Sangfor-EDS-Block-Storage](https://github.com/eds-wzc/sangfor-eds-csi)\", DriverName: \"eds.csi.block.sangfor.com\", Versions: \"v1.0\", Description: \"A Container Storage Interface (CSI) Driver for Sangfor Block Storage(EDS)\", Persistence: \"Persistent\", AccessModes: 
\"Read/Write Single Pod\", DynamicProvisioning: \"Yes\", Features: \"Raw Block, Snapshot, Expansion, Cloning\"},\n\t{NameUrl: \"[Scaleway CSI](https://github.com/scaleway/scaleway-csi)\", DriverName: \"csi.scaleway.com\", Versions: \"v1.2.0\", Description: \"Container Storage Interface (CSI) Driver for [Scaleway Block Storage](https://www.scaleway.com/block-storage/)\", Persistence: \"Persistent\", AccessModes: \"Read/Write Single Pod\", DynamicProvisioning: \"Yes\", Features: \"Raw Block, Snapshot, Expansion, Topology\"},\n\t{NameUrl: \"[Seagate Exos X](https://github.com/Seagate/seagate-exos-x-csi)\", DriverName: \"csi-exos-x.seagate.com\", Versions: \"v1.3\", Description: \"CSI driver for [Seagate Exos X](https://www.seagate.com/products/storage/data-storage-systems/raid/) and OEM systems\", Persistence: \"Persistent\", AccessModes: \"Read/Write Single Pod\", DynamicProvisioning: \"Yes\", Features: \"Snapshot, Expansion, Cloning\"},\n\t{NameUrl: \"[SeaweedFS](https://github.com/seaweedfs/seaweedfs-csi-driver)\", DriverName: \"seaweedfs-csi-driver\", Versions: \"v1.0\", Description: \"A Container Storage Interface (CSI Driver for [SeaweedFS](https://github.com/chrislusf/seaweedfs))\", Persistence: \"Persistent\", AccessModes: \"Read/Write Multiple Pods\", DynamicProvisioning: \"Yes\", Features: \"Snapshot, Expansion, Cloning\"},\n\t{NameUrl: \"[Secrets Store CSI Driver](https://github.com/kubernetes-sigs/secrets-store-csi-driver)\", DriverName: \"secrets-store.csi.k8s.io\", Versions: \"v0.0.10\", Description: \"A Container Storage Interface (CSI) Driver for mounting secrets, keys, and certs stored in enterprise-grade external secrets stores as volumes.\", Persistence: \"Ephemeral\", AccessModes: \"N/A\", DynamicProvisioning: \"N/A\", Features: \"\"},\n\t{NameUrl: \"[SmartX](http://www.smartx.com/?locale=en)\", DriverName: \"csi-smtx-plugin\", Versions: \"v1.0\", Description: \"A Container Storage Interface (CSI) Driver for SmartX ZBS Storage\", Persistence: 
\"Persistent\", AccessModes: \"Read/Write Multiple Pods\", DynamicProvisioning: \"Yes\", Features: \"Snapshot, Expansion\"},\n\t{NameUrl: \"[SODA](https://github.com/sodafoundation/nbp/tree/master/csi)\", DriverName: \"csi-soda-plugin\", Versions: \"v1.0\", Description: \"A Container Storage Interface (CSI) Driver for [SODA](https://sodafoundation.io/)\", Persistence: \"Persistent\", AccessModes: \"Read/Write Single Pod\", DynamicProvisioning: \"Yes\", Features: \"Raw Block, Snapshot\"},\n\t{NameUrl: \"[SPDK-CSI](https://github.com/spdk/spdk-csi)\", DriverName: \"csi.spdk.io\", Versions: \"v1.1\", Description: \"A Container Storage Interface (CSI) Driver for [SPDK](https://spdk.io/)\", Persistence: \"Persistent and Ephemeral\", AccessModes: \"Read/Write Single Pod\", DynamicProvisioning: \"Yes\", Features: \"Raw Block, Snapshot\"},\n\t{NameUrl: \"[StorageOS](https://docs.storageos.com/docs/platforms/kubernetes/install/)\", DriverName: \"storageos\", Versions: \"v0.3, v1.0\", Description: \"A Container Storage Interface (CSI) Driver for [StorageOS](https://storageos.com/)\", Persistence: \"Persistent\", AccessModes: \"Read/Write Multiple Pods\", DynamicProvisioning: \"Yes\", Features: \"Raw Block, Snapshot\"},\n\t{NameUrl: \"[Storidge](https://docs.storidge.com/kubernetes_storage/overview.html)\", DriverName: \"csi.cio.storidge.com\", Versions: \"v0.3, v1.0\", Description: \"A Container Storage Interface (CSI) Driver for [Storidge CIO](https://storidge.com/)\", Persistence: \"Persistent\", AccessModes: \"Read/Write Multiple Pods\", DynamicProvisioning: \"Yes\", Features: \"Snapshot, Expansion\"},\n\t{NameUrl: \"[StorPool](https://kb.storpool.com/storpool_integrations/github/kubernetes.html)\", DriverName: \"csi-driver.storpool.com\", Versions: \"v1.0\", Description: \"A Container Storage Interface (CSI) Driver for [StorPool](https://storpool.com/)\", Persistence: \"Persistent and Ephemeral\", AccessModes: \"Read/Write Multiple Pods\", DynamicProvisioning: \"Yes\", 
Features: \"Expansion\"},\n\t{NameUrl: \"[Synology](https://github.com/SynologyOpenSource/synology-csi)\", DriverName: \"csi.san.synology.com\", Versions: \"v1.0\", Description: \"A Container Storage Interface (CSI) Driver for Synology NAS\", Persistence: \"Persistent\", AccessModes: \"Read/Write Multiple Pods\", DynamicProvisioning: \"Yes\", Features: \"Snapshot, Expansion, Cloning\"},\n\t{NameUrl: \"[Tencent Cloud Block Storage](https://github.com/TencentCloud/kubernetes-csi-tencentcloud)\", DriverName: \"com.tencent.cloud.csi.cbs\", Versions: \"v1.0\", Description: \"A Container Storage Interface (CSI) Driver for Tencent Cloud Block Storage\", Persistence: \"Persistent\", AccessModes: \"Read/Write Single Pod\", DynamicProvisioning: \"Yes\", Features: \"Snapshot\"},\n\t{NameUrl: \"[Tencent Cloud File Storage](https://github.com/TencentCloud/kubernetes-csi-tencentcloud)\", DriverName: \"com.tencent.cloud.csi.cfs\", Versions: \"v1.0\", Description: \"A Container Storage Interface (CSI) Driver for Tencent Cloud File Storage\", Persistence: \"Persistent\", AccessModes: \"Read/Write Multiple Pods\", DynamicProvisioning: \"Yes\", Features: \"Snapshot\"},\n\t{NameUrl: \"[Tencent Cloud Object Storage](https://github.com/TencentCloud/kubernetes-csi-tencentcloud)\", DriverName: \"com.tencent.cloud.csi.cosfs\", Versions: \"v1.0\", Description: \"A Container Storage Interface (CSI) Driver for Tencent Cloud Object Storage\", Persistence: \"Persistent\", AccessModes: \"Read/Write Multiple Pods\", DynamicProvisioning: \"No\", Features: \"Snapshot\"},\n\t{NameUrl: \"[TopoLVM](https://github.com/cybozu-go/topolvm)\", DriverName: \"topolvm.cybozu.com\", Versions: \"v1.1\", Description: \"A Container Storage Interface (CSI) Driver for LVM\", Persistence: \"Persistent and Ephemeral\", AccessModes: \"Read/Write Single Pod\", DynamicProvisioning: \"Yes\", Features: \"Raw Block, Expansion, Topology Aware\"},\n\t{NameUrl: \"[VAST Data](https://github.com/vast-data/vast-csi)\", 
DriverName: \"csi.vastdata.com\", Versions: \"v1.0\", Description: \"A Container Storage Interface (CSI) Driver for VAST Data\", Persistence: \"Persistent\", AccessModes: \"Read/Write Multiple Pods\", DynamicProvisioning: \"Yes\", Features: \"Raw Block, Expansion, Topology Aware\"},\n\t{NameUrl: \"[XSKY-EBS](https://xsky-storage.github.io/xsky-csi-driver/csi-block.html)\", DriverName: \"csi.block.xsky.com\", Versions: \"v1.0\", Description: \"A Container Storage Interface (CSI) Driver for XSKY Distributed Block Storage (X-EBS)\", Persistence: \"Persistent\", AccessModes: \"Read/Write Single Pod\", DynamicProvisioning: \"Yes\", Features: \"Raw Block, Snapshot, Expansion, Cloning\"},\n\t{NameUrl: \"[XSKY-EUS](https://xsky-storage.github.io/xsky-csi-driver/csi-fs.html)\", DriverName: \"csi.fs.xsky.com\", Versions: \"v1.0\", Description: \"A Container Storage Interface (CSI) Driver for XSKY Distributed File Storage (X-EUS)\", Persistence: \"Persistent\", AccessModes: \"Read/Write Multiple Pods\", DynamicProvisioning: \"Yes\", Features: \"\"},\n\t{NameUrl: \"[Vault](https://github.com/kubevault/csi-driver)\", DriverName: \"secrets.csi.kubevault.com\", Versions: \"v1.0\", Description: \"A Container Storage Interface (CSI) Driver for mounting HashiCorp Vault secrets as volumes.\", Persistence: \"Ephemeral\", AccessModes: \"N/A\", DynamicProvisioning: \"N/A\", Features: \"\"},\n\t{NameUrl: \"[VDA](https://virtual-disk-array.readthedocs.io/en/latest/Introduction.html)\", DriverName: \"csi.vda.io\", Versions: \"v1.0\", Description: \"An open source block storage system base on SPDK\", Persistence: \"Persistent\", AccessModes: \"Read/Write Single Pod\", DynamicProvisioning: \"N/A\", Features: \"\"},\n\t{NameUrl: \"[Veritas InfoScale Volumes](https://www.veritas.com/solution/virtualization/containers.html)\", DriverName: \"org.veritas.infoscale\", Versions: \"v1.2\", Description: \"A Container Storage Interface (CSI) Driver for Veritas InfoScale volumes\", Persistence: 
\"Persistent\", AccessModes: \"Read/Write Multiple Pods\", DynamicProvisioning: \"Yes\", Features: \"Snapshot, Expansion, Cloning\"},\n\t{NameUrl: \"[vSphere](https://github.com/kubernetes-sigs/vsphere-csi-driver)\", DriverName: \"csi.vsphere.vmware.com\", Versions: \"v1.0\", Description: \"A Container Storage Interface (CSI) Driver for VMware vSphere\", Persistence: \"Persistent\", AccessModes: \"Read/Write Single Pod (Block Volume) <br/><br/> Read/Write Multiple Pods (File Volume)\", DynamicProvisioning: \"Yes\", Features: \"Raw Block,<br/><br/>Expansion (Block Volume),<br/><br/>Topology Aware (Block Volume)\"},\n\t{NameUrl: \"[Vultr Block Storage](https://github.com/vultr/vultr-csi)\", DriverName: \"block.csi.vultr.com\", Versions: \"v1.2\", Description: \"A Container Storage Interface (CSI) Driver for Vultr Block Storage\", Persistence: \"Persistent\", AccessModes: \"Read/Write Single Pod\", DynamicProvisioning: \"Yes\", Features: \"\"},\n\t{NameUrl: \"[WekaIO](https://github.com/weka/csi-wekafs)\", DriverName: \"csi.weka.io\", Versions: \"v1.0\", Description: \"A Container Storage Interface (CSI) Driver for mounting WekaIO WekaFS filesystem as volumes\", Persistence: \"Persistent\", AccessModes: \"Read/Write Multiple Pods\", DynamicProvisioning: \"Yes\", Features: \"\"},\n\t{NameUrl: \"[Yandex.Cloud](https://github.com/flant/yandex-csi-driver)\", DriverName: \"yandex.csi.flant.com\", Versions: \"v1.2\", Description: \"A Container Storage Interface (CSI) plugin for Yandex.Cloud Compute Disks\", Persistence: \"Persistent\", AccessModes: \"Read/Write Single Pod\", DynamicProvisioning: \"Yes\", Features: \"\"},\n\t{NameUrl: \"[YanRongYun](http://www.yanrongyun.com/)\", DriverName: \"?\", Versions: \"v1.0\", Description: \"A Container Storage Interface (CSI) Driver for YanRong YRCloudFile Storage\", Persistence: \"Persistent\", AccessModes: \"Read/Write Multiple Pods\", DynamicProvisioning: \"Yes\", Features: \"\"},\n\t{NameUrl: 
\"[Zadara-CSI](https://github.com/zadarastorage/zadara-csi)\", DriverName: \"csi.zadara.com\", Versions: \"v1.0, v1.1\", Description: \"A Container Storage Interface (CSI) plugin for Zadara VPSA Storage Array & VPSA All-Flash\", Persistence: \"Persistent\", AccessModes: \"Read/Write Multiple Pods\", DynamicProvisioning: \"Yes\", Features: \"Raw Block, Snapshot, Expansion, Cloning\"},\n}\n"
  },
  {
    "path": "pkg/kubestr/kubernetes_checks.go",
    "content": "package kubestr\n\nimport (\n\t\"fmt\"\n\t\"strconv\"\n\n\t\"github.com/pkg/errors\"\n\tv1 \"k8s.io/apimachinery/pkg/apis/meta/v1\"\n\tversion \"k8s.io/apimachinery/pkg/version\"\n)\n\nconst (\n\t// MinK8sMajorVersion is the minimum supported Major version\n\tMinK8sMajorVersion = 1\n\t// MinK8sMinorVersion is the minimum supported Minor version\n\tMinK8sMinorVersion = 12\n\t// MinK8sGitVersion is the minimum supported k8s version\n\tMinK8sGitVersion = \"v1.12.0\"\n\t// RbacGroupName describe hte rbac group name\n\tRbacGroupName = \"rbac.authorization.k8s.io\"\n)\n\n// KubernetesChecks runs all the baseline checks on the cluster\nfunc (p *Kubestr) KubernetesChecks() []*TestOutput {\n\tvar result []*TestOutput\n\tresult = append(result, p.validateK8sVersion())\n\tresult = append(result, p.validateRBAC())\n\tresult = append(result, p.validateAggregatedLayer())\n\treturn result\n}\n\n// validateK8sVersion validates the clusters K8s version\nfunc (p *Kubestr) validateK8sVersion() *TestOutput {\n\ttestName := \"Kubernetes Version Check\"\n\tversion, err := p.validateK8sVersionHelper()\n\tif err != nil {\n\t\treturn MakeTestOutput(testName, StatusError, err.Error(), nil)\n\t}\n\treturn MakeTestOutput(testName, StatusOK, fmt.Sprintf(\"Valid kubernetes version (%s)\", version.String()), version)\n}\n\n// getK8sVersion fetches the k8s vesion\nfunc (p *Kubestr) validateK8sVersionHelper() (*version.Info, error) {\n\tversion, err := p.cli.Discovery().ServerVersion()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tmajorStr := version.Major\n\tif len(majorStr) > 1 && string(majorStr[len(majorStr)-1]) == \"+\" {\n\t\tmajorStr = majorStr[:len(majorStr)-1]\n\t}\n\tmajor, err := strconv.Atoi(majorStr)\n\tif err != nil {\n\t\treturn nil, errors.Wrap(err, \"unable to derive kubernetes major version\")\n\t}\n\n\tminorStr := version.Minor\n\tif len(minorStr) > 1 && string(minorStr[len(minorStr)-1]) == \"+\" {\n\t\tminorStr = minorStr[:len(minorStr)-1]\n\t}\n\tminor, err 
:= strconv.Atoi(minorStr)\n\tif err != nil {\n\t\treturn nil, errors.Wrap(err, \"unable to derive kubernetes minor version\")\n\t}\n\tif (major < MinK8sMajorVersion) ||\n\t\t(major == MinK8sMajorVersion && minor < MinK8sMinorVersion) {\n\t\treturn version, fmt.Errorf(\"current kubernetes version (%s) is not supported, minimum version is %s\", version.String(), MinK8sGitVersion)\n\t}\n\treturn version, nil\n}\n\nfunc (p *Kubestr) validateRBAC() *TestOutput {\n\ttestName := \"RBAC Check\"\n\t//fmt.Println(\"  Checking if Kubernetes RBAC is enabled:\")\n\tgroup, err := p.validateRBACHelper()\n\tif err != nil {\n\t\treturn MakeTestOutput(testName, StatusError, err.Error(), nil)\n\t}\n\treturn MakeTestOutput(testName, StatusOK, \"Kubernetes RBAC is enabled\", *group)\n}\n\n// validateRBACHelper runs the RBAC check\nfunc (p *Kubestr) validateRBACHelper() (*v1.APIGroup, error) {\n\tserverGroups, err := p.cli.Discovery().ServerGroups()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tfor _, group := range serverGroups.Groups {\n\t\tif group.Name == RbacGroupName {\n\t\t\treturn &group, nil\n\t\t}\n\t}\n\treturn nil, fmt.Errorf(\"Kubernetes RBAC is not enabled\") //nolint:staticcheck\n}\n\nfunc (p *Kubestr) validateAggregatedLayer() *TestOutput {\n\ttestName := \"Aggregated Layer Check\"\n\tresourceList, err := p.validateAggregatedLayerHelper()\n\tif err != nil {\n\t\treturn MakeTestOutput(testName, StatusError, err.Error(), nil)\n\t}\n\treturn MakeTestOutput(testName, StatusOK, \"The Kubernetes Aggregated Layer is enabled\", resourceList)\n}\n\n// validateAggregatedLayerHelper checks the aggregated API layer\nfunc (p *Kubestr) validateAggregatedLayerHelper() (*v1.APIResourceList, error) {\n\t_, serverResources, err := p.cli.Discovery().ServerGroupsAndResources()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tfor _, resourceList := range serverResources {\n\t\tif resourceList.GroupVersion == \"apiregistration.k8s.io/v1\" || resourceList.GroupVersion == \"apiregistration.k8s.io/v1beta1\" {\n\t\t\treturn 
resourceList, nil\n\t\t}\n\t}\n\treturn nil, fmt.Errorf(\"can not detect the Aggregated API Layer, is it enabled?\")\n}\n"
  },
  {
    "path": "pkg/kubestr/kubernetes_checks_test.go",
    "content": "package kubestr\n\nimport (\n\t\"testing\"\n\n\tmetav1 \"k8s.io/apimachinery/pkg/apis/meta/v1\"\n\tversion \"k8s.io/apimachinery/pkg/version\"\n\tdiscoveryfake \"k8s.io/client-go/discovery/fake\"\n\t\"k8s.io/client-go/kubernetes/fake\"\n\n\t. \"gopkg.in/check.v1\"\n)\n\nfunc Test(t *testing.T) { TestingT(t) }\n\ntype K8sChecksTestSuite struct{}\n\nvar _ = Suite(&K8sChecksTestSuite{})\n\nfunc (s *K8sChecksTestSuite) TestGetK8sVersion(c *C) {\n\tfor _, tc := range []struct {\n\t\tver     *version.Info\n\t\tchecker Checker\n\t\tout     *version.Info\n\t}{\n\t\t{\n\t\t\tver:     &version.Info{Major: \"1\", Minor: \"17\", GitVersion: \"v1.17\"},\n\t\t\tchecker: IsNil,\n\t\t\tout:     &version.Info{Major: \"1\", Minor: \"17\", GitVersion: \"v1.17\"},\n\t\t},\n\t\t{\n\t\t\tver:     &version.Info{Major: \"1\", Minor: \"11\", GitVersion: \"v1.11\"},\n\t\t\tchecker: NotNil,\n\t\t\tout:     &version.Info{Major: \"1\", Minor: \"11\", GitVersion: \"v1.11\"},\n\t\t},\n\t\t{\n\t\t\tver:     &version.Info{Major: \"1\", Minor: \"\", GitVersion: \"v1.\"},\n\t\t\tchecker: NotNil,\n\t\t\tout:     nil,\n\t\t},\n\t\t{\n\t\t\tver:     &version.Info{Major: \"\", Minor: \"11\", GitVersion: \"v.\"},\n\t\t\tchecker: NotNil,\n\t\t\tout:     nil,\n\t\t},\n\t} {\n\t\tcli := fake.NewSimpleClientset()\n\t\tcli.Discovery().(*discoveryfake.FakeDiscovery).FakedServerVersion = tc.ver\n\t\tp := &Kubestr{cli: cli}\n\t\tout, err := p.validateK8sVersionHelper()\n\t\tc.Assert(out, DeepEquals, tc.out)\n\t\tc.Check(err, tc.checker)\n\t}\n}\n\nfunc (s *K8sChecksTestSuite) TestValidateRBAC(c *C) {\n\tfor _, tc := range []struct {\n\t\tresources []*metav1.APIResourceList\n\t\tchecker   Checker\n\t\tout       *metav1.APIGroup\n\t}{\n\t\t{\n\t\t\tresources: []*metav1.APIResourceList{\n\t\t\t\t{\n\t\t\t\t\tGroupVersion: \"/////\",\n\t\t\t\t},\n\t\t\t},\n\t\t\tchecker: NotNil,\n\t\t\tout:     nil,\n\t\t},\n\t\t{\n\t\t\tresources: []*metav1.APIResourceList{\n\t\t\t\t{\n\t\t\t\t\tGroupVersion: 
\"rbac.authorization.k8s.io/v1\",\n\t\t\t\t},\n\t\t\t},\n\t\t\tchecker: IsNil,\n\t\t\tout: &metav1.APIGroup{\n\t\t\t\tName: \"rbac.authorization.k8s.io\",\n\t\t\t\tVersions: []metav1.GroupVersionForDiscovery{\n\t\t\t\t\t{GroupVersion: \"rbac.authorization.k8s.io/v1\", Version: \"v1\"},\n\t\t\t\t},\n\t\t\t\tPreferredVersion: metav1.GroupVersionForDiscovery{GroupVersion: \"rbac.authorization.k8s.io/v1\", Version: \"v1\"},\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tresources: []*metav1.APIResourceList{\n\t\t\t\t{\n\t\t\t\t\tGroupVersion: \"notrbac.authorization.k8s.io/v1\",\n\t\t\t\t},\n\t\t\t},\n\t\t\tchecker: NotNil,\n\t\t\tout:     nil,\n\t\t},\n\t} {\n\t\tcli := fake.NewSimpleClientset()\n\t\tcli.Discovery().(*discoveryfake.FakeDiscovery).Resources = tc.resources\n\t\tp := &Kubestr{cli: cli}\n\t\tout, err := p.validateRBACHelper()\n\t\tc.Assert(out, DeepEquals, tc.out)\n\t\tc.Check(err, tc.checker)\n\t}\n}\n\nfunc (s *K8sChecksTestSuite) TestValidateAggregatedLayer(c *C) {\n\tfor _, tc := range []struct {\n\t\tresources []*metav1.APIResourceList\n\t\tchecker   Checker\n\t\tout       *metav1.APIResourceList\n\t}{\n\t\t{\n\t\t\tresources: []*metav1.APIResourceList{\n\t\t\t\t{\n\t\t\t\t\tGroupVersion: \"/////\",\n\t\t\t\t},\n\t\t\t},\n\t\t\tchecker: NotNil,\n\t\t\tout:     nil,\n\t\t},\n\t\t{\n\t\t\tresources: []*metav1.APIResourceList{\n\t\t\t\t{\n\t\t\t\t\tGroupVersion: \"apiregistration.k8s.io/v1\",\n\t\t\t\t},\n\t\t\t},\n\t\t\tchecker: IsNil,\n\t\t\tout: &metav1.APIResourceList{\n\t\t\t\tGroupVersion: \"apiregistration.k8s.io/v1\",\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tresources: []*metav1.APIResourceList{\n\t\t\t\t{\n\t\t\t\t\tGroupVersion: \"apiregistration.k8s.io/v1beta1\",\n\t\t\t\t},\n\t\t\t},\n\t\t\tchecker: IsNil,\n\t\t\tout: &metav1.APIResourceList{\n\t\t\t\tGroupVersion: \"apiregistration.k8s.io/v1beta1\",\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tresources: []*metav1.APIResourceList{\n\t\t\t\t{\n\t\t\t\t\tGroupVersion: 
\"notapiregistration.k8s.io/v1\",\n\t\t\t\t},\n\t\t\t},\n\t\t\tchecker: NotNil,\n\t\t\tout:     nil,\n\t\t},\n\t} {\n\t\tcli := fake.NewSimpleClientset()\n\t\tcli.Discovery().(*discoveryfake.FakeDiscovery).Resources = tc.resources\n\t\tp := &Kubestr{cli: cli}\n\t\tout, err := p.validateAggregatedLayerHelper()\n\t\tc.Assert(out, DeepEquals, tc.out)\n\t\tc.Check(err, tc.checker)\n\t}\n}\n"
  },
  {
    "path": "pkg/kubestr/kubestr.go",
    "content": "package kubestr\n\nimport (\n\t\"github.com/kanisterio/kanister/pkg/kube\"\n\t\"github.com/kastenhq/kubestr/pkg/fio\"\n\t\"github.com/pkg/errors\"\n\tsv1 \"k8s.io/api/storage/v1\"\n\tunstructured \"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured\"\n\t\"k8s.io/client-go/dynamic\"\n\t\"k8s.io/client-go/kubernetes\"\n)\n\n// Kubestr is the primary object for running the kubestr tool. It holds all the cluster state information\n// as well.\ntype Kubestr struct {\n\tcli                     kubernetes.Interface\n\tdynCli                  dynamic.Interface\n\tsdsfgValidator          snapshotDataSourceFG\n\tstorageClassList        *sv1.StorageClassList\n\tvolumeSnapshotClassList *unstructured.UnstructuredList\n\tFio                     fio.FIO\n}\n\nconst Logo = `\n**************************************\n  _  ___   _ ___ ___ ___ _____ ___\n  | |/ / | | | _ ) __/ __|_   _| _ \\\n  | ' <| |_| | _ \\ _|\\__ \\ | | |   /\n  |_|\\_\\\\___/|___/___|___/ |_| |_|_\\\n\nExplore your Kubernetes storage options\n**************************************\n`\n\nvar (\n\tDefaultQPS   = float32(50)\n\tDefaultBurst = 100\n)\n\n// NewKubestr initializes a new kubestr object to run preflight tests\nfunc NewKubestr() (*Kubestr, error) {\n\tcli, err := LoadKubeCli()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdynCli, err := LoadDynCli()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &Kubestr{\n\t\tcli:    cli,\n\t\tdynCli: dynCli,\n\t\tsdsfgValidator: &snapshotDataSourceFGValidator{\n\t\t\tcli:    cli,\n\t\t\tdynCli: dynCli,\n\t\t},\n\t\tFio: &fio.FIOrunner{\n\t\t\tCli: cli,\n\t\t},\n\t}, nil\n}\n\n// LoadDynCli loads the config and returns a dynamic CLI\nfunc LoadDynCli() (dynamic.Interface, error) {\n\tcfg, err := kube.LoadConfig()\n\tif err != nil {\n\t\treturn nil, errors.Wrap(err, \"failed to load config for Dynamic client\")\n\t}\n\tclientset, err := dynamic.NewForConfig(cfg)\n\tif err != nil {\n\t\treturn nil, errors.Wrap(err, \"failed to create Dynamic 
client\")\n\t}\n\treturn clientset, nil\n}\n\n// LoadKubeCli loads the config and returns a kubernetes client\n// configured by the kanister environment.\nfunc LoadKubeCli() (kubernetes.Interface, error) {\n\tconfig, err := kube.LoadConfig()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tconfig.QPS = DefaultQPS\n\tconfig.Burst = DefaultBurst\n\t// creates the clientset\n\tclientset, err := kubernetes.NewForConfig(config)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn clientset, nil\n}\n"
  },
  {
    "path": "pkg/kubestr/storage_provisioners.go",
    "content": "package kubestr\n\nimport (\n\t\"context\"\n\t\"fmt\"\n\t\"regexp\"\n\t\"strconv\"\n\t\"strings\"\n\n\tkanvolume \"github.com/kanisterio/kanister/pkg/kube/volume\"\n\t\"github.com/kastenhq/kubestr/pkg/common\"\n\t\"github.com/pkg/errors\"\n\tv1 \"k8s.io/api/core/v1\"\n\tsv1 \"k8s.io/api/storage/v1\"\n\t\"k8s.io/apimachinery/pkg/api/resource\"\n\tmetav1 \"k8s.io/apimachinery/pkg/apis/meta/v1\"\n\tunstructured \"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured\"\n\t\"k8s.io/apimachinery/pkg/runtime/schema\"\n\t\"k8s.io/client-go/dynamic\"\n\t\"k8s.io/client-go/kubernetes\"\n)\n\nconst (\n\t// APIVersionKey describes the APIVersion key\n\tAPIVersionKey = \"apiVersion\"\n\t// FeatureGateTestPVCName is the name of the pvc created by the feature gate\n\t// validation test\n\tFeatureGateTestPVCName = \"kubestr-featuregate-test\"\n\t// DefaultNS describes the default namespace\n\tDefaultNS = \"default\"\n\t// PodNamespaceEnvKey describes the pod namespace env variable\n\tPodNamespaceEnvKey = \"POD_NAMESPACE\"\n)\n\n// Provisioner holds the important information of a provisioner\ntype Provisioner struct {\n\tProvisionerName       string\n\tCSIDriver             *CSIDriver\n\tURL                   string\n\tStorageClasses        []*SCInfo\n\tVolumeSnapshotClasses []*VSCInfo\n\tStatusList            []Status\n}\n\ntype CSIDriver struct {\n\tNameUrl             string\n\tDriverName          string\n\tVersions            string\n\tDescription         string\n\tPersistence         string\n\tAccessModes         string\n\tDynamicProvisioning string\n\tFeatures            string\n}\n\nfunc (c *CSIDriver) Provider() string {\n\tre := regexp.MustCompile(`\\[(.*?)\\]`)\n\tmatch := re.FindStringSubmatch(c.NameUrl) // find the left most match\n\tif len(match) < 2 {\n\t\treturn \"\"\n\t}\n\treturn match[1]\n}\n\nfunc (c *CSIDriver) URL() string {\n\tre := regexp.MustCompile(`\\((.*?)\\)`)\n\tmatch := re.FindAllStringSubmatch(c.NameUrl, -1)\n\tif len(match) < 1 
{\n\t\treturn \"\"\n\t}\n\turl := match[len(match)-1] // find the right most match\n\tif len(url) < 2 {\n\t\treturn \"\"\n\t}\n\treturn url[1]\n}\n\nfunc (c *CSIDriver) Print(prefix string) {\n\tfmt.Printf(prefix+\"  Provider:            %s\\n\", c.Provider())\n\tfmt.Printf(prefix+\"  Website:             %s\\n\", c.URL())\n\tfmt.Printf(prefix+\"  Description:         %s\\n\", c.Description)\n\tfmt.Printf(prefix+\"  Additional Features: %s\\n\", c.Features)\n}\n\nfunc (c *CSIDriver) SupportsSnapshots() bool {\n\treturn strings.Contains(c.Features, \"Snapshot\")\n}\n\n// SCInfo stores the info of a StorageClass\ntype SCInfo struct {\n\tName       string\n\tStatusList []Status\n\tRaw        interface{} `json:\",omitempty\"`\n}\n\n// VSCInfo stores the info of a VolumeSnapshotClass\ntype VSCInfo struct {\n\tName          string\n\tStatusList    []Status\n\tHasAnnotation bool\n\tRaw           interface{} `json:\",omitempty\"`\n}\n\n// Print prints the provisioner specific details\nfunc (v *Provisioner) Print() {\n\tprintSuccessColor(\"  \" + v.ProvisionerName + \":\")\n\tfor _, status := range v.StatusList {\n\t\tstatus.Print(\"    \")\n\t}\n\tswitch {\n\tcase v.CSIDriver != nil:\n\t\tfmt.Println(\"    This is a CSI driver!\")\n\t\tfmt.Println(\"    (The following info may not be up to date. Please check with the provider for more information.)\")\n\t\tv.CSIDriver.Print(\"  \")\n\tcase strings.HasPrefix(v.ProvisionerName, \"kubernetes.io\"):\n\t\tfmt.Println(\"    This is an in tree provisioner.\")\n\tcase strings.Contains(v.ProvisionerName, \"csi\"):\n\t\tfmt.Println(\"    This might be a CSI Driver. 
But it is not publicly listed.\")\n\tdefault:\n\t\tfmt.Println(\"    Unknown driver type.\")\n\t}\n\tfmt.Println()\n\tif len(v.StorageClasses) > 0 {\n\t\tfmt.Printf(\"    Storage Classes:\\n\")\n\t\tfor _, sc := range v.StorageClasses {\n\t\t\tfmt.Printf(\"      * %s\\n\", sc.Name)\n\t\t\tfor _, status := range sc.StatusList {\n\t\t\t\tstatus.Print(\"        \")\n\t\t\t}\n\t\t}\n\t}\n\n\tif len(v.VolumeSnapshotClasses) > 0 {\n\t\tfmt.Printf(\"    Volume Snapshot Classes:\\n\")\n\t\tfor _, vsc := range v.VolumeSnapshotClasses {\n\t\t\tfmt.Printf(\"      * %s\\n\", vsc.Name)\n\t\t\tfor _, status := range vsc.StatusList {\n\t\t\t\tstatus.Print(\"        \")\n\t\t\t}\n\t\t}\n\t}\n\n\tif len(v.StorageClasses) > 0 {\n\t\tfmt.Println()\n\t\tfmt.Println(\"    To perform a FIO test, run-\")\n\t\tfmt.Println(\"      ./kubestr fio -s <storage class>\")\n\t\tfmt.Println()\n\t\tfmt.Println(\"    To perform a check for block device support, run-\")\n\t\tfmt.Println(\"      ./kubestr blockmount -s <storage class>\")\n\t\tswitch {\n\t\tcase len(v.VolumeSnapshotClasses) == 0 && v.CSIDriver != nil && v.CSIDriver.SupportsSnapshots():\n\t\t\tfmt.Println()\n\t\t\tfmt.Println(\"    This provisioner supports snapshots, however no Volume Snaphsot Classes were found.\")\n\t\tcase len(v.VolumeSnapshotClasses) > 0:\n\t\t\tfmt.Println()\n\t\t\tfmt.Println(\"    To test CSI snapshot/restore functionality, run-\")\n\t\t\tfmt.Println(\"      ./kubestr csicheck -s <storage class> -v <volume snapshot class>\")\n\t\t}\n\t}\n}\n\n// ValidateProvisioners validates the provisioners in a cluster\nfunc (p *Kubestr) ValidateProvisioners(ctx context.Context) ([]*Provisioner, error) {\n\tprovisionerList, err := p.provisionerList(ctx)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"error listing provisioners: %w\", err)\n\t}\n\tvar validateProvisionersOutput []*Provisioner\n\tfor _, provisioner := range provisionerList {\n\t\tprocessedProvisioner, err := p.processProvisioner(ctx, provisioner)\n\t\tif err 
!= nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tvalidateProvisionersOutput = append(validateProvisionersOutput, processedProvisioner)\n\t}\n\treturn validateProvisionersOutput, nil\n}\n\nfunc (p *Kubestr) processProvisioner(ctx context.Context, provisioner string) (*Provisioner, error) {\n\tretProvisioner := &Provisioner{\n\t\tProvisionerName: provisioner,\n\t}\n\n\tstorageClassList, err := p.loadStorageClasses(ctx)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tfor _, storageClass := range storageClassList.Items {\n\t\tif storageClass.Provisioner == provisioner {\n\t\t\tretProvisioner.StorageClasses = append(retProvisioner.StorageClasses,\n\t\t\t\tp.validateStorageClass(provisioner, storageClass)) // review this\n\t\t}\n\t}\n\n\tfor _, csiDriver := range CSIDriverList {\n\t\tif strings.Contains(provisioner, csiDriver.DriverName) {\n\t\t\tretProvisioner.CSIDriver = csiDriver\n\t\t}\n\t}\n\n\tif retProvisioner.CSIDriver != nil {\n\t\tif !p.hasCSIDriverObject(ctx, provisioner) {\n\t\t\tretProvisioner.StatusList = append(retProvisioner.StatusList,\n\t\t\t\tmakeStatus(StatusWarning, \"Missing CSIDriver Object. Required by some provisioners.\", nil))\n\t\t}\n\t\tif clusterCsiSnapshotCapable, err := p.isK8sVersionCSISnapshotCapable(ctx); err != nil || !clusterCsiSnapshotCapable {\n\t\t\tretProvisioner.StatusList = append(retProvisioner.StatusList,\n\t\t\t\tmakeStatus(StatusInfo, \"Cluster is not CSI snapshot capable. 
Requires VolumeSnapshotDataSource feature gate.\", nil))\n\t\t\treturn retProvisioner, errors.Wrap(err, \"failed to validate if Kubernetes version was CSI capable\")\n\t\t}\n\t\tcsiSnapshotGroupVersion := p.getCSIGroupVersion()\n\t\tif csiSnapshotGroupVersion == nil {\n\t\t\tretProvisioner.StatusList = append(retProvisioner.StatusList,\n\t\t\t\tmakeStatus(StatusInfo, \"Can't find the CSI snapshot group api version.\", nil))\n\t\t\treturn retProvisioner, nil\n\t\t}\n\t\t// load volumeSnapshotClass\n\t\tvscs, err := p.loadVolumeSnapshotClasses(ctx, csiSnapshotGroupVersion.Version)\n\t\tif err != nil {\n\t\t\treturn nil, errors.Wrap(err, \"failed to load volume snapshot classes\")\n\t\t}\n\t\tfor _, vsc := range vscs.Items {\n\t\t\tif p.getDriverNameFromUVSC(vsc, csiSnapshotGroupVersion.GroupVersion) == provisioner {\n\t\t\t\tretProvisioner.VolumeSnapshotClasses = append(retProvisioner.VolumeSnapshotClasses,\n\t\t\t\t\tp.validateVolumeSnapshotClass(vsc, csiSnapshotGroupVersion.GroupVersion))\n\t\t\t}\n\t\t}\n\t}\n\treturn retProvisioner, nil\n}\n\n// hasCSIDriverObject sees if a provisioner has a CSIDriver Object\nfunc (p *Kubestr) hasCSIDriverObject(ctx context.Context, provisioner string) bool {\n\tcsiDrivers, err := p.cli.StorageV1beta1().CSIDrivers().List(ctx, metav1.ListOptions{})\n\tif err != nil {\n\t\treturn false\n\t}\n\tfor _, driver := range csiDrivers.Items {\n\t\tif driver.Name == provisioner {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}\n\nfunc (p *Kubestr) isK8sVersionCSISnapshotCapable(ctx context.Context) (bool, error) {\n\tk8sVersion, err := p.validateK8sVersionHelper()\n\tif err != nil {\n\t\treturn false, err\n\t}\n\tminorStr := k8sVersion.Minor\n\tif string(minorStr[len(minorStr)-1]) == \"+\" {\n\t\tminorStr = minorStr[:len(minorStr)-1]\n\t}\n\tminor, err := strconv.Atoi(minorStr)\n\tif err != nil {\n\t\treturn false, err\n\t}\n\tif minor < 17 && k8sVersion.Major == \"1\" {\n\t\treturn p.sdsfgValidator.validate(ctx)\n\t}\n\treturn true, 
nil\n}\n\n// validateStorageClass validates a storageclass\nfunc (p *Kubestr) validateStorageClass(provisioner string, storageClass sv1.StorageClass) *SCInfo {\n\tscStatus := &SCInfo{\n\t\tName: storageClass.Name,\n\t\tRaw:  storageClass,\n\t}\n\treturn scStatus\n}\n\n// validateVolumeSnapshotClass validates the VolumeSnapshotClass\nfunc (p *Kubestr) validateVolumeSnapshotClass(vsc unstructured.Unstructured, groupVersion string) *VSCInfo {\n\tretVSC := &VSCInfo{\n\t\tName: vsc.GetName(),\n\t\tRaw:  vsc,\n\t}\n\tif groupVersion != common.SnapshotVersion {\n\t\tretVSC.StatusList = append(retVSC.StatusList,\n\t\t\tmakeStatus(StatusError, fmt.Sprintf(\"VolumeSnapshotClass (%s) has an unsupported GroupVersion (%s)\", vsc.GetName(), groupVersion), nil))\n\t\treturn retVSC\n\t}\n\t_, ok := vsc.Object[common.VolSnapClassDriverKey]\n\tif !ok {\n\t\tretVSC.StatusList = append(retVSC.StatusList,\n\t\t\tmakeStatus(StatusError, fmt.Sprintf(\"VolumeSnapshotClass (%s) missing 'driver' field\", vsc.GetName()), nil))\n\t}\n\treturn retVSC\n}\n\nfunc (p *Kubestr) provisionerList(ctx context.Context) ([]string, error) {\n\tstorageClassList, err := p.loadStorageClasses(ctx)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tprovisionerSet := make(map[string]struct{})\n\tfor _, storageClass := range storageClassList.Items {\n\t\tprovisionerSet[storageClass.Provisioner] = struct{}{}\n\t}\n\treturn convertSetToSlice(provisionerSet), nil\n}\n\nfunc (p *Kubestr) loadStorageClasses(ctx context.Context) (*sv1.StorageClassList, error) {\n\tif p.storageClassList == nil {\n\t\tsc, err := p.cli.StorageV1().StorageClasses().List(ctx, metav1.ListOptions{})\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tp.storageClassList = sc\n\t}\n\treturn p.storageClassList, nil\n}\n\nfunc (p *Kubestr) loadVolumeSnapshotClasses(ctx context.Context, version string) (*unstructured.UnstructuredList, error) {\n\tif p.volumeSnapshotClassList == nil {\n\t\tVolSnapClassGVR := schema.GroupVersionResource{Group: 
common.SnapGroupName, Version: version, Resource: common.VolumeSnapshotClassResourcePlural}\n\t\tus, err := p.dynCli.Resource(VolSnapClassGVR).List(ctx, metav1.ListOptions{})\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tp.volumeSnapshotClassList = us\n\t}\n\treturn p.volumeSnapshotClassList, nil\n}\n\n// getDriverNameFromUVSC get the driver name from an unstructured VSC\nfunc (p *Kubestr) getDriverNameFromUVSC(vsc unstructured.Unstructured, version string) string {\n\tvar driverName interface{}\n\tvar ok bool\n\tif version != common.SnapshotVersion {\n\t\treturn \"\"\n\t}\n\tdriverName, ok = vsc.Object[common.VolSnapClassDriverKey]\n\tif !ok {\n\t\treturn \"\"\n\t}\n\tdriver, ok := driverName.(string)\n\tif !ok {\n\t\treturn \"\"\n\t}\n\treturn driver\n}\n\n// getCSIGroupVersion fetches the CSI Group Version\nfunc (p *Kubestr) getCSIGroupVersion() *metav1.GroupVersionForDiscovery {\n\tgroups, _, err := p.cli.Discovery().ServerGroupsAndResources()\n\tif err != nil {\n\t\treturn nil\n\t}\n\tfor _, group := range groups {\n\t\tif group.Name == common.SnapGroupName {\n\t\t\treturn &group.PreferredVersion\n\t\t}\n\t}\n\treturn nil\n}\n\ntype snapshotDataSourceFG interface {\n\tvalidate(ctx context.Context) (bool, error)\n}\n\ntype snapshotDataSourceFGValidator struct {\n\tcli    kubernetes.Interface\n\tdynCli dynamic.Interface\n}\n\nfunc (s *snapshotDataSourceFGValidator) validate(ctx context.Context) (bool, error) {\n\tns := getPodNamespace()\n\n\t// deletes if exists. 
If it doesn't exist, this is a noop\n\terr := kanvolume.DeletePVC(s.cli, ns, FeatureGateTestPVCName)\n\tif err != nil {\n\t\treturn false, errors.Wrap(err, \"error deleting VolumeSnapshotDataSource feature-gate validation pvc\")\n\t}\n\t// defer delete\n\tdefer func() {\n\t\t_ = kanvolume.DeletePVC(s.cli, ns, FeatureGateTestPVCName)\n\t}()\n\n\t// create PVC\n\tsnapshotKind := \"VolumeSnapshot\"\n\tsnapshotAPIGroup := \"snapshot.storage.k8s.io\"\n\tpvc := &v1.PersistentVolumeClaim{\n\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\tName: FeatureGateTestPVCName,\n\t\t},\n\t\tSpec: v1.PersistentVolumeClaimSpec{\n\t\t\tAccessModes: []v1.PersistentVolumeAccessMode{v1.ReadWriteOnce},\n\t\t\tDataSource: &v1.TypedLocalObjectReference{\n\t\t\t\tAPIGroup: &snapshotAPIGroup,\n\t\t\t\tKind:     snapshotKind,\n\t\t\t\tName:     \"fakeSnap\",\n\t\t\t},\n\t\t\tResources: v1.VolumeResourceRequirements{\n\t\t\t\tRequests: v1.ResourceList{\n\t\t\t\t\tv1.ResourceStorage: resource.MustParse(\"1Gi\"),\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t}\n\n\tpvcRes, err := s.cli.CoreV1().PersistentVolumeClaims(ns).Create(ctx, pvc, metav1.CreateOptions{})\n\tif err != nil {\n\t\treturn false, errors.Wrap(err, \"error creating VolumeSnapshotDataSource feature-gate validation pvc\")\n\t}\n\tif pvcRes.Spec.DataSource == nil {\n\t\treturn false, nil\n\t}\n\treturn true, nil\n}\n"
  },
  {
    "path": "pkg/kubestr/storage_provisioners_test.go",
    "content": "package kubestr\n\nimport (\n\t\"context\"\n\t\"fmt\"\n\n\tkansnapshot \"github.com/kanisterio/kanister/pkg/kube/snapshot\"\n\t. \"gopkg.in/check.v1\"\n\tscv1 \"k8s.io/api/storage/v1\"\n\t\"k8s.io/api/storage/v1beta1\"\n\tmetav1 \"k8s.io/apimachinery/pkg/apis/meta/v1\"\n\t\"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured\"\n\t\"k8s.io/apimachinery/pkg/runtime\"\n\t\"k8s.io/apimachinery/pkg/runtime/schema\"\n\tversion \"k8s.io/apimachinery/pkg/version\"\n\tdiscoveryfake \"k8s.io/client-go/discovery/fake\"\n\tfakedynamic \"k8s.io/client-go/dynamic/fake\"\n\t\"k8s.io/client-go/kubernetes\"\n\t\"k8s.io/client-go/kubernetes/fake\"\n)\n\ntype ProvisionerTestSuite struct{}\n\nvar _ = Suite(&ProvisionerTestSuite{})\n\nfunc (s *ProvisionerTestSuite) TestHasCSIDriverObject(c *C) {\n\tctx := context.Background()\n\tfor _, tc := range []struct {\n\t\tcli             kubernetes.Interface\n\t\tprovisionerName string\n\t\thasDriver       bool\n\t}{\n\t\t{\n\t\t\tcli:             fake.NewSimpleClientset(),\n\t\t\tprovisionerName: \"provisioner\",\n\t\t\thasDriver:       false,\n\t\t},\n\t\t{\n\t\t\tcli: fake.NewSimpleClientset(&v1beta1.CSIDriverList{\n\t\t\t\tItems: []v1beta1.CSIDriver{\n\t\t\t\t\t{\n\t\t\t\t\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\t\t\t\t\tName: \"drivername\",\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t}}),\n\t\t\tprovisionerName: \"drivername\",\n\t\t\thasDriver:       true,\n\t\t},\n\t} {\n\t\tp := &Kubestr{cli: tc.cli}\n\t\thasDriver := p.hasCSIDriverObject(ctx, tc.provisionerName)\n\t\tc.Assert(hasDriver, Equals, tc.hasDriver)\n\t}\n}\n\nfunc (s *ProvisionerTestSuite) TestIsK8sVersionCSISnapshotCapable(c *C) {\n\tctx := context.Background()\n\tfor _, tc := range []struct {\n\t\tver     *version.Info\n\t\tchecker Checker\n\t\tcapable bool\n\t\tsdsfg   snapshotDataSourceFG\n\t}{\n\t\t{\n\t\t\tver:     &version.Info{Major: \"1\", Minor: \"\", GitVersion: \"v1.17\"},\n\t\t\tchecker: NotNil,\n\t\t\tcapable: false,\n\t\t},\n\t\t{\n\t\t\tver:     
&version.Info{Major: \"1\", Minor: \"15+\", GitVersion: \"v1.15+\"},\n\t\t\tchecker: NotNil,\n\t\t\tcapable: false,\n\t\t\tsdsfg:   &fakeSDSFGValidator{err: fmt.Errorf(\"someerror\"), cap: false},\n\t\t},\n\t\t{\n\t\t\tver:     &version.Info{Major: \"1\", Minor: \"15+\", GitVersion: \"v1.15+\"},\n\t\t\tchecker: IsNil,\n\t\t\tcapable: true,\n\t\t\tsdsfg:   &fakeSDSFGValidator{err: nil, cap: true},\n\t\t},\n\t\t{\n\t\t\tver:     &version.Info{Major: \"1\", Minor: \"17\", GitVersion: \"v1.17\"},\n\t\t\tchecker: IsNil,\n\t\t\tcapable: true,\n\t\t},\n\t} {\n\t\tcli := fake.NewSimpleClientset()\n\t\tcli.Discovery().(*discoveryfake.FakeDiscovery).FakedServerVersion = tc.ver\n\t\tp := &Kubestr{cli: cli, sdsfgValidator: tc.sdsfg}\n\t\tcap, err := p.isK8sVersionCSISnapshotCapable(ctx)\n\t\tc.Check(err, tc.checker)\n\t\tc.Assert(cap, Equals, tc.capable)\n\t}\n}\n\ntype fakeSDSFGValidator struct {\n\terr error\n\tcap bool\n}\n\nfunc (f *fakeSDSFGValidator) validate(ctx context.Context) (bool, error) {\n\treturn f.cap, f.err\n}\n\nfunc (s *ProvisionerTestSuite) TestValidateVolumeSnapshotClass(c *C) {\n\tfor _, tc := range []struct {\n\t\tvsc          unstructured.Unstructured\n\t\tgroupVersion string\n\t\tout          *VSCInfo\n\t}{\n\t\t{\n\t\t\tvsc: unstructured.Unstructured{\n\t\t\t\tObject: map[string]interface{}{\n\t\t\t\t\t\"metadata\": map[string]interface{}{\n\t\t\t\t\t\t\"name\": \"vsc1\",\n\t\t\t\t\t},\n\t\t\t\t\t\"driver\": \"something\",\n\t\t\t\t},\n\t\t\t},\n\t\t\tgroupVersion: \"snapshot.storage.k8s.io/v1\",\n\t\t\tout: &VSCInfo{\n\t\t\t\tName: \"vsc1\",\n\t\t\t},\n\t\t},\n\t\t{ // failure\n\t\t\tvsc: unstructured.Unstructured{\n\t\t\t\tObject: map[string]interface{}{\n\t\t\t\t\t\"metadata\": map[string]interface{}{\n\t\t\t\t\t\t\"name\": \"vsc1\",\n\t\t\t\t\t},\n\t\t\t\t\t\"notdriver\": \"something\",\n\t\t\t\t},\n\t\t\t},\n\t\t\tgroupVersion: \"snapshot.storage.k8s.io/v1\",\n\t\t\tout: &VSCInfo{\n\t\t\t\tName: \"vsc1\",\n\t\t\t\tStatusList: 
[]Status{\n\t\t\t\t\tmakeStatus(StatusError, fmt.Sprintf(\"VolumeSnapshotClass (%s) missing 'driver' field\", \"vsc1\"), nil),\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t} {\n\t\tp := &Kubestr{}\n\t\tout := p.validateVolumeSnapshotClass(tc.vsc, tc.groupVersion)\n\t\tc.Assert(out.Name, Equals, tc.out.Name)\n\t\tc.Assert(len(out.StatusList), Equals, len(tc.out.StatusList))\n\t}\n}\n\nfunc (s *ProvisionerTestSuite) TestLoadStorageClassesAndProvisioners(c *C) {\n\tctx := context.Background()\n\tp := &Kubestr{cli: fake.NewSimpleClientset(\n\t\t&scv1.StorageClass{ObjectMeta: metav1.ObjectMeta{Name: \"sc1\"}, Provisioner: \"provisioner1\"},\n\t\t&scv1.StorageClass{ObjectMeta: metav1.ObjectMeta{Name: \"sc2\"}, Provisioner: \"provisioner2\"},\n\t)}\n\tscs, err := p.loadStorageClasses(ctx)\n\tc.Assert(err, IsNil)\n\tc.Assert(len(scs.Items), Equals, 2)\n\tc.Assert(scs, Equals, p.storageClassList)\n\n\t// reload has the same\n\tp.cli = fake.NewSimpleClientset()\n\tscs, err = p.loadStorageClasses(ctx)\n\tc.Assert(err, IsNil)\n\tc.Assert(len(scs.Items), Equals, 2)\n\tc.Assert(scs, Equals, p.storageClassList)\n\n\t// proviosners uses loaded list\n\tprovisioners, err := p.provisionerList(ctx)\n\tc.Assert(err, IsNil)\n\tc.Assert(len(provisioners), Equals, 2)\n}\n\nfunc (s *ProvisionerTestSuite) TestLoadVolumeSnaphsotClasses(c *C) {\n\tctx := context.Background()\n\tscheme := runtime.NewScheme()\n\tscheme.AddKnownTypeWithName(schema.GroupVersionKind{Group: \"snapshot.storage.k8s.io\", Version: \"v1\", Kind: \"VolumeSnapshotClassList\"}, &unstructured.UnstructuredList{})\n\tp := &Kubestr{dynCli: fakedynamic.NewSimpleDynamicClient(scheme, &unstructured.Unstructured{\n\t\tObject: map[string]interface{}{\n\t\t\t\"apiVersion\": fmt.Sprintf(\"%s/%s\", kansnapshot.GroupName, kansnapshot.Version),\n\t\t\t\"kind\":       \"VolumeSnapshotClass\",\n\t\t\t\"metadata\": map[string]interface{}{\n\t\t\t\t\"name\": \"theVSC\",\n\t\t\t},\n\t\t\t\"driver\":         
\"somesnapshotter\",\n\t\t\t\"deletionPolicy\": \"Delete\",\n\t\t},\n\t})}\n\tvsc, err := p.loadVolumeSnapshotClasses(ctx, kansnapshot.Version)\n\tc.Assert(err, IsNil)\n\tc.Assert(len(vsc.Items), Equals, 1)\n\tc.Assert(vsc, Equals, p.volumeSnapshotClassList)\n\n\t// reload has the same\n\tp.dynCli = fakedynamic.NewSimpleDynamicClient(runtime.NewScheme())\n\tvsc, err = p.loadVolumeSnapshotClasses(ctx, kansnapshot.Version)\n\tc.Assert(err, IsNil)\n\tc.Assert(len(vsc.Items), Equals, 1)\n\tc.Assert(vsc, Equals, p.volumeSnapshotClassList)\n}\n\nfunc (s *ProvisionerTestSuite) TestGetCSIGroupVersion(c *C) {\n\tfor _, tc := range []struct {\n\t\tresources []*metav1.APIResourceList\n\t\tout       *metav1.GroupVersionForDiscovery\n\t}{\n\t\t{\n\t\t\tresources: []*metav1.APIResourceList{\n\t\t\t\t{\n\t\t\t\t\tGroupVersion: \"/////\",\n\t\t\t\t},\n\t\t\t},\n\t\t\tout: nil,\n\t\t},\n\t\t{\n\t\t\tresources: []*metav1.APIResourceList{\n\t\t\t\t{\n\t\t\t\t\tGroupVersion: \"snapshot.storage.k8s.io/v1beta1\",\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tGroupVersion: \"snapshot.storage.k8s.io/v1apha1\",\n\t\t\t\t},\n\t\t\t},\n\t\t\tout: &metav1.GroupVersionForDiscovery{\n\t\t\t\tGroupVersion: \"snapshot.storage.k8s.io/v1beta1\",\n\t\t\t\tVersion:      \"v1beta1\",\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tresources: []*metav1.APIResourceList{\n\t\t\t\t{\n\t\t\t\t\tGroupVersion: \"NOTsnapshot.storage.k8s.io/v1beta1\",\n\t\t\t\t},\n\t\t\t},\n\t\t\tout: nil,\n\t\t},\n\t} {\n\t\tcli := fake.NewSimpleClientset()\n\t\tcli.Discovery().(*discoveryfake.FakeDiscovery).Resources = tc.resources\n\t\tp := &Kubestr{cli: cli}\n\t\tout := p.getCSIGroupVersion()\n\t\tc.Assert(out, DeepEquals, tc.out)\n\t}\n}\n\nfunc (s *ProvisionerTestSuite) TestGetDriverNameFromUVSC(c *C) {\n\tfor _, tc := range []struct {\n\t\tvsc     unstructured.Unstructured\n\t\tversion string\n\t\tout     string\n\t}{\n\t\t{ // beta success\n\t\t\tvsc: unstructured.Unstructured{\n\t\t\t\tObject: map[string]interface{}{\n\t\t\t\t\t\"driver\": 
\"drivername\",\n\t\t\t\t},\n\t\t\t},\n\t\t\tversion: \"snapshot.storage.k8s.io/v1\",\n\t\t\tout:     \"drivername\",\n\t\t},\n\t\t{ // key missing\n\t\t\tvsc: unstructured.Unstructured{\n\t\t\t\tObject: map[string]interface{}{},\n\t\t\t},\n\t\t\tversion: \"snapshot.storage.k8s.io/v1\",\n\t\t\tout:     \"\",\n\t\t},\n\t\t{ // type conversion\n\t\t\tvsc: unstructured.Unstructured{\n\t\t\t\tObject: map[string]interface{}{\n\t\t\t\t\t\"driver\": int64(1),\n\t\t\t\t},\n\t\t\t},\n\t\t\tversion: \"snapshot.storage.k8s.io/v1\",\n\t\t\tout:     \"\",\n\t\t},\n\t} {\n\t\tp := &Kubestr{}\n\t\tout := p.getDriverNameFromUVSC(tc.vsc, tc.version)\n\t\tc.Assert(out, Equals, tc.out)\n\t}\n}\n\n// func (s *ProvisionerTestSuite) TestGetDriverStats(c *C) {\n// \tvar snapshotCount int\n// \tvar expansionCount int\n// \tvar cloningCount int\n// \tfeatureMap := make(map[string]struct{})\n// \tfor _, driver := range CSIDriverList {\n// \t\tif strings.Contains(\"Snapshot\", driver.Features) {\n// \t\t\tsnapshotCount++\n// \t\t}\n// \t\tif strings.Contains(\"Expansion\", driver.Features) {\n// \t\t\texpansionCount++\n// \t\t}\n// \t\tif strings.Contains(\"Cloning\", driver.Features) {\n// \t\t\tcloningCount++\n// \t\t}\n// \t\tfeatureMap[driver.Features] = struct{}{}\n// \t}\n// \tc.Log(\"totalcsidrivers: \", len(CSIDriverList))\n// \tc.Log(\"snapshotCount: \", snapshotCount)\n// \tc.Log(\"expansionCount: \", expansionCount)\n// \tc.Log(\"cloningCount: \", cloningCount)\n// \tc.Log(\"unique combinations: \", len(featureMap))\n// \tc.Assert(true, Equals, false)\n// }\n"
  },
  {
    "path": "pkg/kubestr/utils.go",
    "content": "package kubestr\n\nimport (\n\t\"fmt\"\n\t\"os\"\n)\n\nconst (\n\t// ErrorColor formatted color red\n\tErrorColor = \"\\033[1;31m%s\\033[0m\"\n\t// SuccessColor formatted color green\n\tSuccessColor = \"\\033[1;32m%s\\033[0m\"\n\t// YellowColor formatted color yellow\n\tYellowColor = \"\\033[1;33m%s\\033[0m\"\n)\n\n// Status is a generic structure to return a status\ntype Status struct {\n\tStatusCode    StatusCode\n\tStatusMessage string\n\tRaw           interface{} `json:\",omitempty\"`\n}\n\n// StatusCode type definition\ntype StatusCode string\n\nconst (\n\t// StatusOK is the success status code\n\tStatusOK = StatusCode(\"OK\")\n\t// StatusWarning is the informational status code\n\tStatusWarning = StatusCode(\"Warning\")\n\t// StatusError is the failure status code\n\tStatusError = StatusCode(\"Error\")\n\t// StatusInfo is the Info status code\n\tStatusInfo = StatusCode(\"Info\")\n)\n\n// Print prints a status message with a given prefix\nfunc (s *Status) Print(prefix string) {\n\tswitch s.StatusCode {\n\tcase StatusOK:\n\t\tprintSuccessMessage(prefix + s.StatusMessage)\n\tcase StatusError:\n\t\tprintErrorMessage(prefix + s.StatusMessage)\n\tcase StatusWarning:\n\t\tprintWarningMessage(prefix + s.StatusMessage)\n\tdefault:\n\t\tprintInfoMessage(prefix + s.StatusMessage)\n\t}\n}\n\n// printErrorMessage prints the error message\nfunc printErrorMessage(errorMesg string) {\n\tfmt.Printf(\"%s  -  \", errorMesg)\n\tfmt.Printf(ErrorColor, \"Error\")\n\tfmt.Println()\n}\n\n// printSuccessMessage prints the success message\nfunc printSuccessMessage(message string) {\n\tfmt.Printf(\"%s  -  \", message)\n\tfmt.Printf(SuccessColor, \"OK\")\n\tfmt.Println()\n}\n\nfunc printSuccessColor(message string) {\n\tfmt.Printf(SuccessColor, message)\n\tfmt.Println()\n}\n\n// printInfoMessage prints a warning\nfunc printInfoMessage(message string) {\n\tfmt.Println(message)\n}\n\n// printWarningMessage prints a warning\nfunc printWarningMessage(message string) 
{\n\tfmt.Printf(YellowColor+\"\\n\", message)\n}\n\n// TestOutput is the generic return value for tests\ntype TestOutput struct {\n\tTestName string\n\tStatus   []Status\n\tRaw      interface{} `json:\",omitempty\"`\n}\n\n// Print prints a TestRetVal as a string output\nfunc (t *TestOutput) Print() {\n\tfmt.Println(t.TestName + \":\")\n\tfor _, status := range t.Status {\n\t\tstatus.Print(\"  \")\n\t}\n}\n\nfunc MakeTestOutput(testname string, code StatusCode, mesg string, raw interface{}) *TestOutput {\n\treturn &TestOutput{\n\t\tTestName: testname,\n\t\tStatus:   []Status{makeStatus(code, mesg, nil)},\n\t\tRaw:      raw,\n\t}\n}\n\nfunc makeStatus(code StatusCode, mesg string, raw interface{}) Status {\n\treturn Status{\n\t\tStatusCode:    code,\n\t\tStatusMessage: mesg,\n\t\tRaw:           raw,\n\t}\n}\n\nfunc convertSetToSlice(set map[string]struct{}) []string {\n\tvar slice []string\n\tfor i := range set {\n\t\tslice = append(slice, i)\n\t}\n\treturn slice\n}\n\n// getPodNamespace gets the pods namespace or returns default\nfunc getPodNamespace() string {\n\tif val, ok := os.LookupEnv(PodNamespaceEnvKey); ok {\n\t\treturn val\n\t}\n\treturn DefaultNS\n}\n"
  },
  {
    "path": "scripts/load_csi_provisioners.sh",
    "content": "#!/usr/bin/env bash\n\nset -o errexit\nset -o nounset\nset -o pipefail\n\nCLEANSED_STR=\"\"\ncleanse_str() {\n  case \"$1\" in\n    \"org.democratic-csi.[X]\") CLEANSED_STR=\"org.democratic-csi\" ;;\n    \"[x].ember-csi.io\") CLEANSED_STR=\"ember-csi.io\" ;;\n    *) CLEANSED_STR=\"$1\"\n  esac\n}\n\ncurrent_directory=$(dirname \"$0\")\n# The Driver information is scraped from the `Production Drivers` table on this page\ncurl https://raw.githubusercontent.com/kubernetes-csi/docs/master/book/src/drivers.md -o ${current_directory}/../extra/csi-drivers\n\ncat <<EOT >> ${current_directory}/../extra/csi-drivers-temp.go\npackage kubestr\n\n// THIS FILE IS AUTO_GENERATED.\n// To generate file run \"go generate\" at the top level\n// This file must be checked in.\n\nEOT\n\n# The `Production Drivers` table has 8 columns as of now,\n# with the last column of `Other Features` skipped for quite a few of the drivers.\nMIN_COLS_PROD_DRIVERS=7\n\necho \"var CSIDriverList = []*CSIDriver{\" >> ${current_directory}/../extra/csi-drivers-temp.go\nwhile read p; do\n  if [[ $p == [* ]]; then\n    IFS='|'\n    read -a fields <<< \"$p\"\n    if [[ ${#fields[@]} -lt $MIN_COLS_PROD_DRIVERS ]]; then\n      echo skipping \"${fields[0]}\"\n      continue\n    fi\n\n    name_url=$(sed -e 's/^[[:space:]]*//' -e 's/[[:space:]]*$//' <<<${fields[0]})\n    driver_name=$(sed -e 's/^[[:space:]]*//' -e 's/[[:space:]]*$//' <<<${fields[1]} | sed 's/`//g')\n    versions=$(sed -e 's/^[[:space:]]*//' -e 's/[[:space:]]*$//' <<<${fields[2]})\n    description=$(sed -e 's/^[[:space:]]*//' -e 's/[[:space:]]*$//' <<<${fields[3]})\n    persistence=$(sed -e 's/^[[:space:]]*//' -e 's/[[:space:]]*$//' <<<${fields[4]})\n    access_modes=$(sed -e 's/^[[:space:]]*//' -e 's/[[:space:]]*$//' <<<${fields[5]}| sed 's/\"//g')\n    dynamic_provisioning=$(sed -e 's/^[[:space:]]*//' -e 's/[[:space:]]*$//' <<<${fields[6]})\n\n    if [[ ${#fields[@]} -gt $MIN_COLS_PROD_DRIVERS ]]; then\n      features=$(sed -e 
's/^[[:space:]]*//' -e 's/[[:space:]]*$//' <<<${fields[7]})\n    fi\n\n    cleanse_str \"${driver_name}\"\n    driver_name=\"${CLEANSED_STR}\"\n\n    echo \"{NameUrl: \\\"$name_url\\\", DriverName: \\\"$driver_name\\\", Versions: \\\"$versions\\\", Description: \\\"$description\\\", Persistence: \\\"$persistence\\\", AccessModes: \\\"$access_modes\\\", DynamicProvisioning: \\\"$dynamic_provisioning\\\", Features: \\\"$features\\\"},\" >> ${current_directory}/../extra/csi-drivers-temp.go\n  fi\ndone <${current_directory}/../extra/csi-drivers\necho \"}\" >> ${current_directory}/../extra/csi-drivers-temp.go\n\ngofmt ${current_directory}/../extra/csi-drivers-temp.go > ${current_directory}/../pkg/kubestr/csi-drivers.go\nrm ${current_directory}/../extra/csi-drivers-temp.go\n"
  }
]