[
  {
    "path": ".github/ISSUE_TEMPLATE/bug-report.yaml",
    "content": "name: 🐛 Bug report\ndescription: Report a bug to help us improve selefra\ntitle: \"[Bug]: \"\nlabels: [bug]\nassignees:\n  - selefra-bot\nbody:\n  - type: markdown\n    attributes:\n      value: |\n        Thanks for taking the time to fill out this bug report!\n  - type: input\n    id: contact\n    attributes:\n      label: Contact Details\n      description: How can we get in touch with you if we need more info?\n      placeholder: ex. email@example.com\n    validations:\n      required: false\n  - type: input\n    id: version\n    attributes:\n      label: Version\n      description: Please enter the project version you are currently using?\n      placeholder: 0.0.5\n    validations:\n      required: true\n  - type: textarea\n    id: what-happened\n    attributes:\n      label: What happened?\n      description: Also tell us, what did you expect to happen?\n      placeholder: Tell us what you see!\n      value: \"A bug happened!\"\n    validations:\n      required: true\n  - type: textarea\n    id: logs\n    attributes:\n      label: Relevant log output\n      description: Please copy and paste any relevant log output. This will be automatically formatted into code, so no need for backticks.\n      render: shell\n  - type: textarea\n    id: advise\n    attributes:\n      label: Proposed changes\n      description: You can provide us with good suggestions or modification plans."
  },
  {
    "path": ".github/ISSUE_TEMPLATE/config.yaml",
    "content": "blank_issues_enabled: true\ncontact_links:\n  - name: Ask a Question on Selefra Community\n    url: https://www.selefra.io/community/join\n    about: Join Selefra Community Slack to discuss Selefra with other community members.\n"
  },
  {
    "path": ".github/ISSUE_TEMPLATE/feature-request.yaml",
    "content": "name: 🚀 Feature Request\ndescription: Suggest a new feature or improvement\ntitle: \"[Feature]: \"\nlabels: [feature]\nassignees:\n  - selefra-bot\nbody:\n  - type: markdown\n    attributes:\n      value: |\n        Thanks for submitting a feature request! Please fill out the details below to help us understand your suggestion.\n  - type: dropdown\n    id: Priority\n    attributes:\n      label: Priority\n      description: Select the priority of this feature request.\n      options:\n        - Low\n        - Medium\n        - High\n        - Critical\n    validations:\n      required: true\n  - type: input\n    id: contact\n    attributes:\n      label: Contact Details\n      description: How can we get in touch with you if we need more info?\n      placeholder: ex. email@example.com\n    validations:\n      required: false\n  - type: input\n    id: summary\n    attributes:\n      label: Summary\n      description: Briefly describe the feature you are requesting.\n      placeholder: ex. \"Add support for custom themes\"\n    validations:\n      required: true\n  - type: textarea\n    id: description\n    attributes:\n      label: Description\n      description: Provide a detailed description of the feature, including any relevant use cases or scenarios.\n      placeholder: \"As a user, I would like to be able to...\"\n    validations:\n      required: true\n  - type: textarea\n    id: benefit\n    attributes:\n      label: Benefit\n      description: Explain how this feature would benefit other users or improve the overall experience.\n      placeholder: \"This feature would make it easier for users to...\"\n    validations:\n      required: true"
  },
  {
    "path": ".github/pull_request_template.md",
    "content": "## Description\nBriefly describe the changes and improvements you have made.\n\n## Related Issues or Feature Requests\nList any GitHub issues or feature requests that are related to this PR.\n\n## Testing Strategy\nDescribe your testing strategy, including the scope of your tests, testing methods, and any other relevant details.\n\n## Screenshots\nIf you have made any UI changes, please provide screenshots here so that other contributors or project owners can quickly understand your changes.\n\n## Dependencies\nIf your changes depend on any libraries, plugins, or tools, please list them here.\n\n## Additional Information\nProvide any additional information about your contribution, including your testing environment, any issues you encountered and how you solved them, and anything else that you think would be helpful to know.\n\n## License\nYour contribution is subject to the project's license agreement. Please specify the license agreement that applies to your contribution."
  },
  {
    "path": ".github/workflows/ci.yaml",
    "content": "name: CI\n\non:\n  pull_request:\n  push:\n\njobs:\n  selefra_test:\n    name: go test\n    runs-on: ubuntu-latest\n    services:\n      selefra_postgres:\n        image: postgres\n        ports:\n          - 5432:5432\n        env:\n          POSTGRES_PASSWORD: pass\n    steps:\n      - uses: actions/checkout@v3\n        with:\n          fetch-depth: 0\n      - name: Use Golang\n        uses: actions/setup-go@v1\n        with:\n          go-version: 1.19\n      - name: test\n        env:\n          AWS_ACCESS_KEY_ID: ${{ secrets.AWS_ACCESS_KEY_ID }}\n          AWS_SECRET_ACCESS_KEY: ${{ secrets.AWS_SECRET_ACCESS_KEY }}\n          TZ: Asia/Shanghai\n        run: go mod tidy && go test -short -timeout 3600s ./cmd/apply && go test -short -timeout 3600s ./cmd/provider\n  selefra_upload:\n    if: github.ref == 'refs/heads/main' && github.event_name == 'push'\n    name: Pull Code\n    runs-on: ubuntu-latest\n    steps:\n      - uses: actions/checkout@v3\n        with:\n          fetch-depth: 0\n\n      - name: Unshallow\n        run: git fetch\n\n      - name: Use Golang\n        uses: actions/setup-go@v1\n        with:\n          go-version: 1.19\n\n      - name: Run GoReleaser\n        uses: goreleaser/goreleaser-action@v3\n        with:\n          version: latest\n          args: release --rm-dist\n        env:\n          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}\n\n      - name: Upload Assets\n        uses: actions/upload-artifact@v3\n        with:\n          name: selefra\n          path: |\n            dist/*.zip\n            dist/*.tar.gz\n\n      # - uses: keithweaver/aws-s3-github-action@v1.0.0\n      #   name: Copy Folder\n      #   with:\n      #     command: cp\n      #     source: ./dist/\n      #     destination: s3://dev-www.selefra.io/app-selefra/\n      #     aws_access_key_id: ${{ secrets.AWS_ACCESS_KEY_ID }}\n      #     aws_secret_access_key: ${{ secrets.AWS_SECRET_ACCESS_KEY }}\n      #     aws_region: us-east-1\n      #     flags: 
--recursive"
  },
  {
    "path": ".github/workflows/release.yaml",
    "content": "name: release\n\non:\n  release:\n    types: [ created, edited ]\n\njobs:\n  selefra_release:\n    name: Build\n    runs-on: ubuntu-latest\n    steps:\n      - name: Checkout\n        uses: actions/checkout@v3\n        with:\n          ref: ${{ github.event.inputs.branch }}\n      - name: Unshallow\n        run: git fetch\n      - name: Use Golang\n        uses: actions/setup-go@v1\n        with:\n          go-version: 1.19\n      - name: git\n        run: git config --global url.https://${{ secrets.SELEFRA_TOKEN }}@github.com/.insteadOf https://github.com/\n\n      # already replaced by compiler injection\n      #      - name: Get the release version\n      #        id: get_version\n      #        run: echo ::set-output name=VERSION::${GITHUB_REF#refs/tags/}\n      #      - name: Set Version\n      #        run: |\n      #          sed -i \"s#{{version}}#${{ steps.get_version.outputs.VERSION }}#g\" cmd/version/version.go\n      #          cat cmd/version/version.go\n\n      - name: Run GoReleaser\n        if: \"github.event.release.prerelease\"\n        uses: goreleaser/goreleaser-action@v3\n        with:\n          version: latest\n          args: release --skip-publish --skip-validate\n        env:\n          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}\n          SELEFRA_TELEMETRY_TOKEN: ${{ secrets.SELEFRA_TELEMETRY_TOKEN }}\n\n      - name: Release\n        if: \"github.event.release.prerelease\"\n        uses: softprops/action-gh-release@v1\n        with:\n          files: |\n            dist/*checksums.txt\n            dist/*.tar.gz\n            dist/*.zip\n          prerelease: true\n          generate_release_notes: true\n          token: ${{ secrets.GITHUB_TOKEN }}\n        env:\n          SELEFRA_TELEMETRY_TOKEN: ${{ secrets.SELEFRA_TELEMETRY_TOKEN }}\n\n      - name: Run GoReleaser\n        if: \"!github.event.release.prerelease\"\n        uses: goreleaser/goreleaser-action@v3\n        with:\n          version: latest\n          args: 
release --rm-dist --skip-validate\n        env:\n          GITHUB_TOKEN: ${{ secrets.SELEFRA_TOKEN }}\n          SELEFRA_TELEMETRY_TOKEN: ${{ secrets.SELEFRA_TELEMETRY_TOKEN }}"
  },
  {
    "path": ".gitignore",
    "content": ".idea/*\n.DS_Store\n*.log\n*.exe\n**/test_download/*\n**/*.log\ntest\n"
  },
  {
    "path": ".goreleaser.yml",
    "content": "project_name: selefra\nbuilds:\n  - env: [ CGO_ENABLED=0 ]\n    goos:\n      - linux\n      - windows\n      - darwin\n    goarch:\n      - amd64\n      - arm64\n    ignore:\n      - goos: windows\n        goarch: arm64\n\n    id: \"selefra\"\n    binary:\n      'selefra'\n    ldflags:\n      - -X 'github.com/selefra/selefra/cmd/version.Version={{.Version}}'\n      - -X 'github.com/selefra/selefra/pkg/cli_env.SelefraTelemetryToken={{.Env.SELEFRA_TELEMETRY_TOKEN}}'\n\narchives:\n  - files:\n      - none*\n    format: zip\n    id: homebrew\n    name_template: \"{{ .ProjectName }}_{{ .Os }}_{{ .Arch }}\"\n    format_overrides:\n      - goos: linux\n        format: tar.gz\n\nbrews:\n  - ids:\n      - homebrew\n    name: selefra\n    tap:\n      owner: selefra\n      name: homebrew-tap\n      branch: main\n    folder: Formula\n    url_template: \"https://github.com/selefra/selefra/releases/download/{{ .Tag }}/{{ .ArtifactName }}\"\n    homepage: \"https://selefra.io/\"\n    description: \"Selefra - Infrastructure as Code for Infrastructure Analysis.\"\n    skip_upload: auto\n    install: |-\n      bin.install \"selefra\"\n\n\n"
  },
  {
    "path": "CODE_OF_CONDUCT.md",
    "content": "# Contributor Code of Conduct\n\nAs contributors and maintainers of this project, we pledge to respect all people who contribute through reporting issues, posting feature requests, updating documentation, submitting pull requests or patches, and other activities.\n\nWe are committed to making participation in this project a harassment-free experience for everyone, regardless of level of experience, gender, gender identity and expression, sexual orientation, disability, personal appearance, body size, race, age, or religion.\n\nExamples of unacceptable behavior by participants include the use of sexual language or imagery, derogatory comments or personal attacks, trolling, public or private harassment, insults, or other unprofessional conduct.\n\nProject maintainers have the right and responsibility to remove, edit, or reject comments, commits, code, wiki edits, issues, and other contributions that are not aligned to this Code of Conduct. Project maintainers who do not follow the Code of Conduct may be removed from the project team.\n\nInstances of abusive, harassing, or otherwise unacceptable behavior may be reported by opening an issue or contacting one or more of the project maintainers.\n\nThis Code of Conduct is adapted from the [Contributor Covenant](http:contributor-covenant.org), version 1.0.0, available at [http://contributor-covenant.org/version/1/0/0/](http://contributor-covenant.org/version/1/0/0/)\n"
  },
  {
    "path": "CONTRIBUTING.md",
    "content": "<!-- Your Title -->\n<p align=\"center\">\n<a href=\"https://www.selefra.io/\" target=\"_blank\">\n<picture><source media=\"(prefers-color-scheme: dark)\" srcset=\"https://user-images.githubusercontent.com/124020340/225567784-61adb5e7-06ae-402a-9907-69c1e6f1aa9e.png\"><source media=\"(prefers-color-scheme: light)\" srcset=\"https://user-images.githubusercontent.com/124020340/224677116-44ae9c6c-a543-4813-9ef3-c7cbcacd2fbe.png\"><img width=\"400px\" alt=\"Steampipe Logo\" src=\"https://user-images.githubusercontent.com/124020340/224677116-44ae9c6c-a543-4813-9ef3-c7cbcacd2fbe.png\"></picture>\n<a/>\n</p>\n\n<!-- Description -->\n  <p align=\"center\">\n    <i>Selefra is an open-source policy-as-code software that provides analytics for multi-cloud and SaaS.</i>\n  </p>\n  \n  <!-- Badges -->\n<p align=\"center\">   \n<img alt=\"go\" src=\"https://img.shields.io/badge/go-1.19-1E90FF\"></a>\n<a href=\"https://github.com/selefra/selefra\"><img alt=\"Total\" src=\"https://img.shields.io/github/downloads/selefra/selefra/total?logo=github\"></a>\n<a href=\"https://github.com/selefra/selefra/blob/master/LICENSE\"><img alt=\"GitHub license\" src=\"https://img.shields.io/github/license/selefra/selefra?style=social\"></a>\n  </p>\n  \n  <!-- Badges -->\n  <p align=\"center\">\n<a href=\"https://selefra.io/community/join\"><img src=\"https://img.shields.io/badge/-Slack-424549?style=social&logo=Slack\" height=25></a>\n    &nbsp;\n    <a href=\"https://twitter.com/SelefraCorp\"><img src=\"https://img.shields.io/badge/-Twitter-red?style=social&logo=twitter\" height=25></a>\n    &nbsp;\n    <a href=\"https://www.reddit.com/r/Selefra\"><img src=\"https://img.shields.io/badge/-Reddit-red?style=social&logo=reddit\" height=25></a>\n    &nbsp;\n    <a href=\"https://selefra.medium.com/\"><img src=\"https://img.shields.io/badge/-Medium-red?style=social&logo=medium\" height=25></a>\n\n  </p>\n  \n<p align=\"center\">\n  <img 
src=\"https://user-images.githubusercontent.com/124020340/225897757-188f1a50-2efa-4a9e-9199-7cb7f68485be.png\">\n</p>\n<br/>\n\n## Contributing to Selefra\n\nWelcome aboard Selefra! First thing first, thank you for contributing to selefra! \n\n### Code of Conduct \n\nWe value each and every member, make sure to take some time and read the [Code of Conduct](https://github.com/cncf/foundation/blob/main/code-of-conduct.md) to help maintain a productive and friendly community.\n\n### Selefra Architecture\n\n#### Overview\n\nSelefra is a project that consists of four main components: CLI, SDK, Provider, and Module. The CLI provides the interface for users to interact with the Selefra system. The SDK provides the necessary tools and capabilities for the CLI and Provider to communicate with each other. The Provider is responsible for detecting certain conditions or issues, while the Module stores the standards and rules for the Provider to use.\n\n- CLI\n\n  The CLI component is the user-facing part of the Selefra system. It provides a command-line interface for users to interact with the system and run various commands. The CLI communicates with the SDK to initiate detection processes and receive results from the Provider.\n\n- SDK\n\n  The SDK is the foundation of the Selefra project, providing the necessary tools and capabilities for the CLI and Provider to communicate with each other. It includes a set of APIs and libraries that allow developers to integrate their applications with Selefra easily. The SDK also handles all the communication and data transfer between the CLI and the Provider.\n\n- Provider\n\n  The Provider is responsible for detecting specific conditions or issues and reporting them back to the CLI. It interacts with the SDK and the Module to perform its tasks. 
The Provider is designed to be modular and extensible, meaning that developers can add their own detection capabilities by creating new Provider modules.\n\n- Module\n\n  The Module is where the detection standards and rules are stored. It provides the data that the Provider needs to perform its tasks. The Module is designed to be flexible, allowing developers to customize or extend it based on their needs.\n\n### Provider Registry\n\nThe Provider Registry is a Registry Services directory of domain-specific services that are exposed by products. If you want us to add a new plugin or resource please open an [Issue](https://github.com/selefra/selefra/issues).\n\n### Submitting PR\n\n1. Fork the repository to your own GitHub account.\n2. Clone the project to your machine.\n3. Create a new branch to work on. Branch from `develop` if it exists, else from `main`.\n4. Make your changes and commit them. Make sure your commits are concise and descriptive.\n5. Push your changes to your GitHub account.\n6. Create a Pull Request (PR) from your branch to the `develop` branch in the main repository.\n\n\n## Community\n\nSelefra is a community-driven project; we welcome you to open a [GitHub Issue](https://github.com/selefra/selefra/issues/new/choose) to report a bug, suggest an improvement, or request a new feature.\n\n-  Join <a href=\"https://selefra.io/community/join\"><img height=\"16\" alt=\"humanitarian\" src=\"https://user-images.githubusercontent.com/124020340/225563969-3f3d4c45-fb3f-4932-831d-01ab9e59c921.png\"></a> [Selefra Community](https://selefra.io/community/join) on Slack. 
We host `Community Hour` for tutorials and Q&As on a regular basis.\n-  Follow us on <a href=\"https://twitter.com/SelefraCorp\"><img height=\"16\" alt=\"humanitarian\" src=\"https://user-images.githubusercontent.com/124020340/225564426-82f5afbc-5638-4123-871d-fec6fdc6457f.png\"></a> [Twitter](https://twitter.com/SelefraCorp) and share your thoughts!\n-  Email us at <a href=\"mailto:support@selefra.io\"><img height=\"16\" alt=\"humanitarian\" src=\"https://user-images.githubusercontent.com/124020340/225564710-741dc841-572f-4cde-853c-5ebaaf4d3d3c.png\"></a>&nbsp;support@selefra.io\n\n## License\n\n[Mozilla Public License v2.0](https://github.com/selefra/selefra/blob/main/LICENSE)\n\n"
  },
  {
    "path": "Dockerfile",
    "content": "FROM golang:1.20\n\nENV DEBIAN_FRONTEND noninteractive\n\nRUN apt-get update && apt-get install -y postgresql postgresql-contrib\n\nENV POSTGRES_USER postgres\nENV POSTGRES_PASSWORD pass\n\nENV POSTGRES_PORT 15432\n\nENV SELEFRA_DATABASE_DSN 'host=127.0.0.1 user=postgres password=pass port=15432 dbname=postgres sslmode=disable'\n\nCOPY . /selefra\n\nWORKDIR /selefra\n\nRUN go env -w GOPROXY=https://goproxy.cn,direct\n\nRUN go mod tidy\n\nRUN go build -o selefra\n\nRUN mv selefra /usr/local/bin/\n\nEXPOSE 15432"
  },
  {
    "path": "LICENSE",
    "content": "Mozilla Public License Version 2.0\n==================================\n\n1. Definitions\n--------------\n\n1.1. \"Contributor\"\n    means each individual or legal entity that creates, contributes to\n    the creation of, or owns Covered Software.\n\n1.2. \"Contributor Version\"\n    means the combination of the Contributions of others (if any) used\n    by a Contributor and that particular Contributor's Contribution.\n\n1.3. \"Contribution\"\n    means Covered Software of a particular Contributor.\n\n1.4. \"Covered Software\"\n    means Source Code Form to which the initial Contributor has attached\n    the notice in Exhibit A, the Executable Form of such Source Code\n    Form, and Modifications of such Source Code Form, in each case\n    including portions thereof.\n\n1.5. \"Incompatible With Secondary Licenses\"\n    means\n\n    (a) that the initial Contributor has attached the notice described\n        in Exhibit B to the Covered Software; or\n\n    (b) that the Covered Software was made available under the terms of\n        version 1.1 or earlier of the License, but not also under the\n        terms of a Secondary License.\n\n1.6. \"Executable Form\"\n    means any form of the work other than Source Code Form.\n\n1.7. \"Larger Work\"\n    means a work that combines Covered Software with other material, in\n    a separate file or files, that is not Covered Software.\n\n1.8. \"License\"\n    means this document.\n\n1.9. \"Licensable\"\n    means having the right to grant, to the maximum extent possible,\n    whether at the time of the initial grant or subsequently, any and\n    all of the rights conveyed by this License.\n\n1.10. \"Modifications\"\n    means any of the following:\n\n    (a) any file in Source Code Form that results from an addition to,\n        deletion from, or modification of the contents of Covered\n        Software; or\n\n    (b) any new file in Source Code Form that contains any Covered\n        Software.\n\n1.11. 
\"Patent Claims\" of a Contributor\n    means any patent claim(s), including without limitation, method,\n    process, and apparatus claims, in any patent Licensable by such\n    Contributor that would be infringed, but for the grant of the\n    License, by the making, using, selling, offering for sale, having\n    made, import, or transfer of either its Contributions or its\n    Contributor Version.\n\n1.12. \"Secondary License\"\n    means either the GNU General Public License, Version 2.0, the GNU\n    Lesser General Public License, Version 2.1, the GNU Affero General\n    Public License, Version 3.0, or any later versions of those\n    licenses.\n\n1.13. \"Source Code Form\"\n    means the form of the work preferred for making modifications.\n\n1.14. \"You\" (or \"Your\")\n    means an individual or a legal entity exercising rights under this\n    License. For legal entities, \"You\" includes any entity that\n    controls, is controlled by, or is under common control with You. For\n    purposes of this definition, \"control\" means (a) the power, direct\n    or indirect, to cause the direction or management of such entity,\n    whether by contract or otherwise, or (b) ownership of more than\n    fifty percent (50%) of the outstanding shares or beneficial\n    ownership of such entity.\n\n2. License Grants and Conditions\n--------------------------------\n\n2.1. 
Grants\n\nEach Contributor hereby grants You a world-wide, royalty-free,\nnon-exclusive license:\n\n(a) under intellectual property rights (other than patent or trademark)\n    Licensable by such Contributor to use, reproduce, make available,\n    modify, display, perform, distribute, and otherwise exploit its\n    Contributions, either on an unmodified basis, with Modifications, or\n    as part of a Larger Work; and\n\n(b) under Patent Claims of such Contributor to make, use, sell, offer\n    for sale, have made, import, and otherwise transfer either its\n    Contributions or its Contributor Version.\n\n2.2. Effective Date\n\nThe licenses granted in Section 2.1 with respect to any Contribution\nbecome effective for each Contribution on the date the Contributor first\ndistributes such Contribution.\n\n2.3. Limitations on Grant Scope\n\nThe licenses granted in this Section 2 are the only rights granted under\nthis License. No additional rights or licenses will be implied from the\ndistribution or licensing of Covered Software under this License.\nNotwithstanding Section 2.1(b) above, no patent license is granted by a\nContributor:\n\n(a) for any code that a Contributor has removed from Covered Software;\n    or\n\n(b) for infringements caused by: (i) Your and any other third party's\n    modifications of Covered Software, or (ii) the combination of its\n    Contributions with other software (except as part of its Contributor\n    Version); or\n\n(c) under Patent Claims infringed by Covered Software in the absence of\n    its Contributions.\n\nThis License does not grant any rights in the trademarks, service marks,\nor logos of any Contributor (except as may be necessary to comply with\nthe notice requirements in Section 3.4).\n\n2.4. 
Subsequent Licenses\n\nNo Contributor makes additional grants as a result of Your choice to\ndistribute the Covered Software under a subsequent version of this\nLicense (see Section 10.2) or under the terms of a Secondary License (if\npermitted under the terms of Section 3.3).\n\n2.5. Representation\n\nEach Contributor represents that the Contributor believes its\nContributions are its original creation(s) or it has sufficient rights\nto grant the rights to its Contributions conveyed by this License.\n\n2.6. Fair Use\n\nThis License is not intended to limit any rights You have under\napplicable copyright doctrines of fair use, fair dealing, or other\nequivalents.\n\n2.7. Conditions\n\nSections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted\nin Section 2.1.\n\n3. Responsibilities\n-------------------\n\n3.1. Distribution of Source Form\n\nAll distribution of Covered Software in Source Code Form, including any\nModifications that You create or to which You contribute, must be under\nthe terms of this License. You must inform recipients that the Source\nCode Form of the Covered Software is governed by the terms of this\nLicense, and how they can obtain a copy of this License. You may not\nattempt to alter or restrict the recipients' rights in the Source Code\nForm.\n\n3.2. 
Distribution of Executable Form\n\nIf You distribute Covered Software in Executable Form then:\n\n(a) such Covered Software must also be made available in Source Code\n    Form, as described in Section 3.1, and You must inform recipients of\n    the Executable Form how they can obtain a copy of such Source Code\n    Form by reasonable means in a timely manner, at a charge no more\n    than the cost of distribution to the recipient; and\n\n(b) You may distribute such Executable Form under the terms of this\n    License, or sublicense it under different terms, provided that the\n    license for the Executable Form does not attempt to limit or alter\n    the recipients' rights in the Source Code Form under this License.\n\n3.3. Distribution of a Larger Work\n\nYou may create and distribute a Larger Work under terms of Your choice,\nprovided that You also comply with the requirements of this License for\nthe Covered Software. If the Larger Work is a combination of Covered\nSoftware with a work governed by one or more Secondary Licenses, and the\nCovered Software is not Incompatible With Secondary Licenses, this\nLicense permits You to additionally distribute such Covered Software\nunder the terms of such Secondary License(s), so that the recipient of\nthe Larger Work may, at their option, further distribute the Covered\nSoftware under the terms of either this License or such Secondary\nLicense(s).\n\n3.4. Notices\n\nYou may not remove or alter the substance of any license notices\n(including copyright notices, patent notices, disclaimers of warranty,\nor limitations of liability) contained within the Source Code Form of\nthe Covered Software, except that You may alter any license notices to\nthe extent required to remedy known factual inaccuracies.\n\n3.5. Application of Additional Terms\n\nYou may choose to offer, and to charge a fee for, warranty, support,\nindemnity or liability obligations to one or more recipients of Covered\nSoftware. 
However, You may do so only on Your own behalf, and not on\nbehalf of any Contributor. You must make it absolutely clear that any\nsuch warranty, support, indemnity, or liability obligation is offered by\nYou alone, and You hereby agree to indemnify every Contributor for any\nliability incurred by such Contributor as a result of warranty, support,\nindemnity or liability terms You offer. You may include additional\ndisclaimers of warranty and limitations of liability specific to any\njurisdiction.\n\n4. Inability to Comply Due to Statute or Regulation\n---------------------------------------------------\n\nIf it is impossible for You to comply with any of the terms of this\nLicense with respect to some or all of the Covered Software due to\nstatute, judicial order, or regulation then You must: (a) comply with\nthe terms of this License to the maximum extent possible; and (b)\ndescribe the limitations and the code they affect. Such description must\nbe placed in a text file included with all distributions of the Covered\nSoftware under this License. Except to the extent prohibited by statute\nor regulation, such description must be sufficiently detailed for a\nrecipient of ordinary skill to be able to understand it.\n\n5. Termination\n--------------\n\n5.1. The rights granted under this License will terminate automatically\nif You fail to comply with any of its terms. However, if You become\ncompliant, then the rights granted under this License from a particular\nContributor are reinstated (a) provisionally, unless and until such\nContributor explicitly and finally terminates Your grants, and (b) on an\nongoing basis, if such Contributor fails to notify You of the\nnon-compliance by some reasonable means prior to 60 days after You have\ncome back into compliance. 
Moreover, Your grants from a particular\nContributor are reinstated on an ongoing basis if such Contributor\nnotifies You of the non-compliance by some reasonable means, this is the\nfirst time You have received notice of non-compliance with this License\nfrom such Contributor, and You become compliant prior to 30 days after\nYour receipt of the notice.\n\n5.2. If You initiate litigation against any entity by asserting a patent\ninfringement claim (excluding declaratory judgment actions,\ncounter-claims, and cross-claims) alleging that a Contributor Version\ndirectly or indirectly infringes any patent, then the rights granted to\nYou by any and all Contributors for the Covered Software under Section\n2.1 of this License shall terminate.\n\n5.3. In the event of termination under Sections 5.1 or 5.2 above, all\nend user license agreements (excluding distributors and resellers) which\nhave been validly granted by You or Your distributors under this License\nprior to termination shall survive termination.\n\n************************************************************************\n*                                                                      *\n*  6. Disclaimer of Warranty                                           *\n*  -------------------------                                           *\n*                                                                      *\n*  Covered Software is provided under this License on an \"as is\"       *\n*  basis, without warranty of any kind, either expressed, implied, or  *\n*  statutory, including, without limitation, warranties that the       *\n*  Covered Software is free of defects, merchantable, fit for a        *\n*  particular purpose or non-infringing. The entire risk as to the     *\n*  quality and performance of the Covered Software is with You.        
*\n*  Should any Covered Software prove defective in any respect, You     *\n*  (not any Contributor) assume the cost of any necessary servicing,   *\n*  repair, or correction. This disclaimer of warranty constitutes an   *\n*  essential part of this License. No use of any Covered Software is   *\n*  authorized under this License except under this disclaimer.         *\n*                                                                      *\n************************************************************************\n\n************************************************************************\n*                                                                      *\n*  7. Limitation of Liability                                          *\n*  --------------------------                                          *\n*                                                                      *\n*  Under no circumstances and under no legal theory, whether tort      *\n*  (including negligence), contract, or otherwise, shall any           *\n*  Contributor, or anyone who distributes Covered Software as          *\n*  permitted above, be liable to You for any direct, indirect,         *\n*  special, incidental, or consequential damages of any character      *\n*  including, without limitation, damages for lost profits, loss of    *\n*  goodwill, work stoppage, computer failure or malfunction, or any    *\n*  and all other commercial damages or losses, even if such party      *\n*  shall have been informed of the possibility of such damages. This   *\n*  limitation of liability shall not apply to liability for death or   *\n*  personal injury resulting from such party's negligence to the       *\n*  extent applicable law prohibits such limitation. Some               *\n*  jurisdictions do not allow the exclusion or limitation of           *\n*  incidental or consequential damages, so this exclusion and          *\n*  limitation may not apply to You.                                    
*\n*                                                                      *\n************************************************************************\n\n8. Litigation\n-------------\n\nAny litigation relating to this License may be brought only in the\ncourts of a jurisdiction where the defendant maintains its principal\nplace of business and such litigation shall be governed by laws of that\njurisdiction, without reference to its conflict-of-law provisions.\nNothing in this Section shall prevent a party's ability to bring\ncross-claims or counter-claims.\n\n9. Miscellaneous\n----------------\n\nThis License represents the complete agreement concerning the subject\nmatter hereof. If any provision of this License is held to be\nunenforceable, such provision shall be reformed only to the extent\nnecessary to make it enforceable. Any law or regulation which provides\nthat the language of a contract shall be construed against the drafter\nshall not be used to construe this License against a Contributor.\n\n10. Versions of the License\n---------------------------\n\n10.1. New Versions\n\nMozilla Foundation is the license steward. Except as provided in Section\n10.3, no one other than the license steward has the right to modify or\npublish new versions of this License. Each version will be given a\ndistinguishing version number.\n\n10.2. Effect of New Versions\n\nYou may distribute the Covered Software under the terms of the version\nof the License under which You originally received the Covered Software,\nor under the terms of any subsequent version published by the license\nsteward.\n\n10.3. Modified Versions\n\nIf you create software not governed by this License, and you want to\ncreate a new license for such software, you may create and use a\nmodified version of this License if you rename the license and remove\nany references to the name of the license steward (except to note that\nsuch modified license differs from this License).\n\n10.4. 
Distributing Source Code Form that is Incompatible With Secondary\nLicenses\n\nIf You choose to distribute Source Code Form that is Incompatible With\nSecondary Licenses under the terms of this version of the License, the\nnotice described in Exhibit B of this License must be attached.\n\nExhibit A - Source Code Form License Notice\n-------------------------------------------\n\n  This Source Code Form is subject to the terms of the Mozilla Public\n  License, v. 2.0. If a copy of the MPL was not distributed with this\n  file, You can obtain one at http://mozilla.org/MPL/2.0/.\n\nIf it is not possible or desirable to put the notice in a particular\nfile, then You may include the notice in a location (such as a LICENSE\nfile in a relevant directory) where a recipient would be likely to look\nfor such a notice.\n\nYou may add additional accurate notices of copyright ownership.\n\nExhibit B - \"Incompatible With Secondary Licenses\" Notice\n---------------------------------------------------------\n\n  This Source Code Form is \"Incompatible With Secondary Licenses\", as\n  defined by the Mozilla Public License, v. 2.0.\n"
  },
  {
    "path": "Makefile",
    "content": "news.ycombinator.com\ngithub.com\nfreecodecamp.org\npublickey1.jp\nthenewstack.io\napi.daily.dev\nroadmap.sh\ndev.to\nstatics.teams.cdn.office.net\nhckrnews.com\nfreestuff.dev\nmedium.com\nlibhunt.com\nitmedia.co.jp\nfullstory.lightning.force.com\ndynamitejobs.com\nlink.csdn.net\nfree-for.dev\nbluelight.co\nevents.linuxfoundation.org\nalvinashcraft.com\nactivecampaign.atlassian.net\napp.otta.com\npresearch.com\nbuild5nines.com"
  },
  {
    "path": "README.md",
    "content": "<!-- Your Title -->\n<p align=\"center\">\n<a href=\"https://www.selefra.io/\" target=\"_blank\">\n<picture><source media=\"(prefers-color-scheme: dark)\" srcset=\"https://user-images.githubusercontent.com/124020340/225567784-61adb5e7-06ae-402a-9907-69c1e6f1aa9e.png\"><source media=\"(prefers-color-scheme: light)\" srcset=\"https://user-images.githubusercontent.com/124020340/224677116-44ae9c6c-a543-4813-9ef3-c7cbcacd2fbe.png\"><img width=\"400px\" alt=\"Steampipe Logo\" src=\"https://user-images.githubusercontent.com/124020340/224677116-44ae9c6c-a543-4813-9ef3-c7cbcacd2fbe.png\"></picture>\n<a/>\n</p>\n\n<!-- Description -->\n  <p align=\"center\">\n    <i>Selefra is an open-source policy-as-code software that provides analytics for multi-cloud and SaaS.</i>\n  </p>\n\n  <!-- Badges -->\n<p align=\"center\">   \n<a href=\"https://pkg.go.dev/github.com/selefra/selefra\"><img alt=\"go\" src=\"https://img.shields.io/badge/go-1.19-1E90FF\" /></a>\n<a href=\"https://github.com/selefra/selefra\"><img alt=\"Total\" src=\"https://img.shields.io/github/downloads/selefra/selefra/total?logo=github\"></a>\n<a href=\"https://github.com/selefra/selefra/blob/master/LICENSE\"><img alt=\"GitHub license\" src=\"https://img.shields.io/github/license/selefra/selefra?style=social\"></a>\n  </p>\n\n  <!-- Badges -->\n  <p align=\"center\">\n<a href=\"https://selefra.io/community/join\"><img src=\"https://img.shields.io/badge/-Slack-424549?style=social&logo=Slack\" height=25></a>\n    &nbsp;\n    <a href=\"https://twitter.com/SelefraCorp\"><img src=\"https://img.shields.io/badge/-Twitter-red?style=social&logo=twitter\" height=25></a>\n    &nbsp;\n    <a href=\"https://www.reddit.com/r/Selefra\"><img src=\"https://img.shields.io/badge/-Reddit-red?style=social&logo=reddit\" height=25></a>\n    &nbsp;\n    <a href=\"https://selefra.medium.com/\"><img src=\"https://img.shields.io/badge/-Medium-red?style=social&logo=medium\" height=25></a>\n\n  </p>\n\n<p align=\"center\">\n  
<img width=\"900\" alt=\"banner\" src=\"https://user-images.githubusercontent.com/124020340/232656647-58e2c31f-ba94-48f0-99fc-87ab660309d0.png\">\n</p>\n<br/>\n\n<!-- About Selefra -->\n\n## About Selefra\n\nSelefra means \"select * from infrastructure\". It is an open-source policy-as-code software that provides analysis for multi-cloud and SaaS environments, including over 30 services such as AWS, GCP, Azure, Alibaba Cloud, Kubernetes, Github, Cloudflare, and Slack.\n\nFor best practices and detailed instructions, refer to the Docs. Within the [Docs](https://selefra.io/docs/introduction), you will find information on installation, CLI usage, project workflow, and more guides on how to accomplish cloud inspection tasks.\n\nWith Selefra, you can engage in conversations with GPT models, which will analyze the information and provide relevant suggestions for security, cost, and architecture checks, helping you better manage their cloud resources, enhance security, reduce costs, and optimize architecture design.\n\n<img align=\"right\" width=\"570\" alt=\"img_code\" src=\"https://user-images.githubusercontent.com/124020340/232016353-67b21268-ae70-47a9-a848-cad0f2fce66f.gif\">\n\n#### 🔥 Policy as Code\n\nCustom analysis policies (security, compliance, cost) can be written through a combination of SQL and YAML.\n\n#### 💥 Configuration of Multi-Cloud, Multi-SaaS\n\nUnified multi-cloud configuration data integration capabilities that can support analysis of configuration data from any cloud service via SQL.\n\n#### 🌟 Version Control\n\nAnalysis policies can be managed through VCS such as GitHub/Gitlab.\n\n#### 🥤 Automation\n\nPolicies can be automated to enforce compliance, security, and cost optimization rules through Scheduled tasks and cloud automation tools.\n\n## Getting started\n\nRead detailed documentation for how to [Get Started](https://selefra.io/docs/get-started/) with Selefra.\n\nFor quick start, run this demo, it should take less than a few minutes:\n\n1. 
**Install Selefra**\n\n   For non-macOS users, [download packages](https://github.com/selefra/selefra/releases) to install Selefra.\n\n   On macOS, tap Selefra with Homebrew:\n\n    ```bash\n    brew tap selefra/tap\n    ```\n\n   Next, install Selefra:\n\n    ```bash\n    brew install selefra/tap/selefra\n    ```\n\n2. **Initialization project**\n\n    ```bash\n    mkdir selefra-demo && cd selefra-demo && selefra init\n    ```\n\n3. **Build code**\n\n    ```bash\n    selefra apply \n    ```\n   \n## 🔥 Analyze cloud resources using GPT\n\nYou can refer to the [documentation](https://selefra.io/docs/get-started#use-gpt)  to configure your OPENAPI_API_KEY in advance and start analyzing your cloud resources\n\n```bash\nselefra gpt <\"what you want to analyze\"> --openai_mode=gpt-3.5 --openai_limit=5 --openai_api_key=<Your Openai Api Key>\n```\n\n## Selefra Community Ecosystem\n\n Provider | Introduce | Status |\n | --------| ----- | ------ |\n| [AWS](https://www.selefra.io/docs/providers-connector/aws)|The AWS Provider for Selefra can be used to extract data from many of the cloud services by AWS. The provider must be configured with credentials to extract and analyze infrastructure data from AWS. | Stable |\n| [GCP](https://www.selefra.io/docs/providers-connector/gcp)|The GCP Provider for Selefra can be used to extract data from many of the cloud services by GCP. The provider must be configured with credentials to extract and analyze infrastructure data from GCP. | Stable |\n| [K8S](https://www.selefra.io/docs/providers-connector/k8s)|The K8s Provider for Selefra can be used to extract data from many of the cloud services by K8s. The provider must be configured with credentials to extract and analyze infrastructure data from K8s. | Stable |\n| [Azure](https://www.selefra.io/docs/providers-connector/azure)| The Azure Provider for Selefra can be used to extract data from many of the cloud services by Azure. 
The provider must be configured with credentials to extract and analyze infrastructure data from Azure.    | Stable |\n| [Slack](https://www.selefra.io/docs/providers-connector/slack)| The Slack Provider for Selefra can be used to extract data from many of the cloud services by Slack. The provider must be configured with credentials to extract and analyze infrastructure data from Slack.    | Stable |\n| [Cloudflare](https://www.selefra.io/docs/providers-connector/cloudflare)| The Cloudflare Provider for Selefra can be used to extract data from many of the cloud services by Cloudflare. The provider must be configured with credentials to extract and analyze infrastructure data from Cloudflare.    | Stable |\n| [Datadog](https://www.selefra.io/docs/providers-connector/datadog)| The Datadog Provider for Selefra can be used to extract data from many of the cloud services by Datadog. The provider must be configured with credentials to extract and analyze infrastructure data from Datadog.    | Stable |\n| [Microsoft365](https://www.selefra.io/docs/providers-connector/microsoft365)| The Microsoft365 Provider for Selefra can be used to extract data from many of the cloud services by Microsoft365. The provider must be configured with credentials to extract and analyze infrastructure data from Microsoft365.    | Stable |\n| [Vercel](https://www.selefra.io/docs/providers-connector/vercel)| The Vercel Provider for Selefra can be used to extract data from many of the cloud services by Vercel. The provider must be configured with credentials to extract and analyze infrastructure data from Vercel.    | Stable |\n| [Github](https://www.selefra.io/docs/providers-connector/github)| The Github Provider for Selefra can be used to extract data from many of the cloud services by Github. The provider must be configured with credentials to extract and analyze infrastructure data from Github.    
| Stable |\n| [GoogleWorksplace](https://www.selefra.io/docs/providers-connector/googleworksplace)| The GoogleWorksplace Provider for Selefra can be used to extract data from many of the cloud services by GoogleWorksplace. The provider must be configured with credentials to extract and analyze infrastructure data from GoogleWorksplace.    | Stable |\n| [Auth0](https://www.selefra.io/docs/providers-connector/auth0)| The Auth0 Provider for Selefra can be used to extract data from many of the cloud services by Auth0. The provider must be configured with credentials to extract and analyze infrastructure data from Auth0.    | Stable |\n| [Zendesk](https://www.selefra.io/docs/providers-connector/zendesk)| The Zendesk Provider for Selefra can be used to extract data from many of the cloud services by Zendesk. The provider must be configured with credentials to extract and analyze infrastructure data from Zendesk.    | Stable |\n| [Consul](https://www.selefra.io/docs/providers-connector/consul)| The Consul Provider for Selefra can be used to extract data from many of the cloud services by Consul. The provider must be configured with credentials to extract and analyze infrastructure data from Consul.    | Stable |\n| [Zoom](https://www.selefra.io/docs/providers-connector/zoom)| The Zoom Provider for Selefra can be used to extract data from many of the cloud services by Zoom. The provider must be configured with credentials to extract and analyze infrastructure data from Zoom.    | Stable |\n| [Gandi](https://www.selefra.io/docs/providers-connector/gandi)| The Gandi Provider for Selefra can be used to extract data from many of the cloud services by Gandi. The provider must be configured with credentials to extract and analyze infrastructure data from Gandi.    | Stable |\n| [Heroku](https://www.selefra.io/docs/providers-connector/heroku)| The Heroku Provider for Selefra can be used to extract data from many of the cloud services by Heroku. 
The provider must be configured with credentials to extract and analyze infrastructure data from Heroku.    | Stable |\n| [IBM](https://www.selefra.io/docs/providers-connector/ibm)| The IBM Provider for Selefra can be used to extract data from many of the cloud services by IBM. The provider must be configured with credentials to extract and analyze infrastructure data from IBM.    | Stable |\n| [Pagerduty](https://www.selefra.io/docs/providers-connector/pagerduty)| The Pagerduty Provider for Selefra can be used to extract data from many of the cloud services by Pagerduty. The provider must be configured with credentials to extract and analyze infrastructure data from Pagerduty.    | Stable |\n| [AliCloud](https://www.selefra.io/docs/providers-connector/alicloud)| The AliCloud Provider for Selefra can be used to extract data from many of the cloud services by AliCloud. The provider must be configured with credentials to extract and analyze infrastructure data from AliCloud.    | Stable |\n| [Okta](https://www.selefra.io/docs/providers-connector/okta)| The Okta Provider for Selefra can be used to extract data from many of the cloud services by Okta. The provider must be configured with credentials to extract and analyze infrastructure data from Okta.    | Stable |\n| [Oci](https://www.selefra.io/docs/providers-connector/oci)| The Oci Provider for Selefra can be used to extract data from many of the cloud services by Oci. The provider must be configured with credentials to extract and analyze infrastructure data from Oci.    | Stable |\n| [Boundary](https://www.selefra.io/docs/providers-connector/boundary)| The Boundary Provider for Selefra can be used to extract data from many of the cloud services by Boundary. The provider must be configured with credentials to extract and analyze infrastructure data from Boundary.    
| Stable |\n| [Stripe](https://www.selefra.io/docs/providers-connector/stripe)| The Stripe Provider for Selefra can be used to extract data from many of the cloud services by Stripe. The provider must be configured with credentials to extract and analyze infrastructure data from Stripe.    | Stable |\n| [Planetscale](https://www.selefra.io/docs/providers-connector/planetscale)| The Planetscale Provider for Selefra can be used to extract data from many of the cloud services by Planetscale. The provider must be configured with credentials to extract and analyze infrastructure data from Planetscale.    | Stable |\n| [Snowflake](https://www.selefra.io/docs/providers-connector/snowflake)| The Snowflake Provider for Selefra can be used to extract data from many of the cloud services by Snowflake. The provider must be configured with credentials to extract and analyze infrastructure data from Snowflake.    | coming soon |\n\n## Community\n\nSelefra is a community-driven project, we welcome you to open a [GitHub Issue](https://github.com/selefra/selefra/issues/new/choose) to report a bug, suggest an improvement, or request new feature.\n\n-  Join <a href=\"https://selefra.io/community/join\"><img height=\"16\" alt=\"humanitarian\" src=\"https://user-images.githubusercontent.com/124020340/225563969-3f3d4c45-fb3f-4932-831d-01ab9e59c921.png\"></a> [Selefra Community](https://selefra.io/community/join) on Slack. 
We host `Community Hour` for tutorials and Q&As on regular basis.\n-  Follow us on <a href=\"https://twitter.com/SelefraCorp\"><img height=\"16\" alt=\"humanitarian\" src=\"https://user-images.githubusercontent.com/124020340/225564426-82f5afbc-5638-4123-871d-fec6fdc6457f.png\"></a> [Twitter](https://twitter.com/SelefraCorp) and share your thoughts！\n-  Email us at <a href=\"support@selefra.io\"><img height=\"16\" alt=\"humanitarian\" src=\"https://user-images.githubusercontent.com/124020340/225564710-741dc841-572f-4cde-853c-5ebaaf4d3d3c.png\"></a>&nbsp;support@selefra.io\n\n## Contributing\n\nFor developers interested in building Selefra codebase, read through [Contributing.md](https://github.com/selefra/selefra/blob/main/CONTRIBUTING.md) and [Selefra Roadmap](https://github.com/orgs/selefra/projects/1).\nLet us know what you would like to work on!\n\n## License\n\n[Mozilla Public License v2.0](https://github.com/selefra/selefra/blob/main/LICENSE)\n"
  },
  {
    "path": "SECURITY.md",
    "content": "# Security Policy\n\n## Supported Versions\n\nUse this section to tell people about which versions of your project are\ncurrently being supported with security updates.\n\n| Version | Supported          |\n| ------- | ------------------ |\n| 5.1.x   | :white_check_mark: |\n| 5.0.x   | :x:                |\n| 4.0.x   | :white_check_mark: |\n| < 4.0   | :x:                |\n\n## Reporting a Vulnerability\n\nUse this section to tell people how to report a vulnerability.\n\nTell them where to go, how often they can expect to get an update on a\nreported vulnerability, what to expect if the vulnerability is accepted or\ndeclined, etc.\n"
  },
  {
    "path": "cli_ui/client.go",
    "content": "package cli_ui\n\n//import (\n//\t\"context\"\n//\t\"errors\"\n//\t\"github.com/google/uuid\"\n//\t\"github.com/selefra/selefra-provider-sdk/storage\"\n//\t\"github.com/selefra/selefra/config\"\n//\t\"github.com/selefra/selefra/pkg/registry\"\n//\t\"github.com/selefra/selefra/pkg/storage/pgstorage\"\n//\t\"github.com/selefra/selefra/ui\"\n//)\n//\n//type Client struct {\n//\tRegistry      interface{}\n//\tPluginManager interface{}\n//\tStorage       storage.Storage\n//\tinstanceId    uuid.UUID\n//}\n//\n//func CreateClientFromConfig(ctx context.Context, cfg *config.SelefraConfig, instanceId uuid.UUID, provider *config.ProviderDecl, cp config.Provider) (*Client, error) {\n//\n//\thub := new(interface{})\n//\tpm := new(interface{})\n//\n//\tc := &Client{\n//\t\tStorage:       nil,\n//\t\tcfg:           cfg,\n//\t\tRegistry:      hub,\n//\t\tPluginManager: pm,\n//\t\tinstanceId:    instanceId,\n//\t}\n//\n//\tschema := config.GetSchemaKey(provider, cp)\n//\tsto, diag := pgstorage.Storage(ctx, pgstorage.WithSearchPath(schema))\n//\tif diag != nil {\n//\t\terr := ui.PrintDiagnostic(diag.GetDiagnosticSlice())\n//\t\tif err != nil {\n//\t\t\treturn nil, errors.New(\"failed to create pgstorage\")\n//\t\t}\n//\t}\n//\tif sto != nil {\n//\t\tc.Storage = sto\n//\t}\n//\n//\tc.Providers = registry.Providers{}\n//\tfor _, rp := range cfg.RequireProvidersBlock {\n//\t\tc.Providers.Set(registry.Provider{Name: rp.Name, Version: rp.Version})\n//\t}\n//\n//\treturn c, nil\n//}\n"
  },
  {
    "path": "cli_ui/print.go",
    "content": "package cli_ui\n\nimport (\n\t\"errors\"\n\t\"fmt\"\n\t\"github.com/hashicorp/go-hclog\"\n\t\"runtime\"\n\t\"sync\"\n\t\"time\"\n\n\t\"github.com/fatih/color\"\n\t\"github.com/selefra/selefra-provider-sdk/provider/schema\"\n\t\"github.com/selefra/selefra/global\"\n\t\"github.com/selefra/selefra/pkg/logger\"\n)\n\n// The UI logs are printed to both the console and the log file\ntype uiPrinter struct {\n\n\t// log record logs\n\tlog *logger.Logger\n}\n\nfunc newUiPrinter() *uiPrinter {\n\tua := &uiPrinter{}\n\n\tua.log = logger.Default()\n\treturn ua\n}\n\nvar (\n\tprinterOnce sync.Once\n\tprinter     *uiPrinter\n)\n\nvar (\n\tYellowColor  = color.New(color.FgYellow).SprintFunc()\n\tRedColor     = color.New(color.FgRed).SprintFunc()\n\tGreenColor   = color.New(color.FgGreen).SprintFunc()\n\tBlueColor    = color.New(color.FgBlue).SprintFunc()\n\tMagentaColor = color.New(color.FgMagenta).SprintFunc()\n\tBlackColor   = color.New(color.FgBlack).SprintFunc()\n\tCyanColor    = color.New(color.FgCyan).SprintFunc()\n\tWhiteColor   = color.New(color.FgWhite).SprintFunc()\n)\n\n//// fsync write msg to p.fw\n//func (p *uiPrinter) fsync(color *color.Color, msg string) {\n//\tjsonLog := LogJSON{\n//\t\tCmd:   global.Cmd(),\n//\t\tStag:  global.Stage(),\n//\t\tMsg:   msg,\n//\t\tTime:  time.Now(),\n//\t\tLevel: getLevel(color),\n//\t}\n//\tbyteLog, err := json.Marshal(jsonLog)\n//\tif err != nil {\n//\t\tp.log.Error(err.Error())\n//\t\treturn\n//\t}\n//\n//\tstrLog := string(byteLog)\n//\t_, _ = p.fw.WriteString(strLog + \"\\n\")\n//}\n\n// sync do 2 things: 1. store msg to log file; 2. 
send msg to rpc server if rpc client exist\n// sync do not show anything\n//func (p *uiPrinter) sync(color *color.Color, msg string) {\n//\t// write to file\n//\tp.fsync(color, msg)\n//\n//\t// send to rpc\n//\t//logStreamClient := p.rpcClient.LogStreamClient()\n//\tp.step++\n//\tif color == ErrorColor {\n//\t\tcloud_sdk.SetStatus(\"error\")\n//\t\t//p.rpcClient.SetStatus(\"error\")\n//\t}\n//\n//\tif err := cloud_sdk.LogStreamSend(&logPb.ConnectMsg{\n//\t\tActionName: \"\",\n//\t\tData: &logPb.LogJOSN{\n//\t\t\tCmd:   global.Cmd(),\n//\t\t\tStag:  global.Stage(),\n//\t\t\tMsg:   msg,\n//\t\t\tTime:  timestamppb.Now(),\n//\t\t\tLevel: getLevel(color),\n//\t\t},\n//\t\tIndex: p.step,\n//\t\tMsg:   \"\",\n//\t\tBaseInfo: &logPb.BaseConnectionInfo{\n//\t\t\tToken:  cloud_sdk.Token(),\n//\t\t\tTaskId: cloud_sdk.TaskID(),\n//\t\t},\n//\t}); err != nil {\n//\t\tp.fsync(ErrorColor, err.Error())\n//\t\treturn\n//\t}\n//\n//\treturn\n//}\n\n// printf The behavior of printf is like fmt.Printf that it will format the info\n// when withLn is true, it will show format info with a \"\\n\" and call sync, else without a \"\\n\"\nfunc (p *uiPrinter) printf(color *color.Color, format string, args ...any) {\n\t// logger to file\n\tif p.log != nil {\n\t\tif color == ErrorColor {\n\t\t\tif _, f, l, ok := runtime.Caller(2); ok {\n\t\t\t\tprinter.log.Log(hclog.Error, \"%s %s:%d\", fmt.Sprintf(format, args...), f, l)\n\t\t\t}\n\t\t}\n\t\tp.log.Log(color2level(color), format, args...)\n\t}\n\n\t//msg := fmt.Sprintf(format, args...)\n\n\t//p.sync(color, msg)\n\n\tif color == nil {\n\t\tfmt.Printf(format, args...)\n\t} else {\n\t\t_, _ = color.Printf(format, args...)\n\t}\n\n}\n\n// println The behavior of println is like fmt.Println\n// it will show the log info and then call sync\nfunc (p *uiPrinter) println(color *color.Color, args ...any) {\n\t// logger to file\n\tif p.log != nil {\n\t\tif color == ErrorColor {\n\t\t\tif _, f, l, ok := runtime.Caller(2); ok 
{\n\t\t\t\tprinter.log.Log(hclog.Error, \"%s %s:%d\", fmt.Sprintln(args...), f, l)\n\t\t\t}\n\t\t}\n\t\tp.log.Log(color2level(color), fmt.Sprintln(args...))\n\t}\n\n\t//msg := fmt.Sprint(args...)\n\n\t//p.sync(color, msg)\n\n\tif color == nil {\n\t\tfmt.Println(args...)\n\t} else {\n\t\t_, _ = color.Println(args...)\n\t}\n\n\treturn\n}\n\nfunc color2level(color *color.Color) hclog.Level {\n\tswitch color {\n\tcase ErrorColor:\n\t\treturn hclog.Error\n\tcase WarningColor:\n\t\treturn hclog.Warn\n\tcase InfoColor:\n\t\treturn hclog.Info\n\tcase SuccessColor:\n\t\treturn hclog.Info\n\tdefault:\n\t\treturn hclog.Info\n\t}\n}\n\nvar levelColor = []*color.Color{\n\tInfoColor,\n\tInfoColor,\n\tInfoColor,\n\tInfoColor,\n\tWarningColor,\n\tErrorColor,\n\tErrorColor,\n}\n\nvar defaultLogger = logger.Default()\n\nfunc init() {\n\tprinterOnce.Do(func() {\n\t\tprinter = newUiPrinter()\n\t})\n}\n\nconst (\n\tprefixManaged   = \"managed\"\n\tprefixUnmanaged = \"unmanaged\"\n\tdefaultAlias    = \"default\"\n)\n\nvar (\n\tErrorColor   = color.New(color.FgRed, color.Bold)\n\tWarningColor = color.New(color.FgYellow, color.Bold)\n\t//InfoColor    = color.New(color.FgWhite, color.Bold)\n\tInfoColor    *color.Color = nil\n\tSuccessColor              = color.New(color.FgGreen, color.Bold)\n)\n\ntype LogJSON struct {\n\tCmd   string    `json:\"cmd\"`\n\tStag  string    `json:\"stag\"`\n\tMsg   string    `json:\"msg\"`\n\tTime  time.Time `json:\"time\"`\n\tLevel string    `json:\"level\"`\n}\n\nfunc getLevel(c *color.Color) string {\n\tvar level string\n\tswitch c {\n\tcase ErrorColor:\n\t\tlevel = \"error\"\n\tcase WarningColor:\n\t\tlevel = \"warn\"\n\tcase InfoColor:\n\t\tlevel = \"info\"\n\tcase SuccessColor:\n\t\tlevel = \"success\"\n\tdefault:\n\t}\n\treturn level\n}\n\nfunc Errorf(format string, a ...interface{}) {\n\tprinter.printf(ErrorColor, format, a...)\n}\n\nfunc Warningf(format string, a ...interface{}) {\n\tprinter.printf(WarningColor, format, a...)\n}\n\nfunc 
Successf(format string, a ...interface{}) {\n\tprinter.printf(SuccessColor, format, a...)\n}\n\n// Infof info without color\nfunc Infof(format string, a ...interface{}) {\n\tprinter.printf(InfoColor, format, a...)\n}\n\nfunc Errorln(a ...interface{}) {\n\tprinter.println(ErrorColor, a...)\n}\n\nfunc Warningln(a ...interface{}) {\n\tprinter.println(WarningColor, a...)\n}\n\nfunc Successln(a ...interface{}) {\n\tprinter.println(SuccessColor, a...)\n}\n\nfunc Infoln(a ...interface{}) {\n\tprinter.println(InfoColor, a...)\n}\n\nfunc Printf(c *color.Color, format string, a ...any) {\n\tprinter.printf(c, format, a...)\n}\n\nfunc Println(c *color.Color, a ...any) {\n\tprinter.println(c, a...)\n}\n\n//func Print(msg string, show bool) {\n//\tif show {\n//\t\tInfoln(msg)\n//\t\treturn\n//\t}\n//\n//\tprinter.sync(InfoColor, msg)\n//}\n\nfunc SaveLogToDiagnostic(diagnostics []*schema.Diagnostic) {\n\tfor i := range diagnostics {\n\t\tif int(diagnostics[i].Level()) >= int(hclog.LevelFromString(global.LogLevel())) {\n\t\t\tdefaultLogger.Log(hclog.LevelFromString(global.LogLevel())+1, diagnostics[i].Content())\n\t\t}\n\t}\n}\n\nvar sdkLogLevelToCLILevelMap map[schema.DiagnosticLevel]hclog.Level\n\nfunc init() {\n\tsdkLogLevelToCLILevelMap = make(map[schema.DiagnosticLevel]hclog.Level)\n\tsdkLogLevelToCLILevelMap[schema.DiagnosisLevelTrace] = hclog.Trace\n\tsdkLogLevelToCLILevelMap[schema.DiagnosisLevelDebug] = hclog.Debug\n\tsdkLogLevelToCLILevelMap[schema.DiagnosisLevelInfo] = hclog.Info\n\tsdkLogLevelToCLILevelMap[schema.DiagnosisLevelWarn] = hclog.Warn\n\tsdkLogLevelToCLILevelMap[schema.DiagnosisLevelError] = hclog.Error\n\tsdkLogLevelToCLILevelMap[schema.DiagnosisLevelFatal] = hclog.Error\n}\n\nfunc SDKLogLevelToCliLevel(level schema.DiagnosticLevel) hclog.Level {\n\tlogLevel, exists := sdkLogLevelToCLILevelMap[level]\n\tif exists {\n\t\treturn logLevel\n\t} else {\n\t\treturn hclog.Info\n\t}\n}\n\nfunc PrintDiagnostic(diagnostics []*schema.Diagnostic) error {\n\tvar err 
error\n\tfor _, diagnostic := range diagnostics {\n\t\tlogLevel := SDKLogLevelToCliLevel(diagnostic.Level())\n\t\tif int(logLevel) >= int(hclog.LevelFromString(global.LogLevel())) {\n\t\t\tdefaultLogger.Log(logLevel, diagnostic.Content())\n\t\t\tPrintln(levelColor[logLevel], diagnostic.Content())\n\t\t\tif diagnostic.Level() == schema.DiagnosisLevelError {\n\t\t\t\terr = errors.New(diagnostic.Content())\n\t\t\t}\n\t\t}\n\t}\n\treturn err\n}\n\nfunc PrintDiagnostics(diagnostics *schema.Diagnostics) error {\n\tif diagnostics == nil {\n\t\treturn nil\n\t}\n\treturn PrintDiagnostic(diagnostics.GetDiagnosticSlice())\n}\n"
  },
  {
    "path": "cli_ui/progress.go",
    "content": "package cli_ui\n\nimport (\n\t\"github.com/vbauerster/mpb/v7\"\n\t\"github.com/vbauerster/mpb/v7/decor\"\n\t\"io\"\n\t\"strings\"\n\t\"sync\"\n\t\"time\"\n)\n\n// Bar represents a progress bar\ntype Bar struct {\n\tb       *mpb.Bar\n\tName    string\n\tDesc    string\n\tStart   time.Time\n\tCurrent time.Time\n}\n\n// Progress is a progress object\ntype Progress struct {\n\tp       *mpb.Progress\n\tbars    sync.Map\n\tbuilder strings.Builder\n\tlock    sync.Mutex\n}\n\nfunc ShowDesc(bar *Bar, wcc ...decor.WC) decor.Decorator {\n\tproducer := func(bar *Bar, wcc ...decor.WC) decor.DecorFunc {\n\t\treturn func(s decor.Statistics) string {\n\t\t\treturn bar.Desc\n\t\t}\n\t}\n\treturn decor.Any(producer(bar), wcc...)\n}\n\nfunc (p *Progress) TrackProgress(src string, currentSize, totalSize int64, stream io.ReadCloser) (body io.ReadCloser) {\n\tp.IOBar(src, stream, totalSize)\n\treturn io.NopCloser(strings.NewReader(p.builder.String()))\n}\n\n// DefaultProgress creates a new progress object\nfunc DefaultProgress() *Progress {\n\tp := &Progress{\n\t\tp:    mpb.New(),\n\t\tbars: sync.Map{},\n\t}\n\treturn p\n}\nfunc (p *Progress) IOBar(name string, reader io.Reader, total int64) {\n\n\tbar := p.p.New(total,\n\t\tmpb.BarStyle().Rbound(\"|\"),\n\t\tmpb.PrependDecorators(\n\t\t\tdecor.CountersKibiByte(\"% .2f / % .2f\"),\n\t\t),\n\t\tmpb.AppendDecorators(\n\t\t\tdecor.EwmaETA(decor.ET_STYLE_GO, 90),\n\t\t\tdecor.Name(\" ] \"),\n\t\t\tdecor.EwmaSpeed(decor.UnitKiB, \"% .2f\", 60),\n\t\t),\n\t)\n\t// create proxy reader\n\tproxyReader := bar.ProxyReader(reader)\n\tdefer proxyReader.Close()\n\n\t// copy from proxyReader, ignoring errors\n\t_, _ = io.Copy(&p.builder, proxyReader)\n\tp.p.Wait()\n}\n\n// Add adds a new bar to the progress\nfunc (p *Progress) Add(name string, total int64) {\n\n\t_, ok := p.bars.Load(name)\n\tif ok {\n\t\treturn\n\t}\n\tvar bar Bar\n\tbar.b = 
p.p.AddBar(\n\t\ttotal,\n\t\tmpb.BarWidth(100),\n\t\tmpb.PrependDecorators(\n\t\t\tdecor.Name(name, decor.WCSyncSpaceR),\n\t\t\tdecor.CountersNoUnit(\"[%d/%d]\", decor.WCSyncWidth),\n\t\t),\n\t\tmpb.AppendDecorators(\n\t\t\tdecor.OnComplete(\n\t\t\t\tdecor.Elapsed(decor.ET_STYLE_GO),\n\t\t\t\t\"Success\",\n\t\t\t),\n\t\t),\n\t)\n\tbar.Start = time.Now()\n\tbar.Current = time.Now()\n\tbar.Name = name\n\tp.bars.Store(name, &bar)\n}\n\nfunc (p *Progress) Increment(name string, n int64) {\n\n\tbar, ok := p.bars.Load(name)\n\tif !ok {\n\t\treturn\n\t}\n\tbar.(*Bar).b.IncrInt64(n)\n\tbar.(*Bar).Current = time.Now()\n}\n\nfunc (p *Progress) Current(name string, n int64, desc ...string) {\n\tbar, ok := p.bars.Load(name)\n\tif !ok {\n\t\treturn\n\t}\n\tbar.(*Bar).b.SetCurrent(n)\n\tbar.(*Bar).Current = time.Now()\n\tbar.(*Bar).Desc = desc[0]\n}\n\nfunc (p *Progress) SetTotal(name string, n int64) {\n\tbar, ok := p.bars.Load(name)\n\tif !ok {\n\t\treturn\n\t}\n\tbar.(*Bar).b.SetTotal(n, false)\n\tbar.(*Bar).Current = time.Now()\n}\n\nfunc (p *Progress) Next(name string) {\n\tp.Increment(name, 1)\n}\n\nfunc (p *Progress) Done(name string) {\n\tbar, ok := p.bars.Load(name)\n\tif !ok {\n\t\treturn\n\t}\n\tbar.(*Bar).b.EnableTriggerComplete()\n}\n\nfunc (p *Progress) Wait(name string) {\n\tbar, ok := p.bars.Load(name)\n\tif !ok {\n\t\treturn\n\t}\n\tbar.(*Bar).b.Wait()\n}\n"
  },
  {
    "path": "cli_ui/select_providers.go",
    "content": "package cli_ui\n\nimport (\n\tui \"github.com/gizak/termui/v3\"\n\t\"github.com/gizak/termui/v3/widgets\"\n\t\"log\"\n)\n\nconst DefaultSelectProvidersTitle = \"[ Use arrows to move, Space to select, Enter to complete the selection ]\"\n\n// SelectProviders Give a list of providers and let the user select some of them\n// Does the installation sequence have to be consistent with the selected sequence? Temporarily, I think it can be inconsistent\nfunc SelectProviders(providers []string, title ...string) map[string]struct{} {\n\n\tif len(title) == 0 {\n\t\ttitle = append(title, DefaultSelectProvidersTitle)\n\t}\n\n\tselectProviders := make(map[string]struct{})\n\n\tif err := ui.Init(); err != nil {\n\t\tlog.Fatalf(\"failed to initialize termui: %v\", err)\n\t}\n\tdefer ui.Close()\n\n\tl := newList(title[0], listForShow(providers, selectProviders))\n\tui.Render(l)\n\n\tpreviousKey := \"\"\n\tuiEvents := ui.PollEvents()\n\n\tfor {\n\t\te := <-uiEvents\n\t\tswitch e.ID {\n\t\tcase \"j\", \"<Down>\":\n\t\t\tif len(l.Rows) == 0 {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tl.ScrollDown()\n\t\tcase \"k\", \"<Up>\":\n\t\t\tif len(l.Rows) == 0 {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tl.ScrollUp()\n\t\tcase \"<C-d>\":\n\t\t\tif len(l.Rows) == 0 {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tl.ScrollHalfPageDown()\n\t\tcase \"<C-c>\":\n\t\t\treturn nil\n\t\tcase \"<C-u>\":\n\t\t\tif len(l.Rows) == 0 {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tl.ScrollHalfPageUp()\n\t\tcase \"<C-f>\":\n\t\t\tif len(l.Rows) == 0 {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tl.ScrollPageDown()\n\t\tcase \"<C-b>\":\n\t\t\tif len(l.Rows) == 0 {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tl.ScrollPageUp()\n\t\tcase \"g\":\n\t\t\tif len(l.Rows) == 0 {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tif previousKey == \"g\" {\n\t\t\t\tl.ScrollTop()\n\t\t\t}\n\t\tcase \"<Enter>\":\n\t\t\treturn selectProviders\n\t\tcase \"<Space>\":\n\n\t\t\tif len(l.Rows) == 0 {\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\t// Select or deselect 
provider\n\t\t\toperateProviderName := providers[l.SelectedRow]\n\t\t\tif _, exists := selectProviders[operateProviderName]; exists {\n\t\t\t\tdelete(selectProviders, operateProviderName)\n\t\t\t} else {\n\t\t\t\tselectProviders[operateProviderName] = struct{}{}\n\t\t\t}\n\t\t\tl.Rows = listForShow(providers, selectProviders)\n\n\t\tcase \"<Home>\":\n\t\t\tif len(l.Rows) == 0 {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tl.ScrollTop()\n\t\tcase \"G\", \"<End>\":\n\t\t\tif len(l.Rows) == 0 {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tl.ScrollBottom()\n\t\t}\n\n\t\tif previousKey == \"g\" {\n\t\t\tpreviousKey = \"\"\n\t\t} else {\n\t\t\tpreviousKey = e.ID\n\t\t}\n\n\t\tui.Render(l)\n\t}\n}\n\n// Create a list widgets for select providers\nfunc newList(title string, lines []string) *widgets.List {\n\tl := widgets.NewList()\n\tl.Rows = lines\n\tl.TextStyle = ui.NewStyle(ui.ColorYellow)\n\tl.WrapText = false\n\tl.Title = title\n\tl.BorderLeft = false\n\tl.BorderRight = false\n\tl.BorderTop = false\n\tl.BorderBottom = false\n\tl.SelectedRowStyle = ui.NewStyle(ui.ColorRed)\n\tl.SetRect(0, 0, 800, 30)\n\treturn l\n}\n\n// Shows all the providers and which ones are currently selected\nfunc listForShow(providers []string, selectedProviders map[string]struct{}) []string {\n\tvar listProviders []string\n\tfor _, provider := range providers {\n\t\tif _, exists := selectedProviders[provider]; exists {\n\t\t\t// Putting the checkbox first avoids the provider name alignment problem\n\t\t\tlistProviders = append(listProviders, \" [✔] \"+provider)\n\t\t} else {\n\t\t\tlistProviders = append(listProviders, \" [ ] \"+provider)\n\t\t}\n\t}\n\treturn listProviders\n}\n"
  },
  {
    "path": "cli_ui/select_providers_test.go",
    "content": "package cli_ui\n\nimport (\n\t\"testing\"\n)\n\nfunc TestSelectProviders(t *testing.T) {\n\t//providers := []string{\n\t//\t\"aws\",\n\t//\t\"gcp\",\n\t//\t\"azure\",\n\t//}\n\t//selectProviders := SelectProviders(providers)\n\t//t.Log(selectProviders)\n}\n\nfunc TestSelectProviders_Big(t *testing.T) {\n\t//var providers []string\n\t//for i := 0; i < 10000; i++ {\n\t//\tproviders = append(providers, fmt.Sprintf(\"%d-provider-%s\", i, id_util.RandomId()))\n\t//}\n\t//selectProviders := SelectProviders(providers)\n\t//t.Log(selectProviders)\n}\n"
  },
  {
    "path": "cli_ui/table.go",
    "content": "package cli_ui\n\nimport (\n\t\"fmt\"\n\t\"github.com/olekukonko/tablewriter\"\n\t\"os\"\n\t\"strconv\"\n\t\"strings\"\n)\n\n// ShowTable Shows which tables are currently available\nfunc ShowTable(tableHeader []string, tableBody [][]string, tableFooter []string, setBorder bool) {\n\tdata := tableBody\n\ttable := tablewriter.NewWriter(os.Stdout)\n\ttable.SetHeader(tableHeader)\n\tif len(tableFooter) > 0 {\n\t\ttable.SetFooter(tableFooter) // Add Footer\n\t}\n\ttable.SetBorder(setBorder) // Set Border to false\n\ttable.AppendBulk(data)     // Add Bulk Data\n\ttable.Render()\n}\n\n// ShowRows Display the table on the console\n// TODO refactor function\nfunc ShowRows(tableHeader []string, tableBodyMatrix [][]string, tableFooter []string, setBorder bool) {\n\tbuilder := strings.Builder{}\n\ttableF := \"\\t%\" + strconv.Itoa(columnMaxWidth(tableHeader)) + \"s\"\n\tfor rowIndex, row := range tableBodyMatrix {\n\t\tbuilder.WriteString(fmt.Sprintf(\"\\n*********** Row %d **********\\n\\n\", rowIndex))\n\t\tfor columnIndex, column := range row {\n\t\t\tbuilder.WriteString(fmt.Sprintf(tableF+\":\\t%s\\n\", tableHeader[columnIndex], column))\n\t\t}\n\t}\n\tfmt.Println(builder.String())\n}\n\n// The width of the widest column of several columns is for column width alignment\nfunc columnMaxWidth(columns []string) int {\n\tmaxWidth := 0\n\tfor _, column := range columns {\n\t\tif len(column) > maxWidth {\n\t\t\tmaxWidth = len(column)\n\t\t}\n\t}\n\treturn maxWidth\n}\n"
  },
  {
    "path": "cli_ui/table_test.go",
    "content": "package cli_ui\n\nfunc ExampleShowRows() {\n\n\ttableHeader := []string{\n\t\t\"id\", \"name\", \"age\",\n\t}\n\ttableBody := [][]string{\n\t\t{\n\t\t\t\"1\", \"Tom\", \"18\",\n\t\t},\n\t\t{\n\t\t\t\"2\", \"Ada\", \"26\",\n\t\t},\n\t\t{\n\t\t\t\"3\", \"Sam\", \"30\",\n\t\t},\n\t}\n\ttableFooter := []string{\n\t\t\"footer1\", \"footer2\", \"footer\",\n\t}\n\tsetBorder := true\n\tShowRows(tableHeader, tableBody, tableFooter, setBorder)\n\t// Output:\n\t// *********** Row 0 **********\n\t//\n\t//\t  id:\t1\n\t//\tname:\tTom\n\t//\t age:\t18\n\t//\n\t//*********** Row 1 **********\n\t//\n\t//\t  id:\t2\n\t//\tname:\tAda\n\t//\t age:\t26\n\t//\n\t//*********** Row 2 **********\n\t//\n\t//\t  id:\t3\n\t//\tname:\tSam\n\t//\t age:\t30\n\n}\n\nfunc ExampleShowTable() {\n\n\ttableHeader := []string{\n\t\t\"id\", \"name\", \"age\",\n\t}\n\ttableBody := [][]string{\n\t\t{\n\t\t\t\"1\", \"Tom\", \"18\",\n\t\t},\n\t\t{\n\t\t\t\"2\", \"Ada\", \"26\",\n\t\t},\n\t\t{\n\t\t\t\"3\", \"Sam\", \"30\",\n\t\t},\n\t}\n\ttableFooter := []string{\n\t\t\"footer1\", \"footer2\", \"footer\",\n\t}\n\tsetBorder := true\n\tShowTable(tableHeader, tableBody, tableFooter, setBorder)\n\n\t// Output:\n\t// +---------+---------+--------+\n\t// |   ID    |  NAME   |  AGE   |\n\t// +---------+---------+--------+\n\t// |       1 | Tom     |     18 |\n\t// |       2 | Ada     |     26 |\n\t// |       3 | Sam     |     30 |\n\t// +---------+---------+--------+\n\t// | FOOTER1 | FOOTER2 | FOOTER |\n\t// +---------+---------+--------+\n}\n"
  },
  {
    "path": "cli_ui/user.go",
    "content": "package cli_ui\n\nimport (\n\t\"fmt\"\n\t\"github.com/selefra/selefra-provider-sdk/provider/schema\"\n\t\"github.com/selefra/selefra/pkg/cloud_sdk\"\n\t\"github.com/selefra/selefra/pkg/utils\"\n\t\"strings\"\n)\n\n// ------------------------------------------------- --------------------------------------------------------------------\n\n// CloudTokenRequestPath What is the request path to obtain the cloud token\n// If there is a change in the address of the cloud side, synchronize it here\nconst CloudTokenRequestPath = \"/Settings/accessTokens\"\n\n// InputCloudToken Guide the user to enter a cloud token\nfunc InputCloudToken(serverUrl string) (string, *schema.Diagnostics) {\n\n\tdiagnostics := schema.NewDiagnostics()\n\n\ttipsTemplate := `selefra will login https://app.selefra.io in your default browser.\nif login is successful, the token will be stored as a plain text file for future usage:\n\n   Enter your access token from https://app.selefra.io{{.CloudTokenRequestPath}}\n   or hit <ENTER> to complete login in browser:`\n\n\t// Render display tips\n\tdata := make(map[string]string)\n\tdata[\"CloudTokenRequestPath\"] = CloudTokenRequestPath\n\tinputCloudTokenTips, err := utils.RenderingTemplate(\"input-token-tips-template\", tipsTemplate, data)\n\tif err != nil {\n\t\treturn \"\", diagnostics.AddErrorMsg(\"input-token-tips-template render error: %s\", err.Error())\n\t}\n\tfmt.Println(inputCloudTokenTips)\n\n\t// Open a browser window to allow the user to log in\n\t_, _, _ = utils.OpenBrowser(\"https://app.selefra.io\" + CloudTokenRequestPath)\n\n\t// Read the token entered by the user\n\tvar rawToken string\n\t_, err = fmt.Scanln(&rawToken)\n\t//reader := bufio.NewReader(os.Stdin)\n\t//rawToken, err := reader.ReadString('\\n')\n\tif err != nil {\n\t\treturn \"\", diagnostics.AddErrorMsg(\"Input cloud token error: %s\", err.Error())\n\t}\n\tcloudToken := strings.TrimSpace(strings.Replace(rawToken, \"\\n\", \"\", -1))\n\tif cloudToken == \"\" 
{\n\t\treturn \"\", diagnostics.AddErrorMsg(\"No token provided\")\n\t}\n\n\treturn cloudToken, diagnostics\n}\n\n// ShowLoginSuccess The CLI prompt indicating successful login is displayed\nfunc ShowLoginSuccess(serverUrl string, cloudCredentials *cloud_sdk.CloudCredentials) {\n\tloginSuccessTemplate := `\nRetrieved token for user: {{.UserName}}.\nWelcome to Selefra CloudClient!\nLogged in to selefra as {{.UserName}} (https://{{.ServerHost}}/{{.OrgName}})\n`\n\ttemplate, err := utils.RenderingTemplate(\"login-success-tips-template\", loginSuccessTemplate, cloudCredentials)\n\tif err != nil {\n\t\tErrorf(\"render login success message error: %s\\n\", err.Error())\n\t\treturn\n\t}\n\tSuccessf(template)\n}\n\n// ShowLoginFailed Displays a login failure message\nfunc ShowLoginFailed(cloudToken string) {\n\tErrorf(\"You input token %s login failed \\n\", cloudToken)\n}\n\n// ------------------------------------------------- --------------------------------------------------------------------\n\n// ShowRetrievedCloudCredentials Displays the results of the local retrieval of login credentials\nfunc ShowRetrievedCloudCredentials(cloudCredentials *cloud_sdk.CloudCredentials) {\n\tif cloudCredentials == nil {\n\t\treturn\n\t}\n\tSuccessf(fmt.Sprintf(\"Auto login with user %s \\n\", cloudCredentials.UserName))\n}\n\n// ------------------------------------------------- --------------------------------------------------------------------\n\n// ShowLogout Display the logout success prompt\nfunc ShowLogout(cloudCredentials *cloud_sdk.CloudCredentials) {\n\tif cloudCredentials == nil {\n\t\treturn\n\t}\n\tSuccessf(fmt.Sprintf(\"User %s logout success \\n\", cloudCredentials.UserName))\n}\n\n// ------------------------------------------------- --------------------------------------------------------------------\n"
  },
  {
    "path": "cmd/apply/apply.go",
    "content": "package apply\n\nimport (\n\t\"context\"\n\t\"github.com/selefra/selefra-provider-sdk/provider/schema\"\n\t\"github.com/selefra/selefra/cli_ui\"\n\t\"github.com/selefra/selefra/config\"\n\t\"github.com/selefra/selefra/global\"\n\t\"github.com/selefra/selefra/pkg/message\"\n\t\"github.com/selefra/selefra/pkg/modules/executors\"\n\t\"github.com/selefra/selefra/pkg/utils\"\n\t\"github.com/spf13/cobra\"\n\t\"sync/atomic\"\n)\n\nfunc NewApplyCmd() *cobra.Command {\n\tcmd := &cobra.Command{\n\t\tUse:              \"apply\",\n\t\tShort:            \"Analyze infrastructure\",\n\t\tLong:             \"Analyze infrastructure\",\n\t\tPersistentPreRun: global.DefaultWrappedInit(),\n\t\tRunE: func(cmd *cobra.Command, args []string) error {\n\t\t\toutput, _ := cmd.PersistentFlags().GetString(\"output\")\n\t\t\tdir, _ := cmd.PersistentFlags().GetString(\"dir\")\n\t\t\topenaiApiKey, _ := cmd.PersistentFlags().GetString(\"openai_api_key\")\n\t\t\topenaiMode, _ := cmd.PersistentFlags().GetString(\"openai_mode\")\n\t\t\topenaiLimit, _ := cmd.PersistentFlags().GetUint64(\"openai_limit\")\n\t\t\t//projectWorkspace := \"./test_data/test_query_module\"\n\t\t\t//downloadWorkspace := \"./test_download\"\n\t\t\tinstructions := make(map[string]interface{})\n\t\t\tinstructions[\"output\"] = output\n\t\t\tinstructions[\"dir\"] = dir\n\t\t\tinstructions[\"openai_api_key\"] = openaiApiKey\n\t\t\tinstructions[\"openai_mode\"] = openaiMode\n\t\t\tinstructions[\"openai_limit\"] = openaiLimit\n\t\t\tprojectWorkspace := \"./\"\n\t\t\tdownloadWorkspace, _ := config.GetDefaultDownloadCacheDirectory()\n\n\t\t\treturn Apply(cmd.Context(), instructions, projectWorkspace, downloadWorkspace)\n\t\t},\n\t}\n\tcmd.PersistentFlags().StringP(\"output\", \"p\", \"\", \"display content format\")\n\tcmd.PersistentFlags().StringP(\"dir\", \"d\", \"\", \"define the output directory\")\n\tcmd.PersistentFlags().StringP(\"openai_api_key\", \"k\", \"\", \"your 
openai_api_key\")\n\tcmd.PersistentFlags().StringP(\"openai_mode\", \"m\", \"\", \"what mode to use for analysis\\n\")\n\tcmd.PersistentFlags().Uint64P(\"openai_limit\", \"i\", 10, \"how many pieces were analyzed in total\")\n\n\tcmd.SetHelpFunc(cmd.HelpFunc())\n\treturn cmd\n}\n\n// ------------------------------------------------- --------------------------------------------------------------------\n\n// Apply a project\nfunc Apply(ctx context.Context, instructions map[string]interface{}, projectWorkspace, downloadWorkspace string) error {\n\n\thasError := atomic.Bool{}\n\tmessageChannel := message.NewChannel[*schema.Diagnostics](func(index int, message *schema.Diagnostics) {\n\t\tif utils.IsNotEmpty(message) {\n\t\t\tif err := cli_ui.PrintDiagnostics(message); err != nil {\n\t\t\t\thasError.Store(true)\n\t\t\t}\n\t\t}\n\t})\n\td := executors.NewProjectLocalLifeCycleExecutor(&executors.ProjectLocalLifeCycleExecutorOptions{\n\t\tInstruction:          instructions,\n\t\tProjectWorkspace:     projectWorkspace,\n\t\tDownloadWorkspace:    downloadWorkspace,\n\t\tMessageChannel:       messageChannel,\n\t\tProjectLifeCycleStep: executors.ProjectLifeCycleStepQuery,\n\t\tFetchStep:            executors.FetchStepFetch,\n\t\tProjectCloudLifeCycleExecutorOptions: &executors.ProjectCloudLifeCycleExecutorOptions{\n\t\t\tEnableConsoleTips: true,\n\t\t\tIsNeedLogin:       true,\n\t\t},\n\t\t//DSN:                                  env.GetDatabaseDsn(),\n\t\tFetchWorkerNum: 1,\n\t\tQueryWorkerNum: 1,\n\t}).Execute(ctx)\n\tmessageChannel.ReceiverWait()\n\tif err := cli_ui.PrintDiagnostics(d); err != nil {\n\t\tcli_ui.Errorln(\"Apply failed\")\n\t\treturn err\n\t\t//} else if hasError.Load() {\n\t\t//\tcli_ui.Errorln(\"Apply failed\")\n\t\t//\treturn fmt.Errorf(\"Apply Failed\")\n\t} else {\n\t\tcli_ui.Infoln(\"Apply done\")\n\t\treturn nil\n\t}\n}\n\n// ------------------------------------------------- --------------------------------------------------------------------\n"
  },
  {
    "path": "cmd/apply/apply_test.go",
    "content": "package apply\n\nimport (\n\t\"context\"\n\t\"github.com/stretchr/testify/assert\"\n\t\"testing\"\n)\n\nfunc TestApply(t *testing.T) {\n\t//projectWorkspace := \"D:\\\\workspace\\\\module-mock-test\"\n\tprojectWorkspace := \"./test_data/test_query_module\"\n\t//projectWorkspace := \"D:\\\\selefra\\\\workplace\\\\sfslack-v2-bak\"\n\tdownloadWorkspace := \"./test_download\"\n\tInstructions := make(map[string]interface{})\n\tInstructions[\"dir\"] = \"./ssss\"\n\terr := Apply(context.Background(), Instructions, projectWorkspace, downloadWorkspace)\n\tassert.Nil(t, err)\n}\n"
  },
  {
    "path": "cmd/apply/test_data/test_query_module/modules.yaml",
    "content": "modules:\n  - name: Misconfigure-S3\n    uses: ./rules\n"
  },
  {
    "path": "cmd/apply/test_data/test_query_module/rules/rule.yaml",
    "content": "rules:\n  - name: ebs_encryption_is_disabled_by_default\n    query: |\n      SELECT\n        *\n      FROM\n        aws_ec2_regional_configs\n      WHERE\n        ebs_encryption_enabled_by_default IS FALSE;\n    labels:\n      resource_account_id: '{{.account_id}}'\n      resource_id: '{{.region}}'\n      resource_region: '{{.region}}'\n      resource_type: S3\n      bucket_url: 'https://{{.name}}.s3.{{.region}}.amazonaws.com'\n      test:\n        - test-b:\n            test: test\n            test1: test1\n    metadata:\n      author: Selefra\n      id: SF010117\n      provider: AWS\n      remediation:\n      severity: Low\n      tags:\n        - Security\n        - Misconfiguration\n      title: S3 Bucket is not configured with CORS rules\n      main_table: \"aws_ec2_regional_configs\"\n    output: \"EBS encryption is disabled by default, region id: {{.region}}\"\n    main_table: \"aws_ec2_regional_configs\""
  },
  {
    "path": "cmd/apply/test_data/test_query_module/selefra.yaml",
    "content": "selefra:\n  cloud:\n    project: example_project\n    organization: example_org\n  #    hostname: app.selefra.io\n  connection:\n    type: postgres\n    username: postgres\n    password: pass\n    host: localhost\n    port: \"5432\"\n    database: postgres\n    sslmode: disable\n  name: example_project\n  cli_version: v0.0.1\n  providers:\n    - name: aws\n      source: aws\n      version: latest\n#    - name: gcp\n#      source: gcp\n#      version: \">=0.0.9,<=0.0.10\"\n\n\nproviders:\n  - name: AWS001\n    cache: 1d\n    provider: aws\n    max_goroutines: 100\n"
  },
  {
    "path": "cmd/fetch/fetch.go",
    "content": "package fetch\n\nimport (\n\t\"context\"\n\t\"github.com/selefra/selefra-provider-sdk/provider/schema\"\n\t\"github.com/selefra/selefra/cli_ui\"\n\t\"github.com/selefra/selefra/config\"\n\t\"github.com/selefra/selefra/global\"\n\t\"github.com/selefra/selefra/pkg/message\"\n\t\"github.com/selefra/selefra/pkg/modules/executors\"\n\t\"github.com/selefra/selefra/pkg/utils\"\n\t\"github.com/spf13/cobra\"\n\t\"sync/atomic\"\n)\n\nfunc NewFetchCmd() *cobra.Command {\n\tcmd := &cobra.Command{\n\t\tUse:              \"fetch\",\n\t\tShort:            \"Fetch resources from configured providers\",\n\t\tLong:             \"Fetch resources from configured providers\",\n\t\tPersistentPreRun: global.DefaultWrappedInit(),\n\t\tRunE: func(cmd *cobra.Command, args []string) error {\n\n\t\t\tprojectWorkspace := \"./\"\n\t\t\tdownloadWorkspace, _ := config.GetDefaultDownloadCacheDirectory()\n\n\t\t\t//cli_runtime.Init(projectWorkspace)\n\n\t\t\tFetch(projectWorkspace, downloadWorkspace)\n\n\t\t\treturn nil\n\t\t},\n\t}\n\n\tcmd.SetHelpFunc(cmd.HelpFunc())\n\treturn cmd\n}\n\nfunc Fetch(projectWorkspace, downloadWorkspace string) *schema.Diagnostics {\n\n\thasError := atomic.Bool{}\n\tmessageChannel := message.NewChannel[*schema.Diagnostics](func(index int, message *schema.Diagnostics) {\n\t\tif utils.IsNotEmpty(message) {\n\t\t\tif err := cli_ui.PrintDiagnostics(message); err != nil {\n\t\t\t\thasError.Store(true)\n\t\t\t}\n\t\t}\n\t})\n\td := executors.NewProjectLocalLifeCycleExecutor(&executors.ProjectLocalLifeCycleExecutorOptions{\n\t\tProjectWorkspace:                     projectWorkspace,\n\t\tDownloadWorkspace:                    downloadWorkspace,\n\t\tMessageChannel:                       messageChannel,\n\t\tProjectLifeCycleStep:                 executors.ProjectLifeCycleStepFetch,\n\t\tFetchStep:                            executors.FetchStepFetch,\n\t\tProjectCloudLifeCycleExecutorOptions: nil,\n\t\t//DSN:                                  
env.GetDatabaseDsn(),\n\t\tFetchWorkerNum: 1,\n\t\tQueryWorkerNum: 20,\n\t}).Execute(context.Background())\n\tmessageChannel.ReceiverWait()\n\t_ = cli_ui.PrintDiagnostics(d)\n\tif utils.HasError(d) || hasError.Load() {\n\t\tcli_ui.Errorln(\"fetch failed!\")\n\t} else {\n\t\tcli_ui.Infoln(\"fetch done!\")\n\t}\n\n\treturn nil\n}\n"
  },
  {
    "path": "cmd/fetch/fetch_test.go",
    "content": "package fetch\n\nimport (\n\t\"testing\"\n)\n\nfunc TestFetch(t *testing.T) {\n\tprojectWorkspace := \"./test_data/test_fetch_module\"\n\tdownloadWorkspace := \"./test_download\"\n\tFetch(projectWorkspace, downloadWorkspace)\n}\n"
  },
  {
    "path": "cmd/fetch/test_data/test_fetch_module/modules.yaml",
    "content": "selefra:\n  cloud:\n    project: example_project\n    organization: example_org\n    hostname: app.selefra.io\n  #  connection:\n  #    type: postgres\n  #    username: postgres\n  #    password: pass\n  #    host: localhost\n  #    port: 5432\n  #    database: postgres\n  #    sslmode: disable\n  name: example_project\n  cli_version: v0.0.1\n  providers:\n    - name: mock\n      source: mock\n      version: latest\n\nproviders:\n  - name: mock\n    cache: 1d\n    provider: mock\n    foo-count: 1\n    bar-count: 1\n    sleep-seconds: 0"
  },
  {
    "path": "cmd/gpt/gpt.go",
    "content": "package gpt\n\nimport (\n\t\"context\"\n\t\"errors\"\n\t\"github.com/selefra/selefra-provider-sdk/provider/schema\"\n\t\"github.com/selefra/selefra/cli_ui\"\n\t\"github.com/selefra/selefra/config\"\n\t\"github.com/selefra/selefra/global\"\n\t\"github.com/selefra/selefra/pkg/message\"\n\t\"github.com/selefra/selefra/pkg/modules/executors\"\n\t\"github.com/selefra/selefra/pkg/utils\"\n\t\"github.com/spf13/cobra\"\n\t\"sync/atomic\"\n)\n\nfunc NewGPTCmd() *cobra.Command {\n\tcmd := &cobra.Command{\n\t\tUse:              \"gpt [prompt]\",\n\t\tShort:            \"Use ChatGPT for analysis\",\n\t\tLong:             \"Use ChatGPT for analysis\",\n\t\tPersistentPreRun: global.DefaultWrappedInit(),\n\t\tRunE: func(cmd *cobra.Command, args []string) error {\n\t\t\tif len(args) < 1 {\n\t\t\t\treturn errors.New(\"your need to input a prompt\")\n\t\t\t}\n\t\t\tquery := args[0]\n\t\t\topenaiApiKey, _ := cmd.PersistentFlags().GetString(\"openai_api_key\")\n\t\t\tdir, _ := cmd.PersistentFlags().GetString(\"dir\")\n\t\t\topenaiMode, _ := cmd.PersistentFlags().GetString(\"openai_mode\")\n\t\t\topenaiLimit, _ := cmd.PersistentFlags().GetUint64(\"openai_limit\")\n\t\t\toutput, _ := cmd.PersistentFlags().GetString(\"output\")\n\n\t\t\t//projectWorkspace := \"./test_data/test_query_module\"\n\t\t\t//downloadWorkspace := \"./test_download\"\n\n\t\t\tprojectWorkspace := \"./\"\n\t\t\tdownloadWorkspace, _ := config.GetDefaultDownloadCacheDirectory()\n\n\t\t\tinstructions := make(map[string]interface{})\n\t\t\tinstructions[\"query\"] = query\n\t\t\tinstructions[\"dir\"] = dir\n\t\t\tinstructions[\"openai_api_key\"] = openaiApiKey\n\t\t\tinstructions[\"openai_mode\"] = openaiMode\n\t\t\tinstructions[\"openai_limit\"] = openaiLimit\n\t\t\tinstructions[\"output\"] = output\n\n\t\t\tif instructions[\"query\"] == nil || instructions[\"query\"] == \"\" {\n\t\t\t\treturn errors.New(\"query is required\")\n\t\t\t}\n\n\t\t\treturn Gpt(cmd.Context(), instructions, projectWorkspace, 
downloadWorkspace)\n\t\t},\n\t}\n\n\tcmd.PersistentFlags().StringP(\"output\", \"p\", \"\", \"display content format\")\n\tcmd.PersistentFlags().StringP(\"dir\", \"d\", \"\", \"define the output directory\")\n\tcmd.PersistentFlags().StringP(\"openai_api_key\", \"k\", \"\", \"your openai_api_key\")\n\tcmd.PersistentFlags().StringP(\"openai_mode\", \"m\", \"\", \"what mode to use for analysis\")\n\tcmd.PersistentFlags().Uint64P(\"openai_limit\", \"i\", 10, \"how many pieces were analyzed in total\")\n\n\tcmd.SetHelpFunc(cmd.HelpFunc())\n\treturn cmd\n}\n\n// ------------------------------------------------- --------------------------------------------------------------------\n\n// Apply a project\nfunc Gpt(ctx context.Context, instructions map[string]interface{}, projectWorkspace, downloadWorkspace string) error {\n\n\thasError := atomic.Bool{}\n\tmessageChannel := message.NewChannel[*schema.Diagnostics](func(index int, message *schema.Diagnostics) {\n\t\tif utils.IsNotEmpty(message) {\n\t\t\tif err := cli_ui.PrintDiagnostics(message); err != nil {\n\t\t\t\thasError.Store(true)\n\t\t\t}\n\t\t}\n\t})\n\n\td := executors.NewProjectLocalLifeCycleExecutor(&executors.ProjectLocalLifeCycleExecutorOptions{\n\t\tInstruction:          instructions,\n\t\tProjectWorkspace:     projectWorkspace,\n\t\tDownloadWorkspace:    downloadWorkspace,\n\t\tMessageChannel:       messageChannel,\n\t\tProjectLifeCycleStep: executors.ProjectLifeCycleStepQuery,\n\t\tFetchStep:            executors.FetchStepFetch,\n\t\tProjectCloudLifeCycleExecutorOptions: &executors.ProjectCloudLifeCycleExecutorOptions{\n\t\t\tEnableConsoleTips: true,\n\t\t\tIsNeedLogin:       true,\n\t\t},\n\t\t//DSN:                                  env.GetDatabaseDsn(),\n\t\tFetchWorkerNum: 1,\n\t\tQueryWorkerNum: 1,\n\t}).Execute(ctx)\n\tmessageChannel.ReceiverWait()\n\tif err := cli_ui.PrintDiagnostics(d); err != nil {\n\t\tcli_ui.Errorln(\"Gpt failed\")\n\t\treturn err\n\t} else {\n\t\tcli_ui.Infoln(\"Selefra 
Exit\")\n\t\treturn nil\n\t}\n}\n\n// ------------------------------------------------- --------------------------------------------------------------------\n"
  },
  {
    "path": "cmd/gpt/gpt_test.go",
    "content": "package gpt\n\nimport (\n\t\"context\"\n\t\"github.com/stretchr/testify/assert\"\n\t\"testing\"\n)\n\nfunc TestGpt(t *testing.T) {\n\t//projectWorkspace := \"D:\\\\workspace\\\\module-mock-test\"\n\tprojectWorkspace := \"./test_data/test_query_module\"\n\t//projectWorkspace := \"D:\\\\selefra\\\\workplace\\\\sfslack-v2-bak\"\n\tdownloadWorkspace := \"./test_download\"\n\tInstructions := make(map[string]interface{})\n\tInstructions[\"query\"] = \"Please help me analyze the vulnerabilities in AWS S3?\"\n\tInstructions[\"openai_api_key\"] = \"xx\"\n\tInstructions[\"openai_mode\"] = \"gpt-3.5\"\n\tInstructions[\"openai_limit\"] = uint64(10)\n\terr := Gpt(context.Background(), Instructions, projectWorkspace, downloadWorkspace)\n\tassert.Nil(t, err)\n}\n"
  },
  {
    "path": "cmd/gpt/test_data/test_query_module/modules.yaml",
    "content": "selefra:\n  openai_api_key: openkey\n  openai_mode: gpt-3.5\n  openai_limit: 10\n  cloud:\n    project: example_project\n    organization: example_org\n  #    hostname: app.selefra.io\n  connection:\n    type: postgres\n    username: postgres\n    password: pass\n    host: localhost\n    port: \"5432\"\n    database: postgres\n    sslmode: disable\n  name: example_project\n  cli_version: v0.0.1\n  providers:\n    - name: aws\n      source: aws\n      version: latest\n#    - name: gcp\n#      source: gcp\n#      version: \">=0.0.9,<=0.0.10\"\n\n\nproviders:\n  - name: aws\n    cache: 7d\n    provider: aws\n    foo-count: 10\n    bar-count: 10\n    sleep-seconds: 1\n    max_goroutines: 1\n\nrules:\n#  - name: bucket_versioning_is_disabled\n#    query: \"Please help me analyze the vulnerabilities in AWS S3?\"\n#    output: \"S3 bucket versioning is disabled, arn: {{.arn}}\"\n"
  },
  {
    "path": "cmd/init/init.go",
    "content": "package init\n\nimport (\n\t\"context\"\n\t\"errors\"\n\t\"github.com/selefra/selefra-provider-sdk/env\"\n\t\"github.com/selefra/selefra-provider-sdk/provider/schema\"\n\t\"github.com/selefra/selefra/cli_ui\"\n\t\"github.com/selefra/selefra/config\"\n\t\"github.com/selefra/selefra/pkg/cli_env\"\n\t\"github.com/selefra/selefra/pkg/cloud_sdk\"\n\t\"github.com/selefra/selefra/pkg/message\"\n\t\"github.com/selefra/selefra/pkg/modules/module_loader\"\n\t\"github.com/selefra/selefra/pkg/storage/pgstorage\"\n\t\"github.com/spf13/cobra\"\n\t\"os\"\n\t\"sync/atomic\"\n)\n\nfunc NewInitCmd() *cobra.Command {\n\tcmd := &cobra.Command{\n\t\tUse:   \"init [project name]\",\n\t\tShort: \"Prepare your working directory for other commands\",\n\t\tLong:  \"Prepare your working directory for other commands\",\n\t\tRunE: func(cmd *cobra.Command, args []string) error {\n\n\t\t\trelevance, _ := cmd.PersistentFlags().GetString(\"relevance\")\n\t\t\tforce, _ := cmd.PersistentFlags().GetBool(\"force\")\n\n\t\t\tdownloadDirectory, err := config.GetDefaultDownloadCacheDirectory()\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\n\t\t\tprojectWorkspace := \"./\"\n\n\t\t\tdsn, err := getDsn(cmd.Context(), projectWorkspace, downloadDirectory)\n\t\t\tif err != nil {\n\t\t\t\tcli_ui.Errorf(\"Get dsn error: %s \\n\", err.Error())\n\t\t\t\treturn err\n\t\t\t}\n\n\t\t\treturn NewInitCommandExecutor(&InitCommandExecutorOptions{\n\t\t\t\tIsForceInit:       force,\n\t\t\t\tRelevanceProject:  relevance,\n\t\t\t\tProjectWorkspace:  projectWorkspace,\n\t\t\t\tDownloadWorkspace: downloadDirectory,\n\t\t\t\tDSN:               dsn,\n\t\t\t}).Run(cmd.Context())\n\t\t},\n\t}\n\tcmd.PersistentFlags().BoolP(\"force\", \"f\", false, \"force overwriting the directory if it is not empty\")\n\tcmd.PersistentFlags().StringP(\"relevance\", \"r\", \"\", \"associate to selefra cloud project, use only after login\")\n\n\tcmd.SetHelpFunc(cmd.HelpFunc())\n\treturn cmd\n}\n\nfunc getDsn(ctx 
context.Context, projectWorkspace, downloadWorkspace string) (string, error) {\n\n\t// 1. load from project workspace\n\tdsn, _ := loadDSNFromProjectWorkspace(ctx, projectWorkspace, downloadWorkspace)\n\tif dsn != \"\" {\n\t\tcli_ui.Infof(\"Find database connection in workspace. %s \\n\", projectWorkspace)\n\t\treturn dsn, nil\n\t}\n\n\t// 2. load from selefra cloud\n\tclient, diagnostics := cloud_sdk.NewCloudClient(cli_env.GetServerHost())\n\tif err := cli_ui.PrintDiagnostics(diagnostics); err != nil {\n\t\treturn \"\", err\n\t}\n\tif c, _ := client.GetCredentials(); c != nil {\n\t\tc, d := client.Login(c.Token)\n\t\tif err := cli_ui.PrintDiagnostics(d); err != nil {\n\t\t\treturn \"\", err\n\t\t}\n\t\td = client.SaveCredentials(c)\n\t\tif err := cli_ui.PrintDiagnostics(d); err != nil {\n\t\t\treturn \"\", err\n\t\t}\n\t\torgDSN, d := client.FetchOrgDSN()\n\t\tif err := cli_ui.PrintDiagnostics(d); err != nil {\n\t\t\treturn \"\", err\n\t\t}\n\t\tif orgDSN != \"\" {\n\t\t\tcli_ui.Infof(\"Find database connection in you selefra cloud. \\n\")\n\t\t\treturn orgDSN, nil\n\t\t}\n\t}\n\n\t// 3. get dsn from env\n\tdsn = os.Getenv(env.DatabaseDsn)\n\tif dsn != \"\" {\n\t\tcli_ui.Infof(\"Find database connection in your env. \\n\")\n\t\treturn dsn, nil\n\t}\n\n\t// 4. start default postgresql instance\n\thasError := atomic.Bool{}\n\tmessageChannel := message.NewChannel[*schema.Diagnostics](func(index int, message *schema.Diagnostics) {\n\t\tif err := cli_ui.PrintDiagnostics(message); err != nil {\n\t\t\thasError.Store(true)\n\t\t}\n\t})\n\tdsn = pgstorage.DefaultPostgreSQL(downloadWorkspace, messageChannel)\n\tmessageChannel.ReceiverWait()\n\tif dsn != \"\" {\n\t\tcli_ui.Infof(\"Start default postgresql. 
\\n\")\n\t\treturn dsn, nil\n\t}\n\n\treturn \"\", errors.New(\"Can not find database connection\")\n}\n\n// Look for DSN in the configuration of the project's working directory\nfunc loadDSNFromProjectWorkspace(ctx context.Context, projectWorkspace, downloadWorkspace string) (string, error) {\n\tmessageChannel := message.NewChannel[*schema.Diagnostics](func(index int, message *schema.Diagnostics) {\n\t\t// Any error while loading will not print\n\t\t//if utils.IsNotEmpty(message) {\n\t\t//\t_ = cli_ui.PrintDiagnostics(message)\n\t\t//}\n\t})\n\tloader, err := module_loader.NewLocalDirectoryModuleLoader(&module_loader.LocalDirectoryModuleLoaderOptions{\n\t\tModuleLoaderOptions: &module_loader.ModuleLoaderOptions{\n\t\t\tSource:            projectWorkspace,\n\t\t\tVersion:           \"\",\n\t\t\tDownloadDirectory: downloadWorkspace,\n\t\t\tProgressTracker:   nil,\n\t\t\tMessageChannel:    messageChannel,\n\t\t\tDependenciesTree:  []string{projectWorkspace},\n\t\t},\n\t})\n\tif err != nil {\n\t\tmessageChannel.SenderWaitAndClose()\n\t\treturn \"\", err\n\t}\n\trootModule, b := loader.Load(ctx)\n\tif !b {\n\t\treturn \"\", nil\n\t}\n\tif rootModule.SelefraBlock != nil && rootModule.SelefraBlock.ConnectionBlock != nil && rootModule.SelefraBlock.ConnectionBlock.BuildDSN() != \"\" {\n\t\treturn rootModule.SelefraBlock.ConnectionBlock.BuildDSN(), nil\n\t}\n\treturn \"\", nil\n}\n"
  },
  {
    "path": "cmd/init/init_command_executor.go",
    "content": "package init\n\nimport (\n\t\"bufio\"\n\t\"context\"\n\t\"encoding/json\"\n\t\"errors\"\n\t\"fmt\"\n\t\"github.com/selefra/selefra-provider-sdk/grpc/shard\"\n\t\"github.com/selefra/selefra-provider-sdk/provider/schema\"\n\t\"github.com/selefra/selefra-provider-sdk/storage/database_storage/postgresql_storage\"\n\t\"github.com/selefra/selefra-utils/pkg/pointer\"\n\t\"github.com/selefra/selefra/cli_ui\"\n\t\"github.com/selefra/selefra/cmd/init/rule_example\"\n\t\"github.com/selefra/selefra/cmd/version\"\n\t\"github.com/selefra/selefra/pkg/cloud_sdk\"\n\t\"github.com/selefra/selefra/pkg/message\"\n\t\"github.com/selefra/selefra/pkg/modules/executors\"\n\t\"github.com/selefra/selefra/pkg/modules/module\"\n\t\"github.com/selefra/selefra/pkg/modules/parser\"\n\t\"github.com/selefra/selefra/pkg/modules/planner\"\n\t\"github.com/selefra/selefra/pkg/plugin\"\n\t\"github.com/selefra/selefra/pkg/providers/local_providers_manager\"\n\t\"github.com/selefra/selefra/pkg/registry\"\n\t\"github.com/selefra/selefra/pkg/storage/pgstorage\"\n\t\"github.com/selefra/selefra/pkg/utils\"\n\t\"gopkg.in/yaml.v3\"\n\t\"io\"\n\t\"os\"\n\t\"path/filepath\"\n\t\"strings\"\n\t\"sync/atomic\"\n)\n\n// ------------------------------------------------- --------------------------------------------------------------------\n\n// InitCommandExecutorOptions The execution options of the executor that executes the initialization command\ntype InitCommandExecutorOptions struct {\n\n\t// Where to put the downloaded file\n\tDownloadWorkspace string\n\n\t// Which path to initialize as the working directory for your project\n\tProjectWorkspace string\n\n\t// Whether to force the initialization of the working directory\n\tIsForceInit bool\n\n\t// Which project in the cloud you want to associate with\n\tRelevanceProject string\n\n\t// The database link to use\n\tDSN string\n}\n\n// ------------------------------------------------- 
--------------------------------------------------------------------\n\ntype InitCommandExecutor struct {\n\n\t// Used to connect to the selefra cloud\n\tcloudClient *cloud_sdk.CloudClient\n\n\t// Some options when executing the command\n\toptions *InitCommandExecutorOptions\n}\n\nfunc NewInitCommandExecutor(options *InitCommandExecutorOptions) *InitCommandExecutor {\n\treturn &InitCommandExecutor{\n\t\toptions: options,\n\t}\n}\n\nfunc (x *InitCommandExecutor) Run(ctx context.Context) error {\n\n\t// 1. Check and verify that the working directory can be initialized\n\tif !x.checkWorkspace() {\n\t\treturn nil\n\t}\n\n\t// 2. choose provider\n\tproviderSlice, err := x.chooseProvidersList(ctx)\n\tif err != nil {\n\t\treturn err\n\t}\n\tif len(providerSlice) == 0 {\n\t\tcli_ui.Infof(\"You not select provider\\n\")\n\t}\n\n\t// init files\n\tselefraBlock := x.initSelefraYaml(ctx, providerSlice)\n\tif selefraBlock != nil {\n\t\tx.initProvidersYaml(ctx, selefraBlock.RequireProvidersBlock)\n\t}\n\n\tx.initRulesYaml(providerSlice)\n\n\t//x.initModulesYaml()\n\n\tcli_ui.Infof(\"Selefra has been successfully initialized!\\n\")\n\tcli_ui.Infof(\"Your new Selefra project \\\"%s\\\" was created!\\n\", selefraBlock.Name)\n\tcli_ui.Infof(\"To perform an initial analysis, run selefra apply.\\n\")\n\treturn nil\n}\n\n// ------------------------------------------------- --------------------------------------------------------------------\n\nfunc (x *InitCommandExecutor) initHeaderOutput(providers []string) {\n\t//for i := range providers {\n\t//\tcli_ui.Successln(\" [✔]\" + providers[i] + \"\\n\")\n\t//}\n\tcli_ui.Infof(`\tRunning with selefra-cli %s\n\n\tThis command will walk you through creating a new Selefra project\n\n\tEnter a value or leave blank to accept the (default), and press <ENTER>.\n\tPress ^C at any time to quit.`, version.Version)\n\tcli_ui.Infof(\"\\n\\n\")\n}\n\nfunc (x *InitCommandExecutor) chooseProvidersList(ctx context.Context) ([]*registry.Provider, error) 
{\n\tproviderSlice, err := x.requestProvidersList(ctx)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tif len(providerSlice) == 0 {\n\t\treturn nil, fmt.Errorf(\"can not get provider list from registry\")\n\t}\n\n\tproviderNameSlice := make([]string, 0)\n\tfor _, provider := range providerSlice {\n\t\tproviderNameSlice = append(providerNameSlice, provider.Name)\n\t}\n\n\tx.initHeaderOutput(providerNameSlice)\n\n\tprovidersSet := cli_ui.SelectProviders(providerNameSlice)\n\tchooseProviderSlice := make([]*registry.Provider, 0)\n\tfor _, provider := range providerSlice {\n\t\tif _, exists := providersSet[provider.Name]; exists {\n\t\t\tchooseProviderSlice = append(chooseProviderSlice, provider)\n\t\t}\n\t}\n\treturn chooseProviderSlice, nil\n}\n\n// ------------------------------------------------- --------------------------------------------------------------------\n\nfunc (x *InitCommandExecutor) checkWorkspace() bool {\n\n\t// 1. check if workspace dir exist, create it\n\t_, err := os.Stat(x.options.ProjectWorkspace)\n\tif errors.Is(err, os.ErrNotExist) {\n\t\terr = os.Mkdir(x.options.ProjectWorkspace, 0755)\n\t\tif err != nil {\n\t\t\tcli_ui.Errorf(\"Create workspace directory: %s failed: %s\\n\", x.options.ProjectWorkspace, err.Error())\n\t\t\treturn false\n\t\t}\n\t\tcli_ui.Infof(\"Create workspace directory: %s success\\n\", x.options.ProjectWorkspace)\n\t}\n\n\tif x.isNeedForceInit() {\n\t\tif !x.options.IsForceInit {\n\t\t\tcli_ui.Errorf(\"Directory %s is not empty, rerun in an empty directory, or use -- force/-f to force overwriting in the current directory\\n\", x.options.ProjectWorkspace)\n\t\t\treturn false\n\t\t} else if !x.reInitConfirm() {\n\t\t\treturn false\n\t\t}\n\t}\n\n\treturn true\n}\n\n// ------------------------------------------------- --------------------------------------------------------------------\n\n// Determine whether mandatory initialization is required\nfunc (x *InitCommandExecutor) isNeedForceInit() bool {\n\tdir, _ := 
os.ReadDir(x.options.ProjectWorkspace)\n\tfiles := make([]string, 0)\n\tfor _, v := range dir {\n\t\t// Ignore files that are used internally\n\t\tif v.Name() == \"logs\" || v.Name() == \"selefra\" || v.Name() == \"selefra.exe\" {\n\t\t\tcontinue\n\t\t}\n\t\tfiles = append(files, v.Name())\n\t}\n\treturn len(files) != 0\n}\n\n// ------------------------------------------------- --------------------------------------------------------------------\n\nconst (\n\tSelefraInputInitForceConfirm     = \"SELEFRA_INPUT_INIT_FORCE_CONFIRM\"\n\tSelefraInputInitRelevanceProject = \"SELEFRA_INPUT_INIT_RELEVANCE_PROJECT\"\n)\n\n// reInitConfirm check if current workspace is selefra workspace, then tell user to choose if rewrite selefra workspace\nfunc (x *InitCommandExecutor) reInitConfirm() bool {\n\n\treader := bufio.NewReader(os.Stdin)\n\tcli_ui.Warningf(\"Warning: %s is already init. Continue and overwrite it?[Y/N]\", x.options.ProjectWorkspace)\n\ttext, err := reader.ReadString('\\n')\n\ttext = strings.TrimSpace(strings.ToLower(text))\n\tif err != nil && !errors.Is(err, io.EOF) {\n\t\tcli_ui.Errorf(\"Read you input error: %s\\n\", err.Error())\n\t\treturn false\n\t}\n\n\t// for test\n\tif text == \"\" {\n\t\ttext = os.Getenv(SelefraInputInitForceConfirm)\n\t}\n\n\tif text != \"y\" && text != \"Y\" {\n\t\tcli_ui.Errorf(\"Config file already exists\\n\")\n\t\treturn false\n\t}\n\n\treturn true\n}\n\n// ------------------------------------------------- --------------------------------------------------------------------\n\nfunc (x *InitCommandExecutor) initSelefraYaml(ctx context.Context, providerSlice []*registry.Provider) *module.SelefraBlock {\n\n\tselefraBlock := module.NewSelefraBlock()\n\tprojectName, b := x.getProjectName()\n\tif !b {\n\t\treturn nil\n\t}\n\tselefraBlock.Name = projectName\n\n\t// cloud block\n\tselefraBlock.CloudBlock = x.getCloudBlock(projectName)\n\n\t// cli version\n\tselefraBlock.CliVersion = version.Version\n\tselefraBlock.LogLevel = 
\"info\"\n\n\tif len(providerSlice) > 0 {\n\t\trequiredProviderSlice := make([]*module.RequireProviderBlock, len(providerSlice))\n\t\tfor index, provider := range providerSlice {\n\t\t\trequiredProviderBlock := module.NewRequireProviderBlock()\n\t\t\trequiredProviderBlock.Name = provider.Name\n\t\t\trequiredProviderBlock.Source = provider.Name\n\t\t\trequiredProviderBlock.Version = provider.Version\n\t\t\trequiredProviderSlice[index] = requiredProviderBlock\n\t\t}\n\t\tselefraBlock.RequireProvidersBlock = requiredProviderSlice\n\t}\n\n\tselefraBlock.ConnectionBlock = x.GetConnectionBlock()\n\n\tout, err := yaml.Marshal(selefraBlock)\n\tif err != nil {\n\t\tcli_ui.Errorf(\"Selefra block yaml.Marshal error: %s \\n\", err.Error())\n\t\treturn nil\n\t}\n\tvar selefraNode yaml.Node\n\terr = yaml.Unmarshal(out, &selefraNode)\n\tif err != nil {\n\t\tcli_ui.Errorf(\"Selefra yaml.Unmarshal error: %s \\n\", err.Error())\n\t\treturn nil\n\t}\n\tdocumentRoot := yaml.Node{\n\t\tKind: yaml.MappingNode,\n\t\tContent: []*yaml.Node{\n\t\t\t&yaml.Node{Kind: yaml.ScalarNode, Value: parser.SelefraBlockFieldName},\n\t\t\t&yaml.Node{Kind: yaml.MappingNode, Content: selefraNode.Content[0].Content},\n\t\t},\n\t}\n\tmarshal, err := yaml.Marshal(&documentRoot)\n\tif err != nil {\n\t\tcli_ui.Errorf(\"Selefra yaml.Marshal error: %s \\n\", err.Error())\n\t\treturn nil\n\t}\n\tselefraFullPath := filepath.Join(utils.AbsPath(x.options.ProjectWorkspace), \"selefra.yaml\")\n\terr = os.WriteFile(selefraFullPath, marshal, 0644)\n\tif err != nil {\n\t\tcli_ui.Errorf(\"\\tWrite %s error: %s \\n\", selefraFullPath, err.Error())\n\t} else {\n\t\t//cli_ui.Successf(\"\\tWrite %s success \\n\", selefraFullPath)\n\t}\n\n\treturn selefraBlock\n}\n\nfunc (x *InitCommandExecutor) getCloudBlock(projectName string) *module.CloudBlock {\n\n\tcloudBlock := module.NewCloudBlock()\n\tcloudBlock.Project = projectName\n\n\tif x.cloudClient != nil {\n\t\tcredentials, diagnostics := x.cloudClient.GetCredentials()\n\t\tif 
err := cli_ui.PrintDiagnostics(diagnostics); err != nil {\n\t\t\treturn nil\n\t\t}\n\t\tcloudBlock.Organization = credentials.OrgName\n\t\tcloudBlock.HostName = credentials.ServerHost\n\t}\n\n\treturn cloudBlock\n}\n\n//// init module.yaml\n//func (x *InitCommandExecutor) initModulesYaml() {\n//\tconst moduleComment = `\n//modules:\n//  - name: AWS_Security_Demo\n//    uses:\n//    - ./rules/\n//`\n//\tmoduleFullPath := filepath.Join(utils.AbsPath(x.options.ProjectWorkspace), \"module.yaml\")\n//\terr := os.WriteFile(moduleFullPath, []byte(moduleComment), 0644)\n//\tif err != nil {\n//\t\tcli_ui.Errorf(\"Write %s error: %s\\n\", moduleFullPath, err.Error())\n//\t} else {\n//\t\tcli_ui.Successf(\"Write %s success\\n\", moduleFullPath)\n//\t}\n//}\n\nvar rulesMap map[string]string\n\nfunc init() {\n\trulesMap = make(map[string]string)\n\trulesMap[\"aws\"] = rule_example.Aws\n\trulesMap[\"azure\"] = rule_example.Azure\n\trulesMap[\"gcp\"] = rule_example.GCP\n\trulesMap[\"k8s\"] = rule_example.K8S\n}\n\nfunc (x *InitCommandExecutor) initRulesYaml(providerSlice []*registry.Provider) {\n\tfor _, provider := range providerSlice {\n\t\truleYamlString, exists := rulesMap[provider.Name]\n\t\tif !exists {\n\t\t\truleYamlString = rule_example.DefaultTemplate\n\t\t}\n\t\truleFullPath := filepath.Join(utils.AbsPath(x.options.ProjectWorkspace), fmt.Sprintf(\"rules_%s.yaml\", provider.Name))\n\t\terr := os.WriteFile(ruleFullPath, []byte(ruleYamlString), 0644)\n\t\tif err != nil {\n\t\t\tcli_ui.Errorf(\"\\tWrite %s error: %s \\n\", ruleFullPath, err.Error())\n\t\t} else {\n\t\t\t//cli_ui.Successf(\"\\tWrite %s success \\n\", ruleFullPath)\n\t\t}\n\t}\n}\n\nfunc (x *InitCommandExecutor) initProvidersYaml(ctx context.Context, requiredProviders module.RequireProvidersBlock) {\n\tif len(requiredProviders) == 0 {\n\t\tcli_ui.Infof(\"No required provider, do not init providers file \\n\")\n\t\treturn\n\t}\n\tproviders, b := x.makeProviders(ctx, requiredProviders)\n\tif !b 
{\n\t\treturn\n\t}\n\tout, err := yaml.Marshal(providers)\n\tif err != nil {\n\t\tcli_ui.Errorf(\"Providers block yaml.Marshal error: %s \\n\", err.Error())\n\t\treturn\n\t}\n\t//fmt.Println(\"Providers Yaml string: \" + string(out))\n\n\tvar providersNode yaml.Node\n\terr = yaml.Unmarshal(out, &providersNode)\n\tif err != nil {\n\t\tcli_ui.Errorf(\"Providers yaml.Unmarshal error: %s \\n\", err.Error())\n\t\treturn\n\t}\n\t//fmt.Println(fmt.Sprintf(\"length: %d\", len(providersNode.Content[0].Content[0].Content)))\n\tdocumentRoot := &yaml.Node{\n\t\tKind: yaml.MappingNode,\n\t\tContent: []*yaml.Node{\n\t\t\t&yaml.Node{Kind: yaml.ScalarNode, Value: parser.ProvidersBlockName},\n\t\t\t&yaml.Node{Kind: providersNode.Content[0].Kind, Content: providersNode.Content[0].Content},\n\t\t},\n\t}\n\tmarshal, err := yaml.Marshal(documentRoot)\n\tif err != nil {\n\t\tcli_ui.Errorf(\"Providers yaml.Marshal error: %s \\n\", err.Error())\n\t\treturn\n\t}\n\t//fmt.Println(\"Yaml string: \" + string(marshal))\n\tproviderFullName := filepath.Join(utils.AbsPath(x.options.ProjectWorkspace), \"providers.yaml\")\n\terr = os.WriteFile(providerFullName, marshal, 0644)\n\tif err != nil {\n\t\tcli_ui.Errorf(\"\\tWrite %s error: %s \\n\", providerFullName, err.Error())\n\t} else {\n\t\t//cli_ui.Successf(\"\\tWrite %s success \\n\", providerFullName)\n\t}\n}\n\n// ------------------------------------------------- --------------------------------------------------------------------\n\n// TODO Automatically installs and starts the database and sets connection items\nfunc (x *InitCommandExecutor) GetConnectionBlock() *module.ConnectionBlock {\n\n\t//// 1. 
Try to get the DSN from the cloud\n\t//if x.cloudClient != nil {\n\t//\tdsn, diagnostics := x.cloudClient.FetchOrgDSN()\n\t//\tif err := cli_ui.PrintDiagnostics(diagnostics); err != nil {\n\t//\t\treturn nil\n\t//\t}\n\t//\tif dsn != \"\" {\n\t//\t\treturn x.parseDsnAsConnectionBlock(dsn)\n\t//\t}\n\t//}\n\t//\n\t//// 2.\n\n\t//cli_runtime.Init(x.options.ProjectWorkspace)\n\t//\n\t//dsn, diagnostics := cli_runtime.GetDSN()\n\t//if err := cli_ui.PrintDiagnostics(diagnostics); err != nil {\n\t//\treturn nil\n\t//}\n\t//if dsn != \"\" {\n\t//\treturn module.ParseConnectionBlockFromDSN(dsn)\n\t//}\n\n\treturn nil\n}\n\nfunc (x *InitCommandExecutor) getProjectName() (string, bool) {\n\n\t// 1. Use the specified one, if any\n\tif x.options.RelevanceProject != \"\" {\n\t\treturn x.options.RelevanceProject, true\n\t}\n\n\tdefaultProjectName := filepath.Base(utils.AbsPath(x.options.ProjectWorkspace))\n\n\t// 2. Let the user specify from standard input, the default project name is the name of the current folder\n\tvar err error\n\treader := bufio.NewReader(os.Stdin)\n\tfmt.Printf(\"Project name:(%s)\", defaultProjectName)\n\tprojectName, err := reader.ReadString('\\n')\n\tif err != nil {\n\t\tcli_ui.Errorf(\"Read you project name error: %s\\n\", err.Error())\n\t\treturn \"\", false\n\t}\n\tprojectName = strings.TrimSpace(strings.Replace(projectName, \"\\n\", \"\", -1))\n\tif projectName == \"\" {\n\t\tprojectName = defaultProjectName\n\t}\n\treturn projectName, true\n}\n\n// Pull all providers from the remote repository\nfunc (x *InitCommandExecutor) requestProvidersList(ctx context.Context) ([]*registry.Provider, error) {\n\tgithubRegistry, err := registry.NewProviderGithubRegistry(registry.NewProviderGithubRegistryOptions(x.options.DownloadWorkspace))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tproviderSlice, err := githubRegistry.List(ctx)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn providerSlice, nil\n}\n\n// 
------------------------------------------------- --------------------------------------------------------------------\n\nfunc (x *InitCommandExecutor) makeProviders(ctx context.Context, requiredProvidersBlock module.RequireProvidersBlock) (module.ProvidersBlock, bool) {\n\n\tprovidersBlock := make(module.ProvidersBlock, 0)\n\t// convert required provider block to\n\tfor _, requiredProvider := range requiredProvidersBlock {\n\n\t\t//cli_ui.Infof(\"Begin install provider %s \\n\", requiredProvider.Source)\n\n\t\tproviderInstallPlan := &planner.ProviderInstallPlan{\n\t\t\tProvider: registry.NewProvider(requiredProvider.Name, requiredProvider.Version),\n\t\t}\n\n\t\t// install providers\n\t\thasError := atomic.Bool{}\n\t\tmessageChannel := message.NewChannel[*schema.Diagnostics](func(index int, message *schema.Diagnostics) {\n\t\t\tif err := cli_ui.PrintDiagnostics(message); err != nil {\n\t\t\t\thasError.Store(true)\n\t\t\t}\n\t\t})\n\t\texecutor, d := executors.NewProviderInstallExecutor(&executors.ProviderInstallExecutorOptions{\n\t\t\tPlans: []*planner.ProviderInstallPlan{\n\t\t\t\tproviderInstallPlan,\n\t\t\t},\n\t\t\tMessageChannel:    messageChannel,\n\t\t\tDownloadWorkspace: x.options.DownloadWorkspace,\n\t\t\t// TODO\n\t\t\tProgressTracker: nil,\n\t\t})\n\t\tif err := cli_ui.PrintDiagnostics(d); err != nil {\n\t\t\tmessageChannel.SenderWaitAndClose()\n\t\t\treturn nil, false\n\t\t}\n\t\td = executor.Execute(ctx)\n\t\tmessageChannel.ReceiverWait()\n\t\tif err := cli_ui.PrintDiagnostics(d); err != nil {\n\t\t\treturn nil, false\n\t\t}\n\t\tif hasError.Load() {\n\t\t\treturn nil, false\n\t\t}\n\t\tcli_ui.Infof(\"\\tInstall %s@%s verified \\n\", requiredProvider.Source, requiredProvider.Version)\n\n\t\t// init\n\t\tcli_ui.Infof(\"\\tSynchronization %s@%s's config... 
\\n\", requiredProvider.Source, requiredProvider.Version)\n\t\tconfiguration, b := x.getProviderInitConfiguration(ctx, executor.GetLocalProviderManager(), providerInstallPlan)\n\t\tif !b {\n\t\t\treturn nil, false\n\t\t}\n\t\tproviderBlock := module.NewProviderBlock()\n\t\tproviderBlock.Provider = requiredProvider.Name\n\t\tproviderBlock.Name = requiredProvider.Name\n\t\tproviderBlock.Cache = \"1d\"\n\t\tproviderBlock.MaxGoroutines = pointer.ToUInt64Pointer(100)\n\t\tproviderBlock.ProvidersConfigYamlString = configuration\n\t\tprovidersBlock = append(providersBlock, providerBlock)\n\n\t\t//fmt.Println(\"Provider Block: \" + json_util.ToJsonString(providerBlock))\n\n\t\t//cli_ui.Infof(\"Init provider %s done \\n\", requiredProvider.Source)\n\t}\n\treturn providersBlock, true\n}\n\n// run provider & get it's init configuration\nfunc (x *InitCommandExecutor) getProviderInitConfiguration(ctx context.Context, localProviderManager *local_providers_manager.LocalProvidersManager, plan *planner.ProviderInstallPlan) (string, bool) {\n\n\t// start & get information\n\t//cli_ui.Infof(\"Begin init provider %s \\n\", plan.String())\n\n\t// Find the local path of the provider\n\tlocalProvider := &local_providers_manager.LocalProvider{\n\t\tProvider: plan.Provider,\n\t}\n\tinstalled, d := localProviderManager.IsProviderInstalled(ctx, localProvider)\n\tif err := cli_ui.PrintDiagnostics(d); err != nil {\n\t\treturn \"\", false\n\t}\n\tif !installed {\n\t\tcli_ui.Errorf(\"Provider %s not installed, can not exec init for it! 
\\n\", plan.String())\n\t\treturn \"\", false\n\t}\n\n\t// Find the local installation location of the provider\n\tlocalProviderMeta, d := localProviderManager.Get(ctx, localProvider)\n\tif err := cli_ui.PrintDiagnostics(d); err != nil {\n\t\treturn \"\", false\n\t}\n\n\t// Start provider\n\tplug, err := plugin.NewManagedPlugin(localProviderMeta.ExecutableFilePath, plan.Name, plan.Version, \"\", nil)\n\tif err != nil {\n\t\tcli_ui.Errorf(\"Start provider %s at %s failed: %s \\n\", plan.String(), localProvider.ExecutableFilePath, err.Error())\n\t\treturn \"\", false\n\t}\n\t// Close the provider at the end of the method execution\n\tdefer plug.Close()\n\n\t//cli_ui.Infof(\"Start provider %s success \\n\", plan.String())\n\n\t// Database connection option\n\tstorageOpt := postgresql_storage.NewPostgresqlStorageOptions(x.options.DSN)\n\tproviderBlock := module.NewProviderBlock()\n\tproviderBlock.Name = plan.Name\n\t// Because you do not need to actually interact with the database, it is set to public\n\tpgstorage.WithSearchPath(\"public\")(storageOpt)\n\topt, err := json.Marshal(storageOpt)\n\tif err != nil {\n\t\tcli_ui.Errorf(\"Json marshal postgresql options error: %s \\n\", err.Error())\n\t\treturn \"\", false\n\t}\n\n\t// Initialize the provider\n\tpluginProvider := plug.Provider()\n\t//var providerYamlConfiguration string = module.GetDefaultProviderConfigYamlConfiguration(plan.Name, plan.Version)\n\n\tproviderInitResponse, err := pluginProvider.Init(ctx, &shard.ProviderInitRequest{\n\t\tWorkspace: pointer.ToStringPointer(utils.AbsPath(x.options.ProjectWorkspace)),\n\t\tStorage: &shard.Storage{\n\t\t\tType:           0,\n\t\t\tStorageOptions: opt,\n\t\t},\n\t\tIsInstallInit: pointer.FalsePointer(),\n\t\t// Without passing in any configuration, there is no interaction with the database\n\t\tProviderConfig: nil,\n\t})\n\tif err != nil {\n\t\tcli_ui.Errorf(\"Start provider failed: %s \\n\", err.Error())\n\t\treturn \"\", false\n\t}\n\tif err := 
cli_ui.PrintDiagnostics(providerInitResponse.Diagnostics); err != nil {\n\t\treturn \"\", false\n\t}\n\tcli_ui.Infof(\"\\tSynchronization %s 's config successful \\n\", plan.String())\n\n\t// Get information about the started provider\n\tinformation, err := pluginProvider.GetProviderInformation(ctx, &shard.GetProviderInformationRequest{})\n\tif err != nil {\n\t\tcli_ui.Errorf(\"Provider %s, get provider information failed: %s \\n\", plan.String(), err.Error())\n\t\treturn \"\", false\n\t}\n\n\t// just for debug\n\t//fmt.Println(\"Provider Information Name: \" + json_util.ToJsonString(information.Name))\n\t//fmt.Println(\"Provider Information Version: \" + json_util.ToJsonString(information.Version))\n\t//fmt.Println(\"Provider Information DefaultConfiguration: \" + json_util.ToJsonString(information.DefaultConfigTemplate))\n\n\treturn information.DefaultConfigTemplate, true\n}\n\n// ------------------------------------------------- --------------------------------------------------------------------\n"
  },
  {
    "path": "cmd/init/init_command_executor_test.go",
    "content": "package init\n\nimport (\n\t\"context\"\n\t\"github.com/stretchr/testify/assert\"\n\t\"os\"\n\t\"testing\"\n)\n\nfunc TestInitCommandExecutor_Run(t *testing.T) {\n\n\t_ = os.Setenv(SelefraInputInitForceConfirm, \"y\")\n\n\terr := NewInitCommandExecutor(&InitCommandExecutorOptions{\n\t\tDownloadWorkspace: \"./test_download\",\n\t\tProjectWorkspace:  \"./test_data\",\n\t\tIsForceInit:       true,\n\t\tRelevanceProject:  \"\",\n\t\tDSN:               \"\",\n\t}).Run(context.Background())\n\tassert.NotNil(t, err)\n\n}\n"
  },
  {
    "path": "cmd/init/init_test.go",
    "content": "package init\n\nimport (\n\t\"context\"\n\t\"github.com/selefra/selefra-provider-sdk/env\"\n\t\"github.com/stretchr/testify/assert\"\n\t\"os\"\n\t\"testing\"\n)\n\nfunc Test_getDsn(t *testing.T) {\n\tprojectWorkspace := \"./test_data\"\n\tdownloadWorkspace := \"./test_download\"\n\tos.Setenv(env.DatabaseDsn, \"\")\n\tdsn, err := getDsn(context.Background(), projectWorkspace, downloadWorkspace)\n\tassert.Nil(t, err)\n\tassert.NotEmpty(t, dsn)\n}\n"
  },
  {
    "path": "cmd/init/rule_example/aws.yaml",
    "content": "rules:\n  - name: mfa_delete_is_disable\n    query: |-\n      SELECT\n        *\n      FROM\n        aws_s3_buckets\n      WHERE\n        versioning_status IS DISTINCT\n      FROM\n        'Enabled'\n        OR versioning_mfa_delete IS DISTINCT\n      FROM\n        'Enabled';\n    labels:\n      resource_account_id: '{{.account_id}}'\n      resource_id: '{{.arn}}'\n      resource_region: '{{.region}}'\n      resource_type: S3\n      bucket_url: 'https://{{.name}}.s3.{{.region}}.amazonaws.com'\n    metadata:\n      author: Selefra\n      description: Ensure MFA Delete is enable on S3 buckets.\n      id: SF010103\n      provider: AWS\n      severity: Medium\n      tags:\n        - Security\n        - Misconfiguration\n        - CIS Benchmark\n        - PCI\n        - GDPR\n        - APRA\n        - MAS\n        - NIST4\n        - ISO 27001\n        - SOC 2\n      title: MFA delete is disable\n    output: \"MFA delete is disable, arn: {{.arn}}\"\n  - name: bucket_logging_disable\n    query: |-\n      SELECT\n        *\n      FROM\n        aws_s3_buckets\n      WHERE\n        logging_target_bucket IS NULL\n        AND logging_target_prefix IS NULL;\n    labels:\n      resource_account_id: '{{.account_id}}'\n      resource_id: '{{.arn}}'\n      resource_region: '{{.region}}'\n      resource_type: S3\n      bucket_url: 'https://{{.name}}.s3.{{.region}}.amazonaws.com'\n    metadata:\n      author: Selefra\n      description: Ensure S3 bucket logging is enabled.\n      id: SF010111\n      provider: AWS\n      severity: Low\n      tags:\n        - Security\n        - Misconfiguration\n        - PCI\n        - HIPAA\n        - GDPR\n        - APRA\n        - MAS\n        - NIST4\n        - ISO 27001\n        - SOC 2\n        - AWS Config\n      title: S3 bucket logging disable\n    output: \"S3 bucket logging disable, arn: {{.arn}}\"\n  - name: ebs_encryption_is_disabled_by_default\n    query: |-\n      SELECT\n        *\n      FROM\n        
aws_ec2_regional_configs\n      WHERE\n        ebs_encryption_enabled_by_default IS FALSE;\n    labels:\n      resource_account_id: '{{.account_id}}'\n      resource_id: '{{.region}}'\n      resource_region: '{{.region}}'\n      resource_type: EC2\n    metadata:\n      author: Selefra\n      description: Ensure that EBS encryption is enabled by default.\n      id: SF010301\n      provider: AWS\n      severity: Medium\n      tags:\n        - Security\n        - Misconfiguration\n        - CIS Benchmark\n        - PCI\n        - HIPAA\n        - GDPR\n        - ISO 27001\n        - SOC 2\n      title: EBS encryption is disabled by default\n    output: \"EBS encryption is disabled by default, region id: {{.region}}\"\n"
  },
  {
    "path": "cmd/init/rule_example/azure.yaml",
    "content": "rules:\n  - name: secure_transfer_required_is_set_to_disabled\n    query: |-\n      SELECT\n        *\n      FROM\n        azure_storage_accounts\n      WHERE\n        ( properties ->> 'supportsHttpsTrafficOnly' ) :: BOOLEAN = FALSE;\n    labels:\n      resource_account_id: '{{.subscription_id}}'\n      resource_id: '{{.name}}'\n      resource_region: '{{.location}}'\n      resource_type: Storage Account\n      bucket_url: 'https://{{.name}}.blob.core.windows.net'\n    metadata:\n      author: Selefra\n      description: Enable data encryption in transit.\n      id: SF030101\n      provider: Azure\n      severity: Medium\n      tags:\n        - Attack Surface\n        - Security\n        - Misconfiguration\n        - CIS Benchmark\n        - PCI\n        - ISO 27001\n        - SOC 2\n      title: Secure transfer required is set to disabled\n    output: \"Secure transfer required is set to disabled, bucket name: {{.name}}, location: {{.location}}\"\n  - name: public_network_access_is_set_to_allow\n    query: |-\n      SELECT\n        *\n      FROM\n        azure_storage_accounts\n      WHERE\n        properties -> 'network_acls' ->> 'defaultAction' IS DISTINCT FROM 'Deny';\n    labels:\n      resource_account_id: '{{.subscription_id}}'\n      resource_id: '{{.name}}'\n      resource_region: '{{.location}}'\n      resource_type: Storage Account\n      bucket_url: 'https://{{.name}}.blob.core.windows.net'\n    metadata:\n      author: Selefra\n      description: Ensure Default Network Access Rule for Storage Accounts is Set to Deny.\n      id: SF030105\n      provider: Azure\n      severity: High\n      tags:\n        - Attack Scenarios\n        - Attack Surface\n        - Security\n        - Misconfiguration\n        - CIS Benchmark\n        - PCI\n        - ISO 27001\n        - SOC 2\n      title: Public network access is set to allow\n    output: \"Public network access is set to allow, bucket name: {{.name}}, location: {{.location}}\"\n  - name: 
unattached_disk_exists\n    query: |-\n      SELECT\n        *\n      FROM\n        azure_compute_disks\n      WHERE\n        properties ->> 'diskState' = 'Unattached';\n    labels:\n      resource_account_id: '{{.subscription_id}}'\n      resource_id: '{{.name}}'\n      resource_region: '{{.location}}'\n      resource_type: Virtual Machines\n    metadata:\n      author: Selefra\n      description: Identify any unattached (unused) Microsoft Azure virtual machine disk volumes available within your Azure cloud account and delete them in order to lower the cost of your monthly bill and reduce the risk of sensitive data leakage.\n      id: SF030310\n      provider: Azure\n      severity: Low\n      tags:\n        - Security\n        - Misconfiguration\n        - Cost optimisation\n      title: Unattached disk exists\n    output: \"Unattached disk exists, disk name: {{.name}}, location: {{.location}}\"\n"
  },
  {
    "path": "cmd/init/rule_example/data.go",
    "content": "package rule_example\n\nimport (\n\t_ \"embed\"\n)\n\n//go:embed aws.yaml\nvar Aws string\n\n//go:embed azure.yaml\nvar Azure string\n\n//go:embed gcp.yaml\nvar GCP string\n\n//go:embed k8s.yaml\nvar K8S string\n\n//go:embed default_template.yaml\nvar DefaultTemplate string\n"
  },
  {
    "path": "cmd/init/rule_example/default_template.yaml",
    "content": "#rules:\n#  - name: here_is_the_name_of_your_rule\n#    query: |-\n#      SELECT\n#        *\n#      FROM\n#        demo_table\n#      WHERE\n#        demo_item;\n#    output: \"Here is the output of your rule when it hits, resource name: {{.name}}\""
  },
  {
    "path": "cmd/init/rule_example/gcp.yaml",
    "content": "rules:\n  - name: bucket_does_not_have_uniform_bucket_level_access_enabled\n    query: |-\n      SELECT\n        *\n      FROM\n        gcp_storage_buckets\n      WHERE\n        uniform_bucket_level_access :: jsonb ->> 'Enabled' = 'false';\n    labels:\n      resource_account_id: '{{.project_id}}'\n      resource_id: '{{.name}}'\n      resource_region: '{{.location}}'\n      resource_type: Cloud Storage\n      bucket_url: 'https://storage.googleapis.com/{{.name}}'\n    metadata:\n      author: Selefra\n      description: Ensure That Cloud Storage Buckets Have Uniform Bucket-Level Access Enabled.\n      id: SF020106\n      provider: GCP\n      severity: Low\n      tags:\n        - Attack Surface\n        - Security\n        - Misconfiguration\n        - CIS Benchmark\n        - PCI\n        - ISO 27001\n      title: Bucket does not have uniform bucket-level access enabled\n    output: \"Bucket does not have uniform bucket-level access enabled, bucket name: {{.name}}, region: {{.location}}\"\n  - name: bucket_logging_disable\n    query: |-\n      SELECT\n        *\n      FROM\n        gcp_storage_buckets\n      WHERE\n        logging IS NULL;\n    labels:\n      resource_account_id: '{{.project_id}}'\n      resource_id: '{{.name}}'\n      resource_region: '{{.location}}'\n      resource_type: Cloud Storage\n      bucket_url: 'https://storage.googleapis.com/{{.name}}'\n    metadata:\n      author: Selefra\n      description: Ensure bucket enable logging.\n      id: SF020112\n      provider: GCP\n      severity: Low\n      tags:\n        - Security\n        - Misconfiguration\n        - ISO 27001\n      title: Bucket logging disable\n    output: \"Bucket logging disable, bucket name: {{.name}}, region: {{.location}}\"\n  - name: instance_disable_deletion_protection\n    query: |-\n      SELECT\n        a1.*,\n        a2 ->> 'network_i_p' AS private_ip,\n        a3 ->> 'nat_i_p' AS public_ip\n      FROM\n        gcp_compute_instances AS a1,\n        
jsonb_array_elements ( a1.network_interfaces :: JSONB ) AS a2,\n        jsonb_array_elements ( a2 -> 'access_configs' ) AS a3\n      WHERE\n        deletion_protection IS FALSE;\n    labels:\n      resource_account_id: '{{.project_id}}'\n      resource_id: '{{.id}}'\n      resource_region: '{{.zone}}'\n      resource_type: Compute\n      public_ip: '{{.public_ip}}'\n      private_ip: '{{.private_ip}}'\n    metadata:\n      author: Selefra\n      description: Ensure that your production Google Compute Engine instances have Deletion Protection feature enabled in order to protect them from being accidentally deleted.\n      id: SF020314\n      provider: GCP\n      remediation: ./remediation/compute/instance_disable_deletion_protection.md\n      severity: Medium\n      tags:\n        - Security\n        - Misconfiguration\n      title: Instance disable deletion protection\n    output: \"Instance disable deletion protection, instance id: {{.id}}, zone: {{.zone}}\"\n"
  },
  {
    "path": "cmd/init/rule_example/k8s.yaml",
    "content": "rules:\n  - name: cronjob_cpu_no_limit\n    query: |-\n      SELECT\n        a1.*\n      FROM\n        k8s_batch_cron_jobs AS a1,\n        jsonb_array_elements (\n          spec_job_template -> 'spec' -> 'template' -> 'spec' -> 'containers'\n        ) AS a2\n      WHERE\n        a2 -> 'resources' -> 'limits' ->> 'cpu' IS NULL;\n    labels:\n      resource_account_id: '{{.namespace}}'\n      resource_id: '{{.name}}'\n      resource_region: 'Not available'\n      resource_type: CronJob\n    metadata:\n      author: Selefra\n      description: Containers in a CronJob should have CPU limit which restricts the container to use no more than a given amount of CPU.\n      id: SF050101\n      provider: K8S\n      severity: Low\n      tags:\n        - Misconfiguration\n        - NSA\n      title: Cronjob cpu no limit\n    output: \"Cronjob cpu no limit, name: {{.name}}\"\n  - name: daemonset_memory_no_limit\n    query: |-\n      SELECT\n        a1.*\n      FROM\n        k8s_apps_daemon_sets AS a1,\n        jsonb_array_elements (\n          spec_template -> 'spec' -> 'containers'\n        ) AS a2\n      WHERE\n        a2 -> 'resources' -> 'limits' ->> 'memory' IS NULL;\n    labels:\n      resource_account_id: '{{.namespace}}'\n      resource_id: '{{.name}}'\n      resource_region: 'Not available'\n      resource_type: DaemonSet\n    metadata:\n      author: Selefra\n      description: Containers in a DaemonSet should have memory limit which restricts the container to use no more than a given amount of user or system memory.\n      id: SF050203\n      provider: K8S\n      severity: Low\n      tags:\n        - Misconfiguration\n        - NSA\n      title: Daemonset memory no limit\n    output: \"Daemonset memory no limit, name: {{.name}}\"\n"
  },
  {
    "path": "cmd/init/test_data/init.sh",
    "content": "#!/usr/bin/env bash\n#######################################################################################################################\n#                                                                                                                     #\n#                              This script helps you test interactive programs                                        #\n#                                                                                                                     #\n#                                                                                                                     #\n#                                                                                                   Version: 0.0.1    #\n#                                                                                                                     #\n#######################################################################################################################\n\n# for command `selefra init`\ncd ../../../\ngo build\nrm -rf ./test\nmkdir test\nmv selefra.exe ./test\ncd test\necho \"begin run command selefra init\"\n./selefra.exe init $@\n\n"
  },
  {
    "path": "cmd/login/login.go",
    "content": "package login\n\nimport (\n\t\"github.com/selefra/selefra-provider-sdk/provider/schema\"\n\t\"github.com/selefra/selefra/cli_ui\"\n\t\"github.com/selefra/selefra/global\"\n\t\"github.com/selefra/selefra/pkg/cli_env\"\n\t\"github.com/selefra/selefra/pkg/cloud_sdk\"\n\t\"github.com/selefra/selefra/pkg/logger\"\n\t\"github.com/spf13/cobra\"\n)\n\nfunc NewLoginCmd() *cobra.Command {\n\tcmd := &cobra.Command{\n\t\tUse:              \"login [token]\",\n\t\tShort:            \"Login to selefra cloud using token\",\n\t\tLong:             \"Login to selefra cloud using token\",\n\t\tPersistentPreRun: global.DefaultWrappedInit(),\n\t\tRunE:             RunFunc,\n\t}\n\n\tcmd.SetHelpFunc(cmd.HelpFunc())\n\treturn cmd\n}\n\nfunc RunFunc(cmd *cobra.Command, args []string) error {\n\n\tdiagnostics := schema.NewDiagnostics()\n\n\tcloudServerHost := cli_env.GetServerHost()\n\tlogger.InfoF(\"Use server address: %s\", cloudServerHost)\n\n\tclient, d := cloud_sdk.NewCloudClient(cloudServerHost)\n\tif err := cli_ui.PrintDiagnostics(diagnostics); err != nil {\n\t\treturn err\n\t}\n\tlogger.InfoF(\"Create cloud client success \\n\")\n\n\tvar token string\n\tif len(args) != 0 {\n\t\ttoken = args[0]\n\t\tcli_ui.Warningf(\"Security warning: Entering a token directly on the command line will be recorded in the command line history and may cause your token to leak! \\n\")\n\t}\n\n\t// If you are already logged in, repeat login is not allowed and you must log out first\n\tgetCredentials, _ := client.GetCredentials()\n\tif getCredentials != nil {\n\t\tcli_ui.Errorf(\"You already logged in as %s, please logout first. \\n\", getCredentials.UserName)\n\t\treturn nil\n\t}\n\n\t// Read the token from standard input\n\tif token == \"\" {\n\t\ttoken, d = cli_ui.InputCloudToken(cloudServerHost)\n\t\tif err := cli_ui.PrintDiagnostics(d); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\tif token == \"\" {\n\t\tcli_ui.Errorf(\"Token can not be empty! 
\\n\")\n\t\treturn nil\n\t}\n\n\tcredentials, d := client.Login(token)\n\tif err := cli_ui.PrintDiagnostics(d); err != nil {\n\t\tcli_ui.ShowLoginFailed(token)\n\t\treturn nil\n\t}\n\n\tcli_ui.ShowLoginSuccess(cloudServerHost, credentials)\n\n\treturn nil\n}\n"
  },
  {
    "path": "cmd/login/login_test.go",
    "content": "package login\n\nimport (\n\t\"github.com/stretchr/testify/assert\"\n\t\"testing\"\n)\n\nfunc TestRunFunc(t *testing.T) {\n\terr := RunFunc(nil, nil)\n\tassert.Nil(t, err)\n}\n"
  },
  {
    "path": "cmd/login/test_data/login.sh",
    "content": "#!/usr/bin/env bash\n#######################################################################################################################\n#                                                                                                                     #\n#                              This script helps you test interactive programs                                        #\n#                                                                                                                     #\n#                                                                                                                     #\n#                                                                                                   Version: 0.0.1    #\n#                                                                                                                     #\n#######################################################################################################################\n\n# for command `selefra login`\ncd ../../../\ngo build\nrm -rf ./test\nmkdir test\nmv selefra.exe ./test\ncd test\necho \"begin run command selefra login\"\n./selefra.exe login $@\n\n"
  },
  {
    "path": "cmd/logout/logout.go",
    "content": "package logout\n\nimport (\n\t\"github.com/selefra/selefra-provider-sdk/provider/schema\"\n\t\"github.com/selefra/selefra/cli_ui\"\n\t\"github.com/selefra/selefra/pkg/cli_env\"\n\t\"github.com/selefra/selefra/pkg/cloud_sdk\"\n\t\"github.com/selefra/selefra/pkg/logger\"\n\t\"github.com/spf13/cobra\"\n)\n\nfunc NewLogoutCmd() *cobra.Command {\n\tcmd := &cobra.Command{\n\t\tUse:   \"logout\",\n\t\tShort: \"Logout to selefra cloud\",\n\t\tLong:  \"Logout to selefra cloud\",\n\t\tRunE:  RunFunc,\n\t}\n\n\treturn cmd\n}\n\nfunc RunFunc(cmd *cobra.Command, args []string) error {\n\n\tdiagnostics := schema.NewDiagnostics()\n\n\t// Server address\n\tcloudServerHost := cli_env.GetServerHost()\n\tlogger.InfoF(\"Use server address: %s\", cloudServerHost)\n\n\tclient, d := cloud_sdk.NewCloudClient(cloudServerHost)\n\tif diagnostics.AddDiagnostics(d).HasError() {\n\t\treturn cli_ui.PrintDiagnostics(diagnostics)\n\t}\n\tlogger.InfoF(\"Create cloud client success \\n\")\n\n\t// If you are not logged in, you are not allowed to log out\n\tcredentials, _ := client.GetCredentials()\n\tif credentials == nil {\n\t\tcli_ui.Errorln(\"You are not login, please login first! \\n\")\n\t\treturn nil\n\t}\n\tlogger.InfoF(\"Get credentials success \\n\")\n\n\t// Destroy the local token\n\tclient.SetToken(credentials.Token)\n\tif err := cli_ui.PrintDiagnostics(client.Logout()); err != nil {\n\t\treturn err\n\t}\n\tcli_ui.ShowLogout(credentials)\n\treturn nil\n}\n"
  },
  {
    "path": "cmd/logout/logout_test.go",
    "content": "package logout\n\nimport (\n\t\"github.com/stretchr/testify/assert\"\n\t\"testing\"\n)\n\nfunc TestRunFunc(t *testing.T) {\n\terr := RunFunc(nil, nil)\n\tassert.Nil(t, err)\n}\n"
  },
  {
    "path": "cmd/logout/test_data/logout.sh",
    "content": "#!/usr/bin/env bash\n#######################################################################################################################\n#                                                                                                                     #\n#                              This script helps you test interactive programs                                        #\n#                                                                                                                     #\n#                                                                                                                     #\n#                                                                                                   Version: 0.0.1    #\n#                                                                                                                     #\n#######################################################################################################################\n\n# for command `selefra logout`\ncd ../../../\ngo build\nrm -rf ./test\nmkdir test\nmv selefra.exe ./test\ncd test\necho \"begin run command selefra logout\"\n./selefra.exe logout $@\n\n"
  },
  {
    "path": "cmd/module/get.go",
    "content": "package module\n\n\n"
  },
  {
    "path": "cmd/module/list.go",
    "content": "package module\n"
  },
  {
    "path": "cmd/module/search.go",
    "content": "package module\n"
  },
  {
    "path": "cmd/module/tidy.go",
    "content": "package module\n\n\n"
  },
  {
    "path": "cmd/provider/install.go",
    "content": "package provider\n\nimport (\n\t\"context\"\n\t\"github.com/selefra/selefra-provider-sdk/provider/schema\"\n\t\"github.com/selefra/selefra/cli_ui\"\n\t\"github.com/selefra/selefra/config\"\n\t\"github.com/selefra/selefra/global\"\n\t\"github.com/selefra/selefra/pkg/message\"\n\t\"github.com/selefra/selefra/pkg/providers/local_providers_manager\"\n\t\"github.com/selefra/selefra/pkg/version\"\n\t\"github.com/spf13/cobra\"\n)\n\nfunc newCmdProviderInstall() *cobra.Command {\n\tcmd := &cobra.Command{\n\t\tUse:              \"install\",\n\t\tShort:            \"Install one or more providers, for example: selefra provider install aws\",\n\t\tLong:             \"Install one or more providers, for example: selefra provider install aws\",\n\t\tPersistentPreRun: global.DefaultWrappedInit(),\n\t\tRunE: func(cmd *cobra.Command, args []string) error {\n\t\t\tctx := cmd.Context()\n\t\t\tdownloadDirectory, err := config.GetDefaultDownloadCacheDirectory()\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\treturn Install(ctx, downloadDirectory, args...)\n\t\t},\n\t}\n\n\tcmd.SetHelpFunc(cmd.HelpFunc())\n\treturn cmd\n}\n\nfunc Install(ctx context.Context, downloadWorkspace string, requiredProviders ...string) (err error) {\n\n\tif len(requiredProviders) == 0 {\n\t\tcli_ui.Errorf(\"Please specify one or more providers to install, for example: selefra provider install aws \\n\")\n\t\treturn nil\n\t}\n\n\tmanager, err := local_providers_manager.NewLocalProvidersManager(downloadWorkspace)\n\tif err != nil {\n\t\treturn err\n\t}\n\tfor _, nameAndVersionString := range requiredProviders {\n\t\tnameAndVersion := version.ParseNameAndVersion(nameAndVersionString)\n\t\tmessageChannel := message.NewChannel[*schema.Diagnostics](func(index int, message *schema.Diagnostics) {\n\t\t\te := cli_ui.PrintDiagnostics(message)\n\t\t\tif err == nil {\n\t\t\t\terr = e\n\t\t\t}\n\t\t})\n\t\tmanager.InstallProvider(ctx, 
&local_providers_manager.InstallProvidersOptions{\n\t\t\tRequiredProvider: local_providers_manager.NewLocalProvider(nameAndVersion.Name, nameAndVersion.Version),\n\t\t\tMessageChannel:   messageChannel,\n\t\t})\n\t\tmessageChannel.ReceiverWait()\n\t}\n\treturn err\n}\n\n//func install(ctx context.Context, args []string) error {\n//\tconfigYaml, err := config.GetConfig()\n//\tif err != nil {\n//\t\tcli_ui.Errorln(err.Error())\n//\t\treturn err\n//\t}\n//\n//\tnamespace, _, err := utils.Home()\n//\tif err != nil {\n//\t\tcli_ui.Errorln(err.Error())\n//\t\treturn nil\n//\t}\n//\n//\tprovider := registry.NewProviderRegistry(namespace)\n//\tfor _, s := range args {\n//\t\tsplitArr := strings.Split(s, \"@\")\n//\t\tvar name string\n//\t\tvar version string\n//\t\tif len(splitArr) > 1 {\n//\t\t\tname = splitArr[0]\n//\t\t\tversion = splitArr[1]\n//\t\t} else {\n//\t\t\tname = splitArr[0]\n//\t\t\tversion = \"latest\"\n//\t\t}\n//\t\tpr := registry.Provider{\n//\t\t\tName:    name,\n//\t\t\tVersion: version,\n//\t\t\tSource:  \"\",\n//\t\t}\n//\t\tp, err := provider.Download(ctx, pr, true)\n//\t\tcontinueFlag := false\n//\t\tfor _, provider := range configYaml.Selefra.ProviderDecls {\n//\t\t\tproviderName := *provider.Source\n//\t\t\tif strings.ToLower(providerName) == strings.ToLower(p.Name) && strings.ToLower(provider.Version) == strings.ToLower(p.Version) {\n//\t\t\t\tcontinueFlag = true\n//\t\t\t\tbreak\n//\t\t\t}\n//\t\t}\n//\t\tif continueFlag {\n//\t\t\tcli_ui.Warningln(fmt.Sprintf(\"ProviderBlock %s@%s already installed\", p.Name, p.Version))\n//\t\t\tcontinue\n//\t\t}\n//\t\tif err != nil {\n//\t\t\tcli_ui.Errorf(\"Installed %s@%s failed：%s\", p.Name, p.Version, err.Error())\n//\t\t\treturn nil\n//\t\t} else {\n//\t\t\tcli_ui.Infof(\"Installed %s@%s verified\", p.Name, p.Version)\n//\t\t}\n//\t\tcli_ui.Infof(\"Synchronization %s@%s's config...\", p.Name, p.Version)\n//\t\tplug, err := plugin.NewManagedPlugin(p.Filepath, p.Name, p.Version, \"\", nil)\n//\t\tif err 
!= nil {\n//\t\t\tcli_ui.Errorf(\"Synchronization %s@%s's config failed：%s\", p.Name, p.Version, err.Error())\n//\t\t\treturn nil\n//\t\t}\n//\n//\t\tplugProvider := plug.Provider()\n//\t\tstorageOpt := pgstorage.DefaultPgStorageOpts()\n//\t\topt, err := json.Marshal(storageOpt)\n//\t\tinitRes, err := plugProvider.Init(ctx, &shard.ProviderInitRequest{\n//\t\t\tModuleLocalDirectory: pointer.ToStringPointer(global.WorkSpace()),\n//\t\t\tStorage: &shard.Storage{\n//\t\t\t\tType:           0,\n//\t\t\t\tStorageOptions: opt,\n//\t\t\t},\n//\t\t\tIsInstallInit:  pointer.TruePointer(),\n//\t\t\tProviderConfig: pointer.ToStringPointer(\"\"),\n//\t\t})\n//\n//\t\tif err != nil {\n//\t\t\tcli_ui.Errorln(err.Error())\n//\t\t\treturn nil\n//\t\t}\n//\n//\t\tif initRes != nil && initRes.Diagnostics != nil {\n//\t\t\terr := cli_ui.PrintDiagnostic(initRes.Diagnostics.GetDiagnosticSlice())\n//\t\t\tif err != nil {\n//\t\t\t\treturn nil\n//\t\t\t}\n//\t\t}\n//\n//\t\tres, err := plugProvider.GetProviderInformation(ctx, &shard.GetProviderInformationRequest{})\n//\t\tif err != nil {\n//\t\t\tcli_ui.Errorf(\"Synchronization %s@%s's config failed：%s\", p.Name, p.Version, err.Error())\n//\t\t\treturn nil\n//\t\t}\n//\t\tcli_ui.Infof(\"Synchronization %s@%s's config successful\", p.Name, p.Version)\n//\t\terr = tools.AppendProviderDecl(p, configYaml, version)\n//\t\tif err != nil {\n//\t\t\tcli_ui.Errorln(err.Error())\n//\t\t\treturn nil\n//\t\t}\n//\t\thasProvider := false\n//\t\tfor _, Node := range configYaml.Providers.Content {\n//\t\t\tif Node.Kind == yaml.ScalarNode && Node.Value == p.Name {\n//\t\t\t\thasProvider = true\n//\t\t\t\tbreak\n//\t\t\t}\n//\t\t}\n//\t\tif !hasProvider {\n//\t\t\terr = tools.SetProviderTmpl(res.DefaultConfigTemplate, p, configYaml)\n//\t\t}\n//\t\tif err != nil {\n//\t\t\tcli_ui.Errorf(\"set %s@%s's config failed：%s\", p.Name, p.Version, err.Error())\n//\t\t\treturn nil\n//\t\t}\n//\t}\n//\n//\tstr, err := yaml.Marshal(configYaml)\n//\tif err != nil 
{\n//\t\tcli_ui.Errorln(err.Error())\n//\t\treturn nil\n//\t}\n//\tpath, err := config.GetConfigPath()\n//\tif err != nil {\n//\t\tcli_ui.Errorln(err.Error())\n//\t\treturn nil\n//\t}\n//\terr = os.WriteFile(path, str, 0644)\n//\treturn nil\n//}\n"
  },
  {
    "path": "cmd/provider/install_online_test.go",
    "content": "package provider\n\n//import (\n//\t\"context\"\n//\t\"github.com/selefra/selefra/global\"\n//\t\"testing\"\n//)\n//\n//func TestInstallOnline(t *testing.T) {\n//\tif testing.Short() {\n//\t\tt.Skip(\"skipping test in short mode.\")\n//\t\treturn\n//\t}\n//\tglobal.Init(\"TestInstallOnline\", global.WithWorkspace(\"../../tests/workspace/online\"))\n//\tglobal.SetToken(\"xxxxxxxxxxxxxxxxxxxxxx\")\n//\tglobal.SERVER = \"dev-api.selefra.io\"\n//\tctx := context.Background()\n//\terr := install(ctx, []string{\"aws@latest\"})\n//\tif err != nil {\n//\t\tt.Error(err)\n//\t}\n//}\n"
  },
  {
    "path": "cmd/provider/install_test.go",
    "content": "package provider\n\nimport (\n\t\"context\"\n\t\"github.com/stretchr/testify/assert\"\n\t\"testing\"\n)\n\n//import (\n//\t\"context\"\n//\t\"github.com/selefra/selefra/global\"\n//\t\"github.com/spf13/cobra\"\n//\t\"github.com/stretchr/testify/require\"\n//\t\"testing\"\n//)\n//\n//func TestInstall(t *testing.T) {\n//\tglobal.Init(\"TestInstall\", global.WithWorkspace(\"../../tests/workspace/offline\"))\n//\n//\tctx := context.Background()\n//\terr := install(ctx, []string{\"aws@latest\"})\n//\tif err != nil {\n//\t\tt.Error(err)\n//\t}\n//}\n//\n//func TestInstallCmd(t *testing.T) {\n//\trootCmd := &cobra.Command{\n//\t\tUse: \"provider\",\n//\t}\n//\tinstallCmd := newCmdProviderInstall()\n//\trootCmd.AddCommand(installCmd)\n//\n//\trequire.Equal(t, \"provider install\", global.Cmd())\n//}\n\nfunc Test_install(t *testing.T) {\n\terr := Install(context.Background(), \"./test_download\", \"mock\")\n\tassert.Nil(t, err)\n}\n"
  },
  {
    "path": "cmd/provider/list.go",
    "content": "package provider\n\nimport (\n\t\"github.com/selefra/selefra/cli_ui\"\n\t\"github.com/selefra/selefra/config\"\n\t\"github.com/selefra/selefra/global\"\n\t\"github.com/selefra/selefra/pkg/providers/local_providers_manager\"\n\t\"github.com/selefra/selefra/pkg/version\"\n\t\"github.com/spf13/cobra\"\n)\n\nfunc newCmdProviderList() *cobra.Command {\n\tcmd := &cobra.Command{\n\t\tUse:              \"list\",\n\t\tShort:            \"List currently installed providers\",\n\t\tLong:             \"List currently installed providers\",\n\t\tPersistentPreRun: global.DefaultWrappedInit(),\n\t\tRunE: func(cmd *cobra.Command, args []string) error {\n\n\t\t\tdownloadWorkspace, err := config.GetDefaultDownloadCacheDirectory()\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\n\t\t\treturn List(downloadWorkspace)\n\t\t},\n\t}\n\n\tcmd.SetHelpFunc(cmd.HelpFunc())\n\treturn cmd\n}\n\nfunc List(downloadWorkspace string) error {\n\n\tmanager, err := local_providers_manager.NewLocalProvidersManager(downloadWorkspace)\n\tif err != nil {\n\t\treturn err\n\t}\n\tproviders, diagnostics := manager.ListProviders()\n\tif err := cli_ui.PrintDiagnostics(diagnostics); err != nil {\n\t\treturn err\n\t}\n\tif len(providers) == 0 {\n\t\treturn nil\n\t}\n\n\ttable := make([][]string, 0)\n\tfor _, provider := range providers {\n\t\tversions := make([]string, 0)\n\t\tfor versionString := range provider.ProviderVersionMap {\n\t\t\tversions = append(versions, versionString)\n\t\t}\n\t\tversion.Sort(versions)\n\t\tfor _, versionString := range versions {\n\t\t\ttable = append(table, []string{\n\t\t\t\tprovider.ProviderName, versionString, provider.ProviderVersionMap[versionString].ExecutableFilePath,\n\t\t\t})\n\t\t}\n\t}\n\tcli_ui.ShowTable([]string{\"Name\", \"Version\", \"Source\"}, table, nil, true)\n\n\treturn nil\n}\n"
  },
  {
    "path": "cmd/provider/list_online_test.go",
    "content": "package provider\n\n//import (\n//\t\"github.com/selefra/selefra/global\"\n//\t\"testing\"\n//)\n//\n//func TestListOnline(t *testing.T) {\n//\tif testing.Short() {\n//\t\tt.Skip(\"skipping test in short mode.\")\n//\t\treturn\n//\t}\n//\tglobal.Init(\"TestListOnline\", global.WithWorkspace(\"../../tests/workspace/online\"))\n//\tglobal.SetToken(\"xxxxxxxxxxxxxxxxxxxxxx\")\n//\tglobal.SERVER = \"dev-api.selefra.io\"\n//\terr := list()\n//\tif err != nil {\n//\t\tt.Error(err)\n//\t}\n//}\n"
  },
  {
    "path": "cmd/provider/list_test.go",
    "content": "package provider\n\nimport (\n\t\"github.com/stretchr/testify/assert\"\n\t\"testing\"\n)\n\n//import (\n//\t\"github.com/selefra/selefra/global\"\n//\t\"testing\"\n//)\n//\n//func TestList(t *testing.T) {\n//\tglobal.Init(\"TestList\", global.WithWorkspace(\"../../tests/workspace/offline\"))\n//\terr := list()\n//\tif err != nil {\n//\t\tt.Error(err)\n//\t}\n//}\n\nfunc Test_list(t *testing.T) {\n\terr := List(\"./test_download\")\n\tassert.Nil(t, err)\n}\n"
  },
  {
    "path": "cmd/provider/provider.go",
    "content": "package provider\n\nimport (\n\t\"github.com/spf13/cobra\"\n)\n\nfunc NewProviderCmd() *cobra.Command {\n\tcmd := &cobra.Command{\n\t\tUse:   \"provider [command]\",\n\t\tShort: \"Top-level command to interact with providers\",\n\t\tLong:  \"Top-level command to interact with providers\",\n\t}\n\n\t//cmd.AddCommand(newCmdProviderUpdate(), newCmdProviderRemove(), newCmdProviderRemove(), newCmdProviderList(), newCmdProviderInstall())\n\tcmd.AddCommand(newCmdProviderRemove(), newCmdProviderList(), newCmdProviderInstall())\n\n\tcmd.SetHelpFunc(cmd.HelpFunc())\n\treturn cmd\n}\n"
  },
  {
    "path": "cmd/provider/remove.go",
    "content": "package provider\n\nimport (\n\t\"context\"\n\t\"github.com/selefra/selefra/cli_ui\"\n\t\"github.com/selefra/selefra/config\"\n\t\"github.com/selefra/selefra/global\"\n\t\"github.com/selefra/selefra/pkg/providers/local_providers_manager\"\n\t\"github.com/spf13/cobra\"\n)\n\nfunc newCmdProviderRemove() *cobra.Command {\n\tcmd := &cobra.Command{\n\t\tUse:              \"remove\",\n\t\tShort:            \"Remove providers one or more from the download cache, for example: selefra provider remove aws@v0.0.1\",\n\t\tLong:             \"Remove providers one or more from the download cache, for example: selefra provider remove aws@v0.0.1\",\n\t\tPersistentPreRun: global.DefaultWrappedInit(),\n\t\tRunE: func(cmd *cobra.Command, names []string) error {\n\t\t\tdownloadDirectory, err := config.GetDefaultDownloadCacheDirectory()\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\treturn Remove(cmd.Context(), downloadDirectory, names...)\n\t\t},\n\t}\n\n\tcmd.SetHelpFunc(cmd.HelpFunc())\n\treturn cmd\n}\n\nfunc Remove(ctx context.Context, downloadWorkspace string, names ...string) error {\n\tmanager, err := local_providers_manager.NewLocalProvidersManager(downloadWorkspace)\n\tif err != nil {\n\t\treturn err\n\t}\n\td := manager.RemoveProviders(ctx, names...)\n\treturn cli_ui.PrintDiagnostics(d)\n}\n\n//func Remove(names []string) error {\n//\targsMap := make(map[string]bool)\n//\tfor i := range names {\n//\t\targsMap[names[i]] = true\n//\t}\n//\tdeletedMap := make(map[string]bool)\n//\tcof, err := config.GetConfig()\n//\tif err != nil {\n//\t\treturn err\n//\t}\n//\tnamespace, _, err := utils.Home()\n//\tif err != nil {\n//\t\treturn err\n//\t}\n//\tprovider := registry.NewProviderRegistry(namespace)\n//\n//\tfor _, p := range cof.Selefra.ProviderDecls {\n//\t\tname := *p.Source\n//\t\tpath := utils.GetPathBySource(*p.Source, p.Version)\n//\t\tprov := registry.ProviderBinary{\n//\t\t\tProvider: registry.Provider{\n//\t\t\t\tName:    
name,\n//\t\t\t\tVersion: p.Version,\n//\t\t\t\tSource:  \"\",\n//\t\t\t},\n//\t\t\tFilepath: path,\n//\t\t}\n//\t\tif !argsMap[p.Name] || deletedMap[p.Path] {\n//\t\t\tbreak\n//\t\t}\n//\n//\t\terr := provider.DeleteProvider(prov)\n//\t\tif err != nil {\n//\t\t\tif !errors.Is(err, os.ErrNotExist) {\n//\t\t\t\tcli_ui.Warningf(\"Failed to remove  %s: %s\", p.Name, err.Error())\n//\t\t\t}\n//\t\t}\n//\t\t_, jsonPath, err := utils.Home()\n//\t\tif err != nil {\n//\t\t\treturn err\n//\t\t}\n//\t\tc, err := os.ReadFile(jsonPath)\n//\t\tif err == nil {\n//\t\t\tvar configMap = make(map[string]string)\n//\t\t\terr = json.Unmarshal(c, &configMap)\n//\t\t\tif err != nil {\n//\t\t\t\treturn err\n//\t\t\t}\n//\t\t\tdelete(configMap, *p.Source+\"@\"+p.Version)\n//\t\t\tc, err = json.Marshal(configMap)\n//\t\t\tif err != nil {\n//\t\t\t\treturn err\n//\t\t\t}\n//\t\t\terr = os.Remove(jsonPath)\n//\t\t\tif err != nil {\n//\t\t\t\treturn err\n//\t\t\t}\n//\t\t\terr = os.WriteFile(jsonPath, c, 0644)\n//\t\t\tif err != nil {\n//\t\t\t\treturn err\n//\t\t\t}\n//\t\t\tdeletedMap[path] = true\n//\t\t}\n//\t\tcli_ui.Infof(\"Removed %s success\", *p.Source)\n//\t}\n//\treturn nil\n//}\n"
  },
  {
    "path": "cmd/provider/remove_online_test.go",
    "content": "package provider\n\n//import (\n//\t\"context\"\n//\t\"github.com/selefra/selefra/global\"\n//\t\"testing\"\n//)\n//\n//func TestRemoveOnline(t *testing.T) {\n//\tif testing.Short() {\n//\t\tt.Skip(\"skipping test in short mode.\")\n//\t\treturn\n//\t}\n//\tglobal.Init(\"TestRemoveOnline\", global.WithWorkspace(\"../../tests/workspace/online\"))\n//\tglobal.SetToken(\"xxxxxxxxxxxxxxxxxxxxxx\")\n//\tglobal.SERVER = \"dev-api.selefra.io\"\n//\terr := Remove([]string{\"aws\"})\n//\tif err != nil {\n//\t\tt.Error(err)\n//\t}\n//\terr = install(context.Background(), []string{\"aws@latest\"})\n//\tif err != nil {\n//\t\tt.Error(err)\n//\t}\n//}\n"
  },
  {
    "path": "cmd/provider/remove_test.go",
    "content": "package provider\n\nimport (\n\t\"context\"\n\t\"github.com/stretchr/testify/assert\"\n\t\"testing\"\n)\n\n//import (\n//\t\"context\"\n//\t\"github.com/selefra/selefra/global\"\n//\t\"testing\"\n//)\n//\n//func TestRemove(t *testing.T) {\n//\tglobal.Init(\"TestRemove\", global.WithWorkspace(\"../../tests/workspace/offline\"))\n//\terr := Remove([]string{\"aws\"})\n//\tif err != nil {\n//\t\tt.Error(err)\n//\t}\n//\terr = install(context.Background(), []string{\"aws@latest\"})\n//\tif err != nil {\n//\t\tt.Error(err)\n//\t}\n//}\n\nfunc TestRemove(t *testing.T) {\n\n\tprovider := \"mock@v0.0.3\"\n\n\terr := Install(context.Background(), \"./test_download\", provider)\n\tassert.Nil(t, err)\n\n\terr = Remove(context.Background(), \"./test_download\", provider)\n\tassert.Nil(t, err)\n}\n"
  },
  {
    "path": "cmd/provider/sync.go",
    "content": "package provider\n\n//import (\n//\t\"context\"\n//\t\"fmt\"\n//\t\"github.com/selefra/selefra-provider-sdk/provider/schema\"\n//\t\"github.com/selefra/selefra-provider-sdk/storage/database_storage/postgresql_storage\"\n//\t\"github.com/selefra/selefra-utils/pkg/id_util\"\n//\t\"github.com/selefra/selefra/cli_ui\"\n//\t\"github.com/selefra/selefra/cmd/fetch\"\n//\t\"github.com/selefra/selefra/cmd/test\"\n//\t\"github.com/selefra/selefra/cmd/tools\"\n//\t\"github.com/selefra/selefra/config\"\n//\t\"github.com/selefra/selefra/global\"\n//\t\"github.com/selefra/selefra/pkg/cloud_sdk\"\n//\t\"github.com/selefra/selefra/pkg/logger\"\n//\t\"github.com/selefra/selefra/pkg/registry\"\n//\t\"github.com/selefra/selefra/pkg/storage/pgstorage\"\n//\t\"github.com/selefra/selefra/pkg/utils\"\n//\t\"path/filepath\"\n//\t\"time\"\n//)\n//\n//type lockStruct struct {\n//\tSchemaKey string\n//\tUuid      string\n//\tStorage   *postgresql_storage.PostgresqlStorage\n//}\n//\n//// effectiveDecls check provider decls and download provider binary file, return the effective providers\n//func effectiveDecls(ctx context.Context, decls []*config.RequireProvider) (effects []*config.RequireProvider, errlogs []string) {\n//\tnamespace, _, err := utils.Home()\n//\tif err != nil {\n//\t\terrlogs = append(errlogs, err.Error())\n//\t\treturn\n//\t}\n//\tprovider := registry.NewProviderRegistry(namespace)\n//\tcli_ui.Infof(\"Selefra has been successfully installed providers!\\n\\n\")\n//\tcli_ui.Infof(\"Checking Selefra provider updates......\\n\")\n//\n//\tfor _, decl := range decls {\n//\t\tconfigVersion := decl.Version\n//\t\tprov := registry.Provider{\n//\t\t\tName:    decl.Name,\n//\t\t\tVersion: decl.Version,\n//\t\t\tSource:  \"\",\n//\t\t\tPath:    decl.Path,\n//\t\t}\n//\t\tpp, err := provider.Download(ctx, prov, true)\n//\t\tif err != nil {\n//\t\t\tcli_ui.Errorf(\"%s@%s failed updated：%s\", decl.Name, decl.Version, err.Error())\n//\t\t\terrlogs = append(errlogs, 
err.Error())\n//\t\t\tcontinue\n//\t\t} else {\n//\t\t\tdecl.Path = pp.Filepath\n//\t\t\tdecl.Version = pp.Version\n//\t\t\terr = tools.AppendProviderDecl(pp, nil, configVersion)\n//\t\t\tif err != nil {\n//\t\t\t\tcli_ui.Errorf(\"%s@%s failed updated：%s\", decl.Name, decl.Version, err.Error())\n//\t\t\t\terrlogs = append(errlogs, err.Error())\n//\t\t\t\tcontinue\n//\t\t\t}\n//\t\t\teffects = append(effects, decl)\n//\t\t\tcli_ui.Infof(\"\t%s@%s all ready updated!\\n\", decl.Name, decl.Version)\n//\t\t}\n//\t}\n//\n//\treturn effects, nil\n//}\n//\n//func Sync(ctx context.Context) (lockSlice []lockStruct, err error) {\n//\t// load and check config\n//\tcli_ui.Infof(\"Initializing provider plugins...\\n\\n\")\n//\trootConfig, err := config.GetConfig()\n//\tif err != nil {\n//\t\treturn nil, err\n//\t}\n//\n//\tif err = test.CheckSelefraConfig(ctx, rootConfig); err != nil {\n//\t\t_ = http_client.TrySetUpStage(global.RelvPrjName(), http_client.Failed)\n//\t\treturn nil, err\n//\t}\n//\n//\tif _, err := cloud_sdk.UploadLogStatus(); err != nil {\n//\t\tcli_ui.Errorln(err.Error())\n//\t}\n//\n//\tvar errored bool\n//\n//\tproviderDecls, errLogs := effectiveDecls(ctx, rootConfig.Selefra.ProviderDecls)\n//\n//\tcli_ui.Infof(\"Selefra has been finished update providers!\\n\")\n//\n//\tglobal.SetStage(\"pull\")\n//\tfor _, decl := range providerDecls {\n//\t\tprvds := tools.ProvidersByID(rootConfig, decl.Name)\n//\t\tfor _, prvd := range prvds {\n//\n//\t\t\t// build a postgresql storage\n//\t\t\tschemaKey := config.GetSchemaKey(decl, *prvd)\n//\t\t\tstore, err := pgstorage.PgStorageWithMeta(ctx, &schema.ClientMeta{\n//\t\t\t\tClientLogger: logger.NewSchemaLogger(),\n//\t\t\t}, pgstorage.WithSearchPath(config.GetSchemaKey(decl, *prvd)))\n//\t\t\tif err != nil {\n//\t\t\t\terrored = true\n//\t\t\t\tcli_ui.Errorf(\"%s@%s failed updated：%s\", decl.Name, decl.Version, err.Error())\n//\t\t\t\terrLogs = append(errLogs, fmt.Sprintf(\"%s@%s failed updated：%s\", decl.Name, 
decl.Version, err.Error()))\n//\t\t\t\tcontinue\n//\t\t\t}\n//\n//\t\t\t// try lock\n//\t\t\t// TODO: check unlock\n//\t\t\tuuid := id_util.RandomId()\n//\t\t\tfor {\n//\t\t\t\terr = store.Lock(ctx, schemaKey, uuid)\n//\t\t\t\tif err == nil {\n//\t\t\t\t\tlockSlice = append(lockSlice, lockStruct{\n//\t\t\t\t\t\tSchemaKey: schemaKey,\n//\t\t\t\t\t\tUuid:      uuid,\n//\t\t\t\t\t\tStorage:   store,\n//\t\t\t\t\t})\n//\t\t\t\t\tbreak\n//\t\t\t\t}\n//\t\t\t\ttime.Sleep(5 * time.Second)\n//\t\t\t}\n//\n//\t\t\t// check if cache expired\n//\t\t\texpired, _ := tools.CacheExpired(ctx, store, prvd.Cache)\n//\t\t\tif !expired {\n//\t\t\t\tcli_ui.Infof(\"%s %s@%s pull infrastructure data:\\n\", prvd.Name, decl.Name, decl.Version)\n//\t\t\t\tcli_ui.Print(fmt.Sprintf(\"Pulling %s@%s Please wait for resource information ...\", decl.Name, decl.Version), false)\n//\t\t\t\tcli_ui.Infof(\"\t%s@%s all ready use cache!\\n\", decl.Name, decl.Version)\n//\t\t\t\tcontinue\n//\t\t\t}\n//\n//\t\t\t// if expired, fetch new data\n//\t\t\terr = fetch.Fetch(ctx, decl, prvd)\n//\t\t\tif err != nil {\n//\t\t\t\tcli_ui.Errorf(\"%s %s Synchronization failed：%s\", decl.Name, decl.Version, err.Error())\n//\t\t\t\terrored = true\n//\t\t\t\tcontinue\n//\t\t\t}\n//\n//\t\t\t// set fetch time\n//\t\t\tif err := pgstorage.SetStorageValue(ctx, store, config.GetCacheKey(), time.Now().Format(time.RFC3339)); err != nil {\n//\t\t\t\tcli_ui.Warningf(\"%s %s set cache time failed：%s\", decl.Name, decl.Version, err.Error())\n//\t\t\t\terrored = true\n//\t\t\t\tcontinue\n//\t\t\t}\n//\t\t}\n//\t}\n//\tif errored {\n//\t\tcli_ui.Errorf(`\n//This may be exception, view detailed exception in %s .\n//`, filepath.Join(global.WorkSpace(), \"logs\"))\n//\t}\n//\n//\treturn lockSlice, nil\n//}\n"
  },
  {
    "path": "cmd/provider/sync_online_test.go",
    "content": "package provider\n\n//func TestSyncOnline(t *testing.T) {\n//\tif testing.Short() {\n//\t\tt.Skip(\"skipping test in short mode.\")\n//\t\treturn\n//\t}\n//\tglobal.Init(\"TestSyncOnline\", global.WithWorkspace(\"../../tests/workspace/online\"))\n//\tglobal.SetToken(\"xxxxxxxxxxxxxxxxxxxxxx\")\n//\tglobal.SERVER = \"dev-api.selefra.io\"\n//\t_, err := Sync(nil)\n//\tif err != nil {\n//\t\tt.Error(err)\n//\t}\n//\tif len(errLogs) != 0 {\n//\t\tt.Error(errLogs)\n//\t}\n//}\n"
  },
  {
    "path": "cmd/provider/sync_test.go",
    "content": "package provider\n\n//import (\n//\t\"context\"\n//\t\"github.com/selefra/selefra/config\"\n//\t\"github.com/selefra/selefra/global\"\n//\t\"github.com/stretchr/testify/require\"\n//\t\"testing\"\n//)\n//\n//func Test_effectiveDecls(t *testing.T) {\n//\tctx := context.Background()\n//\tglobal.Init(\"\", global.WithWorkspace(\"../../tests/workspace/offline\"))\n//\trootConfig, err := config.GetConfig()\n//\tif err != nil {\n//\t\tt.Fatal(err)\n//\t}\n//\n//\tdecls, _ := effectiveDecls(ctx, rootConfig.Selefra.ProviderDecls)\n//\n//\trequire.Equal(t, 1, len(decls))\n//\n//\trequire.Equal(t, \"aws\", decls[0].Name)\n//\trequire.Equal(t, \"v0.0.9\", decls[0].Version)\n//}\n"
  },
  {
    "path": "cmd/provider/test_data/provider.sh",
    "content": "#!/usr/bin/env bash\n#######################################################################################################################\n#                                                                                                                     #\n#                              This script helps you test interactive programs                                        #\n#                                                                                                                     #\n#                                                                                                                     #\n#                                                                                                   Version: 0.0.1    #\n#                                                                                                                     #\n#######################################################################################################################\n\n# for command `selefra provider`\ncd ../../../\ngo build\nrm -rf ./test\nmkdir test\nmv selefra.exe ./test\ncd test\necho \"begin run command selefra provider\"\n./selefra.exe provider $@\n\n"
  },
  {
    "path": "cmd/provider/update.go",
    "content": "package provider\n\n//import (\n//\t\"context\"\n//\t\"github.com/selefra/selefra/cli_ui\"\n//\t\"github.com/selefra/selefra/cmd/fetch\"\n//\t\"github.com/selefra/selefra/cmd/tools\"\n//\t\"github.com/selefra/selefra/config\"\n//\t\"github.com/selefra/selefra/global\"\n//\t\"github.com/selefra/selefra/pkg/registry\"\n//\t\"github.com/selefra/selefra/pkg/utils\"\n//\t\"github.com/spf13/cobra\"\n//)\n//\n//func newCmdProviderUpdate() *cobra.Command {\n//\tcmd := &cobra.Command{\n//\t\tUse:              \"update\",\n//\t\tShort:            \"Upgrade one or more plugins\",\n//\t\tLong:             \"Upgrade one or more plugins\",\n//\t\tPersistentPreRun: global.DefaultWrappedInit(),\n//\t\tRunE: func(cmd *cobra.Command, args []string) error {\n//\t\t\treturn update(cmd.Context(), args)\n//\t\t},\n//\t}\n//\n//\tcmd.SetHelpFunc(cmd.HelpFunc())\n//\treturn cmd\n//}\n//\n//func update(ctx context.Context, args []string) error {\n//\terr := config.IsSelefra()\n//\tif err != nil {\n//\t\tcli_ui.Errorln(err.Error())\n//\t\treturn err\n//\t}\n//\targsMap := make(map[string]bool)\n//\tfor i := range args {\n//\t\targsMap[args[i]] = true\n//\t}\n//\trootConfig, err := config.GetConfig()\n//\tif err != nil {\n//\t\treturn err\n//\t}\n//\tnamespace, _, err := utils.Home()\n//\tif err != nil {\n//\t\treturn err\n//\t}\n//\tprovider := registry.NewProviderRegistry(namespace)\n//\tfor _, decl := range rootConfig.Selefra.ProviderDecls {\n//\t\tprov := registry.ProviderBinary{\n//\t\t\tProvider: registry.Provider{\n//\t\t\t\tName:    decl.Name,\n//\t\t\t\tVersion: decl.Version,\n//\t\t\t\tSource:  \"\",\n//\t\t\t},\n//\t\t\tFilepath: decl.Path,\n//\t\t}\n//\t\tif len(args) != 0 && !argsMap[decl.Name] {\n//\t\t\tbreak\n//\t\t}\n//\n//\t\tpp, err := provider.CheckUpdate(ctx, prov)\n//\t\tif err != nil {\n//\t\t\treturn err\n//\t\t}\n//\t\tdecl.Path = pp.Filepath\n//\t\tdecl.Version = pp.Version\n//\n//\t\tfor _, prvd := range tools.ProvidersByID(rootConfig, decl.Name) 
{\n//\t\t\terr = fetch.Fetch(ctx, decl, prvd)\n//\t\t\tif err != nil {\n//\t\t\t\treturn err\n//\t\t\t}\n//\t\t}\n//\t}\n//\treturn nil\n//}\n"
  },
  {
    "path": "cmd/provider/update_online_test.go",
    "content": "package provider\n\n//import (\n//\t\"context\"\n//\t\"github.com/selefra/selefra/global\"\n//\t\"testing\"\n//)\n//\n//func TestUpdateOnline(t *testing.T) {\n//\tif testing.Short() {\n//\t\tt.Skip(\"skipping test in short mode.\")\n//\t\treturn\n//\t}\n//\tglobal.Init(\"TestRemoveOnline\", global.WithWorkspace(\"../../tests/workspace/online\"))\n//\tglobal.SetToken(\"xxxxxxxxxxxxxxxxxxxxxx\")\n//\tglobal.SERVER = \"dev-api.selefra.io\"\n//\tctx := context.Background()\n//\targ := []string{\"aws\"}\n//\terr := update(ctx, arg)\n//\tif err != nil {\n//\t\tt.Error(err)\n//\t}\n//}\n"
  },
  {
    "path": "cmd/provider/update_test.go",
    "content": "package provider\n\n//import (\n//\t\"context\"\n//\t\"github.com/selefra/selefra/global\"\n//\t\"testing\"\n//)\n//\n//func TestUpdate(t *testing.T) {\n//\tglobal.Init(\"TestUpdate\", global.WithWorkspace(\"../../tests/workspace/offline\"))\n//\tctx := context.Background()\n//\targ := []string{\"aws\"}\n//\terr := update(ctx, arg)\n//\tif err != nil {\n//\t\tt.Error(err)\n//\t}\n//}\n"
  },
  {
    "path": "cmd/query/client.go",
    "content": "package query\n\nimport (\n\t\"context\"\n\t\"github.com/c-bata/go-prompt\"\n\t\"github.com/selefra/selefra-provider-sdk/storage\"\n\t\"github.com/selefra/selefra-provider-sdk/storage_factory\"\n\t\"github.com/selefra/selefra/cli_ui\"\n\t\"github.com/selefra/selefra/pkg/utils\"\n\t\"os\"\n\t\"strings\"\n)\n\n// ------------------------------------------------- --------------------------------------------------------------------\n\n// SQLQueryClient TODO Optimize the experience of writing SQL statements\ntype SQLQueryClient struct {\n\tstorageType storage_factory.StorageType\n\tStorage     storage.Storage\n\n\tTables  []prompt.Suggest\n\tColumns []prompt.Suggest\n}\n\nfunc NewQueryClient(ctx context.Context, storageType storage_factory.StorageType, storage storage.Storage) (*SQLQueryClient, error) {\n\tclient := &SQLQueryClient{\n\t\tstorageType: storageType,\n\t\tStorage:     storage,\n\t}\n\n\t// TODO BUG: If you switch schema, the hints here will be outdated\n\tclient.initTablesSuggest(ctx)\n\tclient.initColumnsSuggest(ctx)\n\n\treturn client, nil\n}\n\n// ------------------------------------------------- --------------------------------------------------------------------\n\nfunc (x *SQLQueryClient) Run(ctx context.Context) {\n\n\tcli_ui.Infof(\"You can end the session by typing `exit` and press enter, now enter your query statement: \\n\")\n\n\tp := prompt.New(func(in string) {\n\n\t\tin = strings.TrimSpace(in)\n\t\tstrArr := strings.Split(in, \"\\\\\")\n\t\ts := strArr[0]\n\n\t\tlowerSql := strings.ToLower(s)\n\t\tif lowerSql == \"exit\" || lowerSql == \"exit;\" || lowerSql == \".exit\" {\n\t\t\tcli_ui.Infof(\"Bye.\")\n\t\t\tos.Exit(0)\n\t\t}\n\n\t\tres, err := x.Storage.Query(ctx, s)\n\t\tif err != nil {\n\t\t\tcli_ui.Errorln(err)\n\t\t} else {\n\t\t\ttables, e := res.ReadRows(-1)\n\t\t\tif e != nil && e.HasError() {\n\t\t\t\tcli_ui.Errorln(err)\n\t\t\t\treturn\n\t\t\t}\n\t\t\theader := tables.GetColumnNames()\n\t\t\tbody := 
tables.GetMatrix()\n\t\t\tvar tableBody [][]string\n\t\t\tfor i := range body {\n\t\t\t\tvar row []string\n\t\t\t\tfor j := range body[i] {\n\t\t\t\t\trow = append(row, utils.Strava(body[i][j]))\n\t\t\t\t}\n\t\t\t\ttableBody = append(tableBody, row)\n\t\t\t}\n\n\t\t\t// \\g or \\G use row mode show query result\n\t\t\tif len(strArr) > 1 && (strArr[1] == \"g\" || strArr[1] == \"G\") {\n\t\t\t\tcli_ui.ShowRows(header, tableBody, []string{}, true)\n\t\t\t} else {\n\t\t\t\tcli_ui.ShowTable(header, tableBody, []string{}, true)\n\t\t\t}\n\n\t\t}\n\n\t}, x.completer,\n\t\tprompt.OptionTitle(\"Table\"),\n\t\tprompt.OptionPrefix(\"> \"),\n\t\tprompt.OptionAddKeyBind(prompt.KeyBind{\n\t\t\tKey: prompt.ControlC,\n\t\t\tFn: func(buffer *prompt.Buffer) {\n\t\t\t\tos.Exit(0)\n\t\t\t},\n\t\t}),\n\t)\n\tp.Run()\n}\n\n// ------------------------------------------------- --------------------------------------------------------------------\n\n// if there are no spaces this is the first word\nfunc (x *SQLQueryClient) isFirstWord(text string) bool {\n\treturn strings.LastIndex(text, \" \") == -1\n}\n\nfunc (x *SQLQueryClient) completer(d prompt.Document) []prompt.Suggest {\n\ttext := d.TextBeforeCursor()\n\ts := x.formatSuggest(d.Text, text)\n\treturn prompt.FilterHasPrefix(s, d.GetWordBeforeCursor(), true)\n}\n\nfunc (x *SQLQueryClient) formatSuggest(text string, before string) []prompt.Suggest {\n\tvar s []prompt.Suggest\n\tif x.isFirstWord(text) {\n\t\tif text != \"\" {\n\t\t\ts = []prompt.Suggest{\n\t\t\t\t{Text: \"SELECT\"},\n\t\t\t\t{Text: \"WITH\"},\n\t\t\t}\n\t\t}\n\t} else {\n\t\ttexts := strings.Split(before, \" \")\n\t\tif strings.ToLower(texts[len(texts)-2]) == \"from\" {\n\t\t\ts = x.Tables\n\t\t}\n\t\tif strings.ToLower(texts[len(texts)-2]) == \"select\" {\n\t\t\ts = x.Columns\n\t\t}\n\t\tif strings.ToLower(texts[len(texts)-2]) == \",\" {\n\t\t\ts = x.Columns\n\t\t}\n\t}\n\treturn s\n}\n\n// ------------------------------------------------- 
--------------------------------------------------------------------\n\nfunc (x *SQLQueryClient) initTablesSuggest(ctx context.Context) {\n\tres, diag := x.Storage.Query(ctx, x.getTablesSuggestSQL())\n\tvar tables []prompt.Suggest\n\tif diag != nil {\n\t\t_ = cli_ui.PrintDiagnostic(diag.GetDiagnosticSlice())\n\t} else {\n\t\trows, diag := res.ReadRows(-1)\n\t\tif diag != nil {\n\t\t\t_ = cli_ui.PrintDiagnostic(diag.GetDiagnosticSlice())\n\t\t}\n\t\tfor i := range rows.GetMatrix() {\n\t\t\ttableName := rows.GetMatrix()[i][0].(string)\n\t\t\ttables = append(tables, prompt.Suggest{Text: tableName})\n\t\t}\n\t}\n\tx.Tables = tables\n}\n\nfunc (x *SQLQueryClient) getTablesSuggestSQL() string {\n\tswitch x.storageType {\n\tcase storage_factory.StorageTypePostgresql:\n\t\treturn TABLESQL\n\tdefault:\n\t\treturn \"\"\n\t}\n}\n\n// ------------------------------------------------- --------------------------------------------------------------------\n\nfunc (x *SQLQueryClient) initColumnsSuggest(ctx context.Context) {\n\trs, err := x.Storage.Query(ctx, x.getColumnsSuggestSQL())\n\tvar columns []prompt.Suggest\n\tif err != nil {\n\t\t_ = cli_ui.PrintDiagnostic(err.GetDiagnosticSlice())\n\t} else {\n\t\trows, err := rs.ReadRows(-1)\n\t\tif err != nil {\n\t\t\t_ = cli_ui.PrintDiagnostic(err.GetDiagnosticSlice())\n\t\t}\n\t\tfor i := range rows.GetMatrix() {\n\t\t\tschemaName := rows.GetMatrix()[i][0].(string)\n\t\t\ttableName := rows.GetMatrix()[i][1].(string)\n\t\t\tcolumnName := rows.GetMatrix()[i][2].(string)\n\t\t\tcolumns = append(columns, prompt.Suggest{Text: columnName, Description: schemaName + \".\" + tableName})\n\t\t}\n\t}\n\tx.Columns = columns\n}\n\nfunc (x *SQLQueryClient) getColumnsSuggestSQL() string {\n\tswitch x.storageType {\n\tcase storage_factory.StorageTypePostgresql:\n\t\treturn COLUMNSQL\n\tdefault:\n\t\treturn \"\"\n\t}\n}\n\n// ------------------------------------------------- --------------------------------------------------------------------\n"
  },
  {
    "path": "cmd/query/client_test.go",
    "content": "package query\n\nimport (\n\t\"context\"\n\t\"github.com/selefra/selefra-provider-sdk/env\"\n\t\"github.com/selefra/selefra-provider-sdk/storage/database_storage/postgresql_storage\"\n\t\"github.com/selefra/selefra-provider-sdk/storage_factory\"\n\t\"github.com/selefra/selefra/cli_ui\"\n\t\"testing\"\n)\n\nfunc TestNewQueryClient(t *testing.T) {\n\tctx := context.Background()\n\n\toptions := postgresql_storage.NewPostgresqlStorageOptions(env.GetDatabaseDsn())\n\tstorage, diagnostics := storage_factory.NewStorage(context.Background(), storage_factory.StorageTypePostgresql, options)\n\tif err := cli_ui.PrintDiagnostics(diagnostics); err != nil {\n\t\treturn\n\t}\n\tqueryClient, _ := NewQueryClient(ctx, storage_factory.StorageTypePostgresql, storage)\n\tif queryClient == nil {\n\t\tt.Error(\"queryClient is nil\")\n\t}\n\tqueryClient.Run(context.Background())\n}\n\n//func TestNewQueryClientOnline(t *testing.T) {\n//\tctx := context.Background()\n//\tglobal.Init(\"query\", global.WithWorkspace(\"../../tests/workspace/online\"))\n//\tglobal.SetToken(\"xxxxxxxxxxxxxxxxxxxxxx\")\n//\tglobal.SERVER = \"dev-api.selefra.io\"\n//\n//\tqueryClient, _ := NewQueryClient(ctx)\n//\tif queryClient == nil {\n//\t\tt.Error(\"queryClient is nil\")\n//\t}\n//}\n\n//func TestCreateColumnsSuggest(t *testing.T) {\n//\tctx := context.Background()\n//\tglobal.Init(\"go_test\", global.WithWorkspace(\"../../tests/workspace/offline\"))\n//\tcof, err := config.GetConfig()\n//\tif err != nil {\n//\t\tcli_ui.Errorln(err)\n//\t}\n//\tfor i := range cof.Selefra.ProviderDecls {\n//\t\tconfs, err := tools.ProviderConfigStrs(cof, cof.Selefra.ProviderDecls[i].Name)\n//\t\tif err != nil {\n//\t\t\tcli_ui.Errorln(err.Error())\n//\t\t}\n//\t\tfor _, conf := range confs {\n//\t\t\tvar cp config.ProviderBlock\n//\t\t\terr := json.Unmarshal([]byte(conf), &cp)\n//\t\t\tif err != nil {\n//\t\t\t\tcli_ui.Errorln(err.Error())\n//\t\t\t\tcontinue\n//\t\t\t}\n//\t\t\t//ctx, c, err := 
createCtxAndClient(*cof, cof.Selefra.RequireProvidersBlock[i], cp)\n//\t\t\t//if err != nil {\n//\t\t\t//\tt.Error(err)\n//\t\t\t//}\n//\t\t\tsto, _ := pgstorage.Storage(ctx)\n//\t\t\tcolumns := initColumnsSuggest(ctx, sto)\n//\t\t\tif columns == nil {\n//\t\t\t\tt.Error(\"Columns is nil\")\n//\t\t\t}\n//\t\t}\n//\t}\n//}\n//\n//func TestCreateTablesSuggest(t *testing.T) {\n//\tctx := context.Background()\n//\tglobal.Init(\"go_test\", global.WithWorkspace(\"../../tests/workspace/offline\"))\n//\tcof, err := config.GetConfig()\n//\tif err != nil {\n//\t\tcli_ui.Errorln(err)\n//\t}\n//\tfor i := range cof.Selefra.ProviderDecls {\n//\t\tconfs, err := tools.ProviderConfigStrs(cof, cof.Selefra.ProviderDecls[i].Name)\n//\t\tif err != nil {\n//\t\t\tcli_ui.Errorln(err.Error())\n//\t\t}\n//\t\tfor _, conf := range confs {\n//\t\t\tvar cp config.ProviderBlock\n//\t\t\terr := json.Unmarshal([]byte(conf), &cp)\n//\t\t\tif err != nil {\n//\t\t\t\tcli_ui.Errorln(err.Error())\n//\t\t\t\tcontinue\n//\t\t\t}\n//\t\t\tsto, _ := pgstorage.Storage(ctx)\n//\t\t\ttables := initTablesSuggest(ctx, sto)\n//\t\t\tif tables == nil {\n//\t\t\t\tt.Error(\"Tables is nil\")\n//\t\t\t}\n//\t\t}\n//\t}\n//}\n"
  },
  {
    "path": "cmd/query/parse.go",
    "content": "package query\n\n//import (\n//\t\"github.com/blastrain/vitess-sqlparser/sqlparser\"\n//)\n//\n//// UnknownType 0\n//// Select 1\n//// Upgrade 2\n//// Insert 3\n//// Delete 4\n//\n//// Unknown => 0\n//// Eq -> \"=\" 1\n//// Ne -> \"!=\" 2\n//// Gt -> \">\" 3\n//// Lt -> \"<\" 4\n//// Gte -> \">=\" 5\n//// Lte -> \"<=\" 6\n//\n//func ParserSql(sql string) {\n//\tquery, err := sqlparser.Parse(sql)\n//\tif err != nil {\n//\t\tpanic(err)\n//\t}\n//\tquery.WalkSubtree(walk)\n//}\n//\n//func walk(node sqlparser.SQLNode) (kontinue bool, err error) {\n//\tif node != nil {\n//\t\tsqlparser.String(node)\n//\t}\n//\treturn true, nil\n//}\n"
  },
  {
    "path": "cmd/query/query.go",
    "content": "package query\n\nimport (\n\t\"context\"\n\t\"errors\"\n\t\"github.com/selefra/selefra-provider-sdk/env\"\n\t\"github.com/selefra/selefra-provider-sdk/provider/schema\"\n\t\"github.com/selefra/selefra-provider-sdk/storage/database_storage/postgresql_storage\"\n\t\"github.com/selefra/selefra-provider-sdk/storage_factory\"\n\t\"github.com/selefra/selefra-utils/pkg/dsn_util\"\n\t\"github.com/selefra/selefra/cli_ui\"\n\t\"github.com/selefra/selefra/config\"\n\t\"github.com/selefra/selefra/global\"\n\t\"github.com/selefra/selefra/pkg/cli_env\"\n\t\"github.com/selefra/selefra/pkg/cloud_sdk\"\n\t\"github.com/selefra/selefra/pkg/message\"\n\t\"github.com/selefra/selefra/pkg/modules/module_loader\"\n\t\"github.com/selefra/selefra/pkg/storage/pgstorage\"\n\t\"github.com/selefra/selefra/pkg/utils\"\n\t\"github.com/spf13/cobra\"\n\t\"os\"\n)\n\nfunc NewQueryCmd() *cobra.Command {\n\tcmd := &cobra.Command{\n\t\tUse:              \"query\",\n\t\tShort:            \"Query infrastructure data from pgstorage\",\n\t\tLong:             \"Query infrastructure data from pgstorage\",\n\t\tPersistentPreRun: global.DefaultWrappedInit(),\n\t\tRun: func(cmd *cobra.Command, args []string) {\n\n\t\t\tctx := cmd.Context()\n\n\t\t\tdownloadDirectory, err := config.GetDefaultDownloadCacheDirectory()\n\t\t\tif err != nil {\n\t\t\t\tcli_ui.Errorln(err.Error() + \" \\n\")\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\tprojectWorkspace := \"./\"\n\n\t\t\tdsn, err := getDsn(ctx, projectWorkspace, downloadDirectory)\n\t\t\tif err != nil {\n\t\t\t\tcli_ui.Errorln(err.Error() + \"\\n\")\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\t// show tips\n\t\t\tc, err := dsn_util.NewConfigByDSN(dsn)\n\t\t\tif err != nil {\n\t\t\t\tcli_ui.Errorln(\"Parse dsn %s error: %s\", dsn, err.Error())\n\t\t\t\treturn\n\t\t\t}\n\t\t\tcli_ui.Infof(\"Connection to you database `%s` ... 
\\n\", c.ToDSN(true))\n\n\t\t\toptions := postgresql_storage.NewPostgresqlStorageOptions(dsn)\n\t\t\tdatabaseStorage, diagnostics := storage_factory.NewStorage(cmd.Context(), storage_factory.StorageTypePostgresql, options)\n\t\t\tif err := cli_ui.PrintDiagnostics(diagnostics); err != nil {\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\tdefer func() {\n\t\t\t\tdatabaseStorage.Close()\n\t\t\t}()\n\n\t\t\tqueryClient, _ := NewQueryClient(ctx, storage_factory.StorageTypePostgresql, databaseStorage)\n\t\t\tqueryClient.Run(ctx)\n\n\t\t},\n\t}\n\treturn cmd\n}\n\nfunc getDsn(ctx context.Context, projectWorkspace, downloadWorkspace string) (string, error) {\n\n\t// 1. load from project workspace\n\tdsn, _ := loadDSNFromProjectWorkspace(ctx, projectWorkspace, downloadWorkspace)\n\tif dsn != \"\" {\n\t\tcli_ui.Infof(\"Find database connection in workspace. %s \\n\", projectWorkspace)\n\t\treturn dsn, nil\n\t}\n\n\t// 2. load from selefra cloud\n\tclient, diagnostics := cloud_sdk.NewCloudClient(cli_env.GetServerHost())\n\tif err := cli_ui.PrintDiagnostics(diagnostics); err != nil {\n\t\treturn \"\", err\n\t}\n\tif c, _ := client.GetCredentials(); c != nil {\n\t\tc, d := client.Login(c.Token)\n\t\tif err := cli_ui.PrintDiagnostics(d); err != nil {\n\t\t\treturn \"\", err\n\t\t}\n\t\td = client.SaveCredentials(c)\n\t\tif err := cli_ui.PrintDiagnostics(d); err != nil {\n\t\t\treturn \"\", err\n\t\t}\n\t\torgDSN, d := client.FetchOrgDSN()\n\t\tif err := cli_ui.PrintDiagnostics(d); err != nil {\n\t\t\treturn \"\", err\n\t\t}\n\t\tif orgDSN != \"\" {\n\t\t\tcli_ui.Infof(\"Find database connection in you selefra cloud. \\n\")\n\t\t\treturn orgDSN, nil\n\t\t}\n\t}\n\n\t// 3. get dsn from env\n\tdsn = os.Getenv(env.DatabaseDsn)\n\tif dsn != \"\" {\n\t\tcli_ui.Infof(\"Find database connection in your env. \\n\")\n\t\treturn dsn, nil\n\t}\n\n\t// 4. 
start default postgresql instance\n\tmessageChannel := message.NewChannel[*schema.Diagnostics](func(index int, message *schema.Diagnostics) {\n\t\tif utils.IsNotEmpty(message) {\n\t\t\t_ = cli_ui.PrintDiagnostics(message)\n\t\t}\n\t})\n\tdsn = pgstorage.DefaultPostgreSQL(downloadWorkspace, messageChannel)\n\tmessageChannel.ReceiverWait()\n\tif dsn != \"\" {\n\t\treturn dsn, nil\n\t}\n\n\treturn \"\", errors.New(\"Can not find database connection\")\n}\n\n// Look for DSN in the configuration of the project's working directory\nfunc loadDSNFromProjectWorkspace(ctx context.Context, projectWorkspace, downloadWorkspace string) (string, error) {\n\tmessageChannel := message.NewChannel[*schema.Diagnostics](func(index int, message *schema.Diagnostics) {\n\t\t// Any error while loading will not print\n\t\t//if utils.IsNotEmpty(message) {\n\t\t//\t_ = cli_ui.PrintDiagnostics(message)\n\t\t//}\n\t})\n\tloader, err := module_loader.NewLocalDirectoryModuleLoader(&module_loader.LocalDirectoryModuleLoaderOptions{\n\t\tModuleLoaderOptions: &module_loader.ModuleLoaderOptions{\n\t\t\tSource:            projectWorkspace,\n\t\t\tVersion:           \"\",\n\t\t\tDownloadDirectory: downloadWorkspace,\n\t\t\tProgressTracker:   nil,\n\t\t\tMessageChannel:    messageChannel,\n\t\t\tDependenciesTree:  []string{projectWorkspace},\n\t\t},\n\t})\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\trootModule, b := loader.Load(ctx)\n\tif !b {\n\t\treturn \"\", nil\n\t}\n\tif rootModule.SelefraBlock != nil && rootModule.SelefraBlock.ConnectionBlock != nil && rootModule.SelefraBlock.ConnectionBlock.BuildDSN() != \"\" {\n\t\treturn rootModule.SelefraBlock.ConnectionBlock.BuildDSN(), nil\n\t}\n\treturn \"\", nil\n}\n"
  },
  {
    "path": "cmd/query/query_test.go",
    "content": "package query\n\n"
  },
  {
    "path": "cmd/query/sql.go",
    "content": "package query\n\nconst TABLESQL = \"SELECT tablename FROM pg_tables where schemaname = 'public' and tablename<>'pg_stat_statements'\"\nconst COLUMNSQL = \"select table_schema,table_name,column_name from information_schema.columns where table_schema='public' and table_name<>'pg_stat_statements'\"\n"
  },
  {
    "path": "cmd/query/test_data/query.sh",
    "content": "#!/usr/bin/env bash\n#######################################################################################################################\n#                                                                                                                     #\n#                              This script helps you test interactive programs                                        #\n#                                                                                                                     #\n#                                                                                                                     #\n#                                                                                                   Version: 0.0.1    #\n#                                                                                                                     #\n#######################################################################################################################\n\n# for command `selefra query`\ncd ../../../\ngo build\nrm -rf ./test\nmkdir test\nmv selefra.exe ./test\ncd test\necho \"begin run command selefra query\"\n./selefra.exe query $@\n\n"
  },
  {
    "path": "cmd/root.go",
    "content": "package cmd\n\nimport (\n\t\"fmt\"\n\t\"github.com/selefra/selefra/cli_ui\"\n\t\"github.com/selefra/selefra/cmd/apply\"\n\t\"github.com/selefra/selefra/cmd/fetch\"\n\t\"github.com/selefra/selefra/cmd/gpt\"\n\tinitCmd \"github.com/selefra/selefra/cmd/init\"\n\t\"github.com/selefra/selefra/cmd/login\"\n\t\"github.com/selefra/selefra/cmd/logout\"\n\t\"github.com/selefra/selefra/cmd/provider\"\n\t\"github.com/selefra/selefra/cmd/query\"\n\t\"github.com/selefra/selefra/cmd/test\"\n\t\"github.com/selefra/selefra/cmd/version\"\n\t\"github.com/selefra/selefra/global\"\n\t\"github.com/selefra/selefra/pkg/cli_env\"\n\t\"github.com/selefra/selefra/pkg/telemetry\"\n\t\"github.com/selefra/selefra/pkg/utils\"\n\t\"github.com/spf13/cobra\"\n\t\"log\"\n\t\"os\"\n\t\"strings\"\n)\n\nvar group = make(map[string][]*cobra.Command)\n\n// rootCmd represents the base command when called without any subcommands\nvar rootCmd = &cobra.Command{\n\tUse:   \"selefra\",\n\tShort: \"Selefra - Simplify Cloud and SaaS analysis with Selefra.\",\n\tLong: `\nFor details see the selefra document https://selefra.io/docs\nIf you like selefra, give us a star https://github.com/selefra/selefra\n`,\n\tPersistentPreRun: func(cmd *cobra.Command, args []string) {\n\t\tlevel, _ := cmd.Flags().GetString(\"loglevel\")\n\t\tglobal.SetLogLevel(level)\n\n\t\t// get telemetry from command params\n\t\ttelemetryEnable, err := cmd.Flags().GetBool(\"telemetry\")\n\t\tif err == nil {\n\t\t\t// user give telemetry param\n\t\t\ttelemetry.TelemetryEnable = telemetryEnable\n\t\t} else {\n\t\t\t// try find it in env variables\n\t\t\ttelemetryEnableString := cli_env.GetSelefraTelemetryEnable()\n\t\t\tif telemetryEnableString != \"\" {\n\t\t\t\tif telemetryEnableString == \"true\" || telemetryEnableString == \"t\" {\n\t\t\t\t\ttelemetry.TelemetryEnable = true\n\t\t\t\t} else if telemetryEnableString == \"false\" || telemetryEnableString == \"f\" {\n\t\t\t\t\ttelemetry.TelemetryEnable = false\n\t\t\t\t}\n\t\t\t} 
else {\n\t\t\t\t// keep default value\n\t\t\t}\n\t\t}\n\t},\n\tPersistentPostRunE: func(cmd *cobra.Command, args []string) error {\n\t\t// need close telemetry on exit\n\t\tdiagnostics := telemetry.Close(cmd.Context())\n\t\treturn cli_ui.PrintDiagnostics(diagnostics)\n\t},\n}\n\n// Execute adds all child commands to the root command and sets flags appropriately.\n// This is called by main.main(). It only needs to happen once to the rootCmd.\nfunc Execute() {\n\tlogo := \" _____        _         __              \\n/  ___|      | |       / _|             \\n\\\\ `--.   ___ | |  ___ | |_  _ __   __ _ \\n `--. \\\\ / _ \\\\| | / _ \\\\|  _|| '__| / _` |\\n/\\\\__/ /|  __/| ||  __/| |  | |   | (_| |\\n\\\\____/  \\\\___||_| \\\\___||_|  |_|    \\\\__,_|\\n\\n\"\n\tcli_ui.Infof(logo)\n\tcli_ui.Infof(\"Selefra - Simplify Cloud and SaaS analysis with Selefra.\\n\\n\\n\")\n\n\tdefer func() {\n\t\tutils.Close()\n\t}()\n\n\tif err := rootCmd.Execute(); err != nil {\n\t\tlog.Printf(\"Error occurred in Execute: %+v\", err)\n\t\tos.Exit(1)\n\t}\n}\n\nfunc init() {\n\trootCmd.PersistentFlags().StringP(\"loglevel\", \"l\", \"info\", \"log level\")\n\trootCmd.PersistentFlags().BoolP(\"telemetry\", \"t\", true, \"Whether to enable telemetry. 
This parameter is enabled by default\")\n\t//rootCmd.PersistentFlags().StringVar(&cfgFile, \"config\", \"\", \"config file (default is $HOME/.test.yaml)\")\n\tgroup[\"main\"] = []*cobra.Command{\n\t\tinitCmd.NewInitCmd(),\n\t\ttest.NewTestCmd(),\n\t\tapply.NewApplyCmd(),\n\t\tlogin.NewLoginCmd(),\n\t\tlogout.NewLogoutCmd(),\n\t\tgpt.NewGPTCmd(),\n\t}\n\n\tgroup[\"other\"] = []*cobra.Command{\n\t\tfetch.NewFetchCmd(),\n\t\tprovider.NewProviderCmd(),\n\t\tquery.NewQueryCmd(),\n\t\tversion.NewVersionCmd(),\n\t}\n\n\trootCmd.AddCommand(group[\"main\"]...)\n\trootCmd.AddCommand(group[\"other\"]...)\n\n\trootCmd.SetHelpFunc(func(cmd *cobra.Command, args []string) {\n\t\tfmt.Println(strings.TrimSpace(cmd.Long))\n\n\t\tfmt.Println(\"\\nUsage:\")\n\t\tfmt.Printf(\"  %-13s\", \"selefra [command]\\n\\n\")\n\n\t\tfmt.Println(\"Main commands:\")\n\t\tfor _, c := range group[\"main\"] {\n\t\t\tfmt.Printf(\"  %-13s%s\\n\", c.Name(), c.Short)\n\t\t}\n\t\tfmt.Println()\n\t\tfmt.Println(\"All other commands:\")\n\t\tfor _, c := range group[\"other\"] {\n\t\t\tfmt.Printf(\"  %-13s%s\\n\", c.Name(), c.Short)\n\t\t}\n\t\tfmt.Println()\n\n\t\tfmt.Println(\"Flags\")\n\t\tfmt.Println(cmd.Flags().FlagUsages())\n\n\t\tfmt.Println(`Use \"selefra [command] --help\" for more information about a command.`)\n\t})\n\n}\n"
  },
  {
    "path": "cmd/tools/providers.go",
    "content": "package tools\n\nimport (\n\t\"context\"\n\t\"github.com/selefra/selefra-provider-sdk/storage/database_storage/postgresql_storage\"\n\t\"github.com/selefra/selefra/config\"\n\t\"github.com/selefra/selefra/pkg/storage/pgstorage\"\n\t\"strconv\"\n\t\"strings\"\n\t\"time\"\n)\n\n//// ProviderConfigStrs find all selefra provider config by id from selefra config and return provider config in string format\n//// TODO: deprecated\n//func ProviderConfigStrs(config *config.RootConfig, id string) ([]string, error) {\n//\tvar providerConfs []string\n//\tfor _, group := range config.Providers.Content {\n//\t\tfor i, node := range group.Content {\n//\t\t\tif node.Kind == yaml.ScalarNode && node.Value == \"provider\" && group.Content[i+1].Value == id {\n//\t\t\t\tb, err := yaml.Marshal(group)\n//\t\t\t\tif err != nil {\n//\t\t\t\t\treturn nil, err\n//\t\t\t\t}\n//\t\t\t\tproviderConfs = append(providerConfs, string(b))\n//\n//\t\t\t}\n//\t\t}\n//\t}\n//\treturn providerConfs, nil\n//}\n//\n//// ProvidersByID find all provider in rootConfig by id\n//func ProvidersByID(rootConfig *config.RootConfig, id string) []*config.ProviderBlock {\n//\tvar prvds = make([]*config.ProviderBlock, 0)\n//\tfor _, group := range rootConfig.Providers.Content {\n//\t\tfor i, node := range group.Content {\n//\t\t\tif node.Kind == yaml.ScalarNode && node.Value == \"provider\" && group.Content[i+1].Value == id {\n//\t\t\t\tb, err := yaml.Marshal(group)\n//\t\t\t\tif err != nil {\n//\t\t\t\t\tcontinue\n//\t\t\t\t}\n//\n//\t\t\t\tvar prvd config.ProviderBlock\n//\t\t\t\tif err := yaml.Unmarshal(b, &prvd); err != nil {\n//\t\t\t\t\tcontinue\n//\t\t\t\t}\n//\n//\t\t\t\tprvds = append(prvds, &prvd)\n//\t\t\t}\n//\t\t}\n//\t}\n//\n//\treturn prvds\n//}\n//\n//// SetProviderTmpl set the provider yaml template\n//func SetProviderTmpl(template string, provider registry.ProviderBinary, config *config.RootConfig) error {\n//\tif config.Providers.Kind != yaml.SequenceNode 
{\n//\t\tconfig.Providers.Kind = yaml.SequenceNode\n//\t\tconfig.Providers.Tag = \"!!seq\"\n//\t\tconfig.Providers.Value = \"\"\n//\t\tconfig.Providers.Content = []*yaml.Node{}\n//\t}\n//\n//\tvar node yaml.Node\n//\n//\terr := yaml.Unmarshal([]byte(template), &node)\n//\tif err != nil {\n//\t\treturn err\n//\t}\n//\tvar provNode yaml.Node\n//\tif node.Content == nil {\n//\t\tprovNode.Content = []*yaml.Node{\n//\t\t\t{\n//\t\t\t\tKind: yaml.MappingNode,\n//\t\t\t\tTag:  \"!!map\",\n//\t\t\t\tContent: append([]*yaml.Node{\n//\t\t\t\t\t{\n//\t\t\t\t\t\tKind:  yaml.ScalarNode,\n//\t\t\t\t\t\tValue: \"name\",\n//\t\t\t\t\t},\n//\t\t\t\t\t{\n//\t\t\t\t\t\tKind:  yaml.ScalarNode,\n//\t\t\t\t\t\tValue: provider.Name,\n//\t\t\t\t\t},\n//\t\t\t\t\t{\n//\t\t\t\t\t\tKind:        yaml.ScalarNode,\n//\t\t\t\t\t\tValue:       \"provider\",\n//\t\t\t\t\t\tFootComment: template,\n//\t\t\t\t\t},\n//\t\t\t\t\t{\n//\t\t\t\t\t\tKind:  yaml.ScalarNode,\n//\t\t\t\t\t\tValue: provider.Name,\n//\t\t\t\t\t},\n//\t\t\t\t}),\n//\t\t\t},\n//\t\t}\n//\t} else {\n//\t\tprovNode.Content = []*yaml.Node{\n//\t\t\t{\n//\t\t\t\tKind: yaml.MappingNode,\n//\t\t\t\tTag:  \"!!map\",\n//\t\t\t\tContent: append([]*yaml.Node{\n//\t\t\t\t\t{\n//\t\t\t\t\t\tKind:  yaml.ScalarNode,\n//\t\t\t\t\t\tValue: \"name\",\n//\t\t\t\t\t},\n//\t\t\t\t\t{\n//\t\t\t\t\t\tKind:  yaml.ScalarNode,\n//\t\t\t\t\t\tValue: provider.Name,\n//\t\t\t\t\t},\n//\t\t\t\t\t{\n//\t\t\t\t\t\tKind:        yaml.ScalarNode,\n//\t\t\t\t\t\tValue:       \"provider\",\n//\t\t\t\t\t\tFootComment: template,\n//\t\t\t\t\t},\n//\t\t\t\t\t{\n//\t\t\t\t\t\tKind:  yaml.ScalarNode,\n//\t\t\t\t\t\tValue: provider.Name,\n//\t\t\t\t\t},\n//\t\t\t\t}),\n//\t\t\t},\n//\t\t}\n//\t}\n//\n//\tconfig.Providers.Content = append(config.Providers.Content, provNode.Content...)\n//\n//\treturn nil\n//}\n//\n//// AppendProviderDecl append a provider declare for rootConfig.Selefra.RequireProvidersBlock\n//func AppendProviderDecl(provider registry.ProviderBinary, 
rootConfig *config.RootConfig, configVersion string) error {\n//\tsource, latestSource := utils.CreateSource(provider.Name, provider.Version, configVersion)\n//\t_, configPath, err := utils.Home()\n//\tif err != nil {\n//\t\tcli_ui.Errorln(\"SetSelefraProviderError: \" + err.Error())\n//\t\treturn err\n//\t}\n//\n//\tvar pathMap = make(map[string]string)\n//\tfile, err := os.ReadFile(configPath)\n//\tif err != nil {\n//\t\tcli_ui.Errorln(\"SetSelefraProviderError: \" + err.Error())\n//\t\treturn err\n//\t}\n//\tjson.Unmarshal(file, &pathMap)\n//\tif latestSource != \"\" {\n//\t\tpathMap[latestSource] = provider.Filepath\n//\t}\n//\n//\tpathMap[source] = provider.Filepath\n//\n//\tpathMapJson, err := json.Marshal(pathMap)\n//\n//\tif err != nil {\n//\t\tcli_ui.Errorln(\"SetSelefraProviderError: \" + err.Error())\n//\t}\n//\n//\terr = os.WriteFile(configPath, pathMapJson, 0644)\n//\tif rootConfig != nil {\n//\t\trootConfig.Selefra.ProviderDecls = append(rootConfig.Selefra.ProviderDecls, &config.RequireProvider{\n//\t\t\tName:    provider.Name,\n//\t\t\tSource:  &strings.Split(source, \"@\")[0],\n//\t\t\tVersion: provider.Version,\n//\t\t})\n//\t}\n//\treturn nil\n//}\n\n// CacheExpired check whether the cache time expires\nfunc CacheExpired(ctx context.Context, storage *postgresql_storage.PostgresqlStorage, cacheTime string) (bool, error) {\n\trequireKey := config.GetCacheKey()\n\tfetchTime, err := pgstorage.GetStorageValue(ctx, storage, requireKey)\n\tif err != nil {\n\t\treturn true, err\n\t}\n\tfetchTimeLocal, err := time.ParseInLocation(time.RFC3339, fetchTime, time.Local)\n\tif err != nil {\n\t\treturn true, err\n\t}\n\n\tduration, err := parseDuration(cacheTime)\n\tif err != nil || duration == 0 {\n\t\treturn true, err\n\t}\n\tif time.Now().Sub(fetchTimeLocal) > duration {\n\t\treturn true, nil\n\t}\n\n\treturn false, nil\n}\n\nfunc parseDuration(d string) (time.Duration, error) {\n\td = strings.TrimSpace(d)\n\tdr, err := time.ParseDuration(d)\n\tif err == nil 
{\n\t\treturn dr, nil\n\t}\n\tif strings.Contains(d, \"d\") {\n\t\tindex := strings.Index(d, \"d\")\n\n\t\thour, _ := strconv.Atoi(d[:index])\n\t\tdr = time.Hour * 24 * time.Duration(hour)\n\t\tndr, err := time.ParseDuration(d[index+1:])\n\t\tif err != nil {\n\t\t\treturn dr, nil\n\t\t}\n\t\treturn dr + ndr, nil\n\t}\n\n\tdv, err := strconv.ParseInt(d, 10, 64)\n\treturn time.Duration(dv), err\n}\n"
  },
  {
    "path": "cmd/tools/providers_online_test.go",
    "content": "package tools\n\nimport (\n\t\"context\"\n\t\"github.com/selefra/selefra/config\"\n\t\"github.com/selefra/selefra/global\"\n\t\"github.com/selefra/selefra/pkg/registry\"\n\t\"github.com/selefra/selefra/pkg/utils\"\n\t\"testing\"\n)\n\nfunc getProviderAndConfigOnline() (registry.ProviderBinary, *config.RootConfig, error) {\n\tglobal.Init(\"getProviderAndConfigOnline\", global.WithWorkspace(\"../../tests/workspace/online\"))\n\tglobal.SetToken(\"xxxxxxxxxxxxxxxxxxxxxx\")\n\tglobal.SERVER = \"dev-api.selefra.io\"\n\tctx := context.Background()\n\tcof, err := config.GetConfig()\n\tif err != nil {\n\t\treturn registry.ProviderBinary{}, nil, err\n\t}\n\tpr := registry.Provider{\n\t\tName:    \"aws\",\n\t\tVersion: \"latest\",\n\t\tSource:  \"\",\n\t}\n\tnamespace, _, err := utils.Home()\n\tif err != nil {\n\t\treturn registry.ProviderBinary{}, nil, err\n\t}\n\tprovider := registry.NewProviderRegistry(namespace)\n\tp, err := provider.Download(ctx, pr, true)\n\treturn p, cof, err\n}\n\nfunc TestGetProvidersOnline(t *testing.T) {\n\tglobal.Init(\"TestGetProvidersOnline\", global.WithWorkspace(\"../../tests/workspace/online\"))\n\tglobal.SetToken(\"xxxxxxxxxxxxxxxxxxxxxx\")\n\tglobal.SERVER = \"dev-api.selefra.io\"\n\ts, err := config.GetConfig()\n\tif err != nil {\n\t\tt.Error(err)\n\t}\n\tprovider, err := ProviderConfigStrs(s, \"aws\")\n\tt.Log(provider)\n\tif err != nil {\n\t\tt.Error(err)\n\t}\n\tif len(provider) == 0 {\n\t\tt.Error(\"ProviderBlock is empty\")\n\t}\n}\n\nfunc TestSetProvidersOnline(t *testing.T) {\n\tif testing.Short() {\n\t\tt.Skip(\"skipping test in short mode.\")\n\t\treturn\n\t}\n\tp, cof, err := getProviderAndConfigOnline()\n\ts := `\n      ##  Optional, Repeated. Add an accounts block for every account you want to assume-role into and fetch data from.\n      #accounts:\n      #    #     Optional. User identification\n      #  - account_name: <UNIQUE ACCOUNT IDENTIFIER>\n      #    #    Optional. 
Named profile in config or credential file from where Selefra should grab credentials\n      #    shared_config_profile: < PROFILE_NAME >\n      #    #    Optional. Location of shared configuration files\n      #    shared_config_files:\n      #      - <FILE_PATH>\n      #    #   Optional. Location of shared credentials files\n      #    shared_credentials_files:\n      #      - <FILE_PATH>\n      #    #    Optional. Role ARN we want to assume when accessing this account\n      #    role_arn: < YOUR_ROLE_ARN >\n      #    #    Optional. Named role session to grab specific operation under the assumed role\n      #    role_session_name: <SESSION_NAME>\n      #    #    Optional. Any outside of the org account id that has additional control\n      #    external_id: <ID>\n      #    #    Optional. Designated region of servers\n      #    default_region: <REGION_CODE>\n      #    #    Optional. by default assumes all regions\n      #    regions:\n      #      - us-east-1\n      #      - us-west-2\n      ##    The maximum number of times that a request will be retried for failures. Defaults to 10 retry attempts.\n      #max_attempts: 10\n      ##    The maximum back off delay between attempts. The backoff delays exponentially with a jitter based on the number of attempts. Defaults to 30 seconds.\n      #max_backoff: 30\n`\n\terr = SetProviderTmpl(s, p, cof)\n\tif err != nil {\n\t\tt.Error(err)\n\t}\n}\n\nfunc TestSetSelefraProviderOnLine(t *testing.T) {\n\tif testing.Short() {\n\t\tt.Skip(\"skipping test in short mode.\")\n\t\treturn\n\t}\n\tp, cof, err := getProviderAndConfigOnline()\n\tif err != nil {\n\t\tt.Error(err)\n\t}\n\terr = AppendProviderDecl(p, cof, \"latest\")\n\tif err != nil {\n\t\tt.Error(err)\n\t}\n}\n"
  },
  {
    "path": "cmd/tools/providers_test.go",
    "content": "package tools\n\nimport (\n\t\"context\"\n\t\"github.com/selefra/selefra/config\"\n\t\"github.com/selefra/selefra/global\"\n\t\"github.com/selefra/selefra/pkg/registry\"\n\t\"github.com/selefra/selefra/pkg/utils\"\n\t\"github.com/stretchr/testify/require\"\n\t\"gopkg.in/yaml.v3\"\n\t\"testing\"\n)\n\nfunc getProviderAndConfig() (registry.ProviderBinary, *config.RootConfig, error) {\n\tglobal.Init(\"\", global.WithWorkspace(\"../../tests/workspace/offline\"))\n\tctx := context.Background()\n\tcof, err := config.GetConfig()\n\tif err != nil {\n\t\treturn registry.ProviderBinary{}, nil, err\n\t}\n\tpr := registry.Provider{\n\t\tName:    \"aws\",\n\t\tVersion: \"latest\",\n\t\tSource:  \"\",\n\t}\n\tnamespace, _, err := utils.Home()\n\tif err != nil {\n\t\treturn registry.ProviderBinary{}, nil, err\n\t}\n\tprovider := registry.NewProviderRegistry(namespace)\n\tp, err := provider.Download(ctx, pr, true)\n\treturn p, cof, err\n}\n\nfunc TestGetProviders(t *testing.T) {\n\tglobal.Init(\"\", global.WithWorkspace(\"../../tests/workspace/offline\"))\n\ts, err := config.GetConfig()\n\tif err != nil {\n\t\tt.Error(err)\n\t}\n\tprovider, err := ProviderConfigStrs(s, \"aws\")\n\trequire.NoError(t, err)\n\trequire.Equal(t, 1, len(provider))\n\trequire.Equal(t, \"aws_01\", provider[0])\n}\n\nfunc TestSetProviders(t *testing.T) {\n\tp, cof, err := getProviderAndConfig()\n\n\tvar node yaml.Node\n\tyamls := `providers:\n    - name: aws_01\n      cache: 1d\n      provider: aws\n      resources:`\n\tif err := yaml.Unmarshal([]byte(yamls), &node); err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\ts := `\n      ##  Optional, Repeated. Add an accounts block for every account you want to assume-role into and fetch data from.\n      #accounts:\n      #    #     Optional. User identification\n      #  - account_name: <UNIQUE ACCOUNT IDENTIFIER>\n      #    #    Optional. 
Named profile in config or credential file from where Selefra should grab credentials\n      #    shared_config_profile: < PROFILE_NAME >\n      #    #    Optional. Location of shared configuration files\n      #    shared_config_files:\n      #      - <FILE_PATH>\n      #    #   Optional. Location of shared credentials files\n      #    shared_credentials_files:\n      #      - <FILE_PATH>\n      #    #    Optional. Role ARN we want to assume when accessing this account\n      #    role_arn: < YOUR_ROLE_ARN >\n      #    #    Optional. Named role session to grab specific operation under the assumed role\n      #    role_session_name: <SESSION_NAME>\n      #    #    Optional. Any outside of the org account id that has additional control\n      #    external_id: <ID>\n      #    #    Optional. Designated region of servers\n      #    default_region: <REGION_CODE>\n      #    #    Optional. by default assumes all regions\n      #    regions:\n      #      - us-east-1\n      #      - us-west-2\n      ##    The maximum number of times that a request will be retried for failures. Defaults to 10 retry attempts.\n      #max_attempts: 10\n      ##    The maximum back off delay between attempts. The backoff delays exponentially with a jitter based on the number of attempts. Defaults to 30 seconds.\n      #max_backoff: 30\n`\n\terr = SetProviderTmpl(s, p, cof)\n\tb, err := yaml.Marshal(cof.Providers)\n\tt.Log(string(b))\n\tif err != nil {\n\t\tt.Error(err)\n\t}\n}\n\nfunc TestSetSelefraProvider(t *testing.T) {\n\tp, cof, err := getProviderAndConfig()\n\n\tif err != nil {\n\t\tt.Error(err)\n\t}\n\terr = AppendProviderDecl(p, cof, \"latest\")\n\tif err != nil {\n\t\tt.Error(err)\n\t}\n}\n"
  },
  {
    "path": "cmd/version/version.go",
    "content": "package version\n\nimport (\n\t\"fmt\"\n\t\"github.com/selefra/selefra/global\"\n\t\"github.com/spf13/cobra\"\n)\n\nvar Version = \"{{version}}\"\n\nfunc NewVersionCmd() *cobra.Command {\n\tcmd := &cobra.Command{\n\t\tUse:              \"version\",\n\t\tShort:            \"Print Selefra's version number\",\n\t\tLong:             \"Print Selefra's version number\",\n\t\tPersistentPreRun: global.DefaultWrappedInit(),\n\t\tRun: func(cmd *cobra.Command, args []string) {\n\t\t\tversion()\n\t\t},\n\t}\n\treturn cmd\n}\n\nfunc version() {\n\tfmt.Println(Version)\n}\n"
  },
  {
    "path": "cmd/version/version_test.go",
    "content": "package version\n\nimport \"testing\"\n\nfunc TestVersion(t *testing.T) {\n\tversion()\n}\n"
  },
  {
    "path": "config/config.go",
    "content": "package config\n\nimport (\n\t\"fmt\"\n\t\"github.com/mitchellh/go-homedir\"\n\t\"github.com/selefra/selefra/pkg/utils\"\n\t\"gopkg.in/yaml.v3\"\n\t\"os\"\n\t\"path/filepath\"\n)\n\n// import (\n//\n//\t\"bytes\"\n//\t\"encoding/json\"\n//\t\"errors\"\n//\t\"fmt\"\n//\t\"github.com/mitchellh/go-homedir\"\n//\t\"github.com/selefra/selefra/cli_ui\"\n//\t\"github.com/selefra/selefra/pkg/utils\"\n//\t\"github.com/selefra/selefra/ui\"\n//\t\"github.com/spf13/viper\"\n//\t\"gopkg.in/yaml.v3\"\n//\t\"os\"\n//\t\"path\"\n//\t\"path/filepath\"\n//\t\"strings\"\n//\n//\t\"github.com/selefra/selefra/global\"\n//\n// )\n//\n// // ------------------------------------------------- --------------------------------------------------------------------\n//\n// type sectionName string\n//\n// const (\n//\n//\tSELEFRA   sectionName = \"selefra\"\n//\tMODULES   sectionName = \"modules\"\n//\tPROVIDERS sectionName = \"providers\"\n//\tVARIABLES sectionName = \"variables\"\n//\tRULES     sectionName = \"rules\"\n//\n// )\n//\n//\tvar typeMap = map[sectionName]bool{\n//\t\tSELEFRA:   true,\n//\t\tMODULES:   true,\n//\t\tPROVIDERS: true,\n//\t\tRULES:     true,\n//\t\tVARIABLES: true,\n//\t}\n//\n// // ------------------------------------------------- --------------------------------------------------------------------\n//\n// // ProviderBlock is provider config\n//\n//\ttype ProviderBlock struct {\n//\t\tName          string   `yaml:\"name\" json:\"name\"`\n//\t\tCache         string   `yaml:\"cache\" json:\"cache\"`\n//\t\tProvider      string   `yaml:\"provider\" json:\"provider\"`\n//\t\tMaxGoroutines uint64   `yaml:\"max_goroutines\" json:\"max_goroutines\"`\n//\t\tResources     []string `yaml:\"resources\" json:\"resources\"`\n//\t\tLogLevel      string   `yaml:\"log_level\" json:\"log_level\"`\n//\t}\n//\n//\ttype VariableBlock struct {\n//\t\tKey         string `yaml:\"key\" json:\"key\"`\n//\t\tDefault     string `yaml:\"default\" 
json:\"default\"`\n//\t\tDescription string `yaml:\"description\" json:\"description\"`\n//\t\tAuthor      string `yaml:\"author\" json:\"author\"`\n//\t}\n//\n// // RootConfig is root config for selefra project\n//\n//\ttype RootConfig struct {\n//\t\tSelefra   SelefraBlock    `yaml:\"selefra\"`\n//\t\tProviders yaml.Node       `yaml:\"providers\"`\n//\t\tVariables []VariableBlock `yaml:\"variables\"`\n//\t}\n//\n//\ttype RootConfigInit struct {\n//\t\tSelefra   SelefraConfigInit `yaml:\"selefra\"`\n//\t\tProviders yaml.Node         `yaml:\"providers\"`\n//\t}\n//\n//\ttype RootConfigInitWithLogin struct {\n//\t\tSelefra   SelefraConfigInitWithLogin `yaml:\"selefra\"`\n//\t\tProviders yaml.Node                  `yaml:\"providers\"`\n//\t}\n//\n//\ttype RuleSet struct {\n//\t\tRules []Rule `yaml:\"rules\"`\n//\t}\n//\n//\ttype Rule struct {\n//\t\tPath     string         `yaml:\"path\" json:\"path\"`\n//\t\tName     string         `yaml:\"name\" json:\"name\"`\n//\t\tQuery    string         `yaml:\"query\" json:\"query\"`\n//\t\tLabels   map[string]any `yaml:\"labels\" json:\"labels\"`\n//\t\tMetadata struct {\n//\t\t\tId          string   `yaml:\"id\" json:\"id\"`\n//\t\t\tSeverity    string   `yaml:\"severity\" json:\"severity\"`\n//\t\t\tProvider    string   `yaml:\"provider\" json:\"provider\"`\n//\t\t\tTags        []string `yaml:\"tags\" json:\"tags\"`\n//\t\t\tAuthor      string   `yaml:\"author\" json:\"author\"`\n//\t\t\tRemediation string   `yaml:\"remediation\" json:\"remediation\"`\n//\t\t\tTitle       string   `yaml:\"title\" json:\"title\"`\n//\t\t\tDescription string   `yaml:\"description\" json:\"description\"`\n//\t\t}\n//\t\tOutput string `yaml:\"output\" json:\"-\"`\n//\t}\n//\n//\ttype ModuleConfig struct {\n//\t\tModules []Module `yaml:\"modules\" json:\"modules\"`\n//\t}\n//\n//\ttype Module struct {\n//\t\tName     string          `yaml:\"name\" json:\"name\"`\n//\t\tUses     []string        `yaml:\"uses\" json:\"uses\"`\n//\t\tChildren 
[]*ModuleConfig `yaml:\"-\" json:\"children\"`\n//\t}\n//\n// // CloudBlock is config for selefra cloud\n// // when user is login, cloud config exist, else not\n//\n//\ttype CloudBlock struct {\n//\t\tProject      string `yaml:\"project\" mapstructure:\"project\"`\n//\t\tOrganization string `yaml:\"organization\" mapstructure:\"organization\"`\n//\t\tHostName     string `yaml:\"hostname\" mapstructure:\"hostname\"`\n//\t}\n//\n// // SelefraBlock is the project config\n//\n//\ttype SelefraBlock struct {\n//\t\tCloud         *CloudBlock        `yaml:\"cloud\" mapstructure:\"cloud\"`\n//\t\tName          string             `yaml:\"name\" mapstructure:\"name\"`\n//\t\tCliVersion    string             `yaml:\"cli_version\" mapstructure:\"cli_version\"`\n//\t\tLogLevel      string             `yaml:\"log_level\" mapstructure:\"log_level\"`\n//\t\tProviderDecls []*RequireProvider `yaml:\"providers\" mapstructure:\"providers\"`\n//\t\t//ConnectionBlock *DB                 `yaml:\"connection\" mapstructure:\"connection\"`\n//\t}\n//\n// // SelefraConfigInit is a subset for SelefraBlock without cloud config\n//\n//\ttype SelefraConfigInit struct {\n//\t\tName       string              `yaml:\"name\" mapstructure:\"name\"`\n//\t\tCliVersion string              `yaml:\"cli_version\" mapstructure:\"cli_version\"`\n//\t\tProviders  []*ProviderDeclInit `yaml:\"providers\" mapstructure:\"providers\"`\n//\t}\n//\n// // SelefraConfigInitWithLogin is a subset for SelefraBlock with a cloud config\n//\n//\ttype SelefraConfigInitWithLogin struct {\n//\t\tCloud      *CloudBlock         `yaml:\"cloud\" mapstructure:\"cloud\"`\n//\t\tName       string              `yaml:\"name\" mapstructure:\"name\"`\n//\t\tCliVersion string              `yaml:\"cli_version\" mapstructure:\"cli_version\"`\n//\t\tProviders  []*ProviderDeclInit `yaml:\"providers\" mapstructure:\"providers\"`\n//\t}\n//\n// // RequireProvider is a provider declaration\n//\n//\ttype RequireProvider struct {\n//\t\tName    
string  `yaml:\"name,omitempty\" json:\"name,omitempty\"`\n//\t\tSource  *string `yaml:\"source,omitempty\" json:\"source,omitempty\"`\n//\t\tVersion string  `yaml:\"version,omitempty\" json:\"version,omitempty\"`\n//\t\tPath    string  `yaml:\"path\" json:\"path\"`\n//\t}\n//\n// // ProviderDeclInit is a RequireProvider without Path field\n//\n//\ttype ProviderDeclInit struct {\n//\t\tName    string  `yaml:\"name,omitempty\" json:\"name,omitempty\"`\n//\t\tSource  *string `yaml:\"source,omitempty\" json:\"source,omitempty\"`\n//\t\tVersion string  `yaml:\"version,omitempty\" json:\"version,omitempty\"`\n//\t}\n//\n//\ttype DB struct {\n//\t\tDriver string `yaml:\"driver,omitempty\" json:\"driver,omitempty\"`\n//\t\t// These params are mutually exclusive with DSN\n//\t\tType     string   `yaml:\"type,omitempty\" json:\"type,omitempty\"`\n//\t\tUsername string   `yaml:\"username,omitempty\" json:\"username,omitempty\"`\n//\t\tPassword string   `yaml:\"password,omitempty\" json:\"password,omitempty\"`\n//\t\tHost     string   `yaml:\"host,omitempty\" json:\"host,omitempty\"`\n//\t\tPort     string   `yaml:\"port,omitempty\" json:\"port,omitempty\"`\n//\t\tDatabase string   `yaml:\"database,omitempty\" json:\"database,omitempty\"`\n//\t\tSSLMode  string   `yaml:\"sslmode,omitempty\" json:\"sslmode,omitempty\"`\n//\t\tExtras   []string `yaml:\"extras,omitempty\" json:\"extras,omitempty\"`\n//\t}\n//\n// type YamlKey int\n//\n// type section2File2Content map[sectionName]map[string]string\n//\n//\tfunc (c *SelefraBlock) GetHostName() string {\n//\t\tif c.Cloud != nil && c.Cloud.HostName != \"\" {\n//\t\t\treturn c.Cloud.HostName\n//\t\t}\n//\t\treturn global.SERVER\n//\t}\n//\n// // ------------------------------------------------- --------------------------------------------------------------------\n//\n//\tfunc GetConfig() (*RootConfig, error) {\n//\t\tif err := IsSelefra(); err != nil {\n//\t\t\treturn nil, err\n//\t\t}\n//\n//\t\treturn 
getConfig()\n//\t}\n//\n//\tfunc getConfig() (c *RootConfig, err error) {\n//\t\tconfig := viper.New()\n//\t\tconfig.SetConfigType(\"yaml\")\n//\t\tclientByte, err := GetClientStr()\n//\t\tif err != nil {\n//\t\t\treturn nil, err\n//\t\t}\n//\t\terr = config.ReadConfig(bytes.NewBuffer(clientByte))\n//\t\tif err != nil {\n//\t\t\treturn nil, err\n//\t\t}\n//\t\terr = yaml.Unmarshal(clientByte, &c)\n//\t\tif err != nil {\n//\t\t\treturn nil, err\n//\t\t}\n//\t\tglobal.SetLogLevel(c.Selefra.LogLevel)\n//\t\tglobal.SetProjectName(c.Selefra.Name)\n//\n//\t\tif c.Selefra.Cloud != nil {\n//\t\t\tglobal.SetRelvPrjName(c.Selefra.Cloud.Project)\n//\t\t}\n//\n//\t\tglobal.SERVER = c.Selefra.GetHostName() // TODO: replace\n//\n//\t\treturn c, nil\n//\t}\n//\n// // FileMap load all yaml config file in [dirname] and return a map filename => file_content\n// func FileMap(dirname string) (fm map[string]string, err error) {\n//\n//\t\tvar fn func(dirname string)\n//\t\tfn = func(dirname string) {\n//\t\t\tfiles, e := os.ReadDir(dirname)\n//\t\t\tif e != nil {\n//\t\t\t\terr = e\n//\t\t\t\treturn\n//\t\t\t}\n//\t\t\tfor _, file := range files {\n//\t\t\t\tif file.IsDir() {\n//\t\t\t\t\tfn(filepath.Join(dirname, file.Name()))\n//\t\t\t\t} else {\n//\t\t\t\t\tif path.Ext(file.Name()) == \".yaml\" {\n//\t\t\t\t\t\tb, e := os.ReadFile(filepath.Join(dirname, file.Name()))\n//\t\t\t\t\t\tif e != nil {\n//\t\t\t\t\t\t\terr = e\n//\t\t\t\t\t\t\treturn\n//\t\t\t\t\t\t}\n//\t\t\t\t\t\tfm[filepath.Join(dirname, file.Name())] = string(b)\n//\t\t\t\t\t}\n//\t\t\t\t}\n//\t\t\t}\n//\t\t}\n//\n//\t\tfn(dirname)\n//\n//\t\treturn fm, err\n//\t}\nfunc GetCacheKey() string {\n\treturn \"update_time\"\n}\n\n//\n//// GetSchemaKey return provider schema named <required.name>_<required_version>_<provider_name>\n//func GetSchemaKey(required *RequireProvider, cp ProviderBlock) string {\n//\tvar pre string\n//\tif required == nil {\n//\t\treturn pre + \"public\"\n//\t}\n//\tsourceArr := 
strings.Split(*required.Source, \"/\")\n//\tsource := strings.Replace(sourceArr[1]+\"@\"+required.Version, \"/\", \"_\", -1)\n//\tsource = strings.Replace(source, \"@\", \"_\", -1)\n//\tsource = strings.Replace(source, \".\", \"\", -1)\n//\ts := source + \"_\" + cp.Name\n//\treturn pre + s\n//}\n//\n//var ErrNotSelefra = errors.New(\"this workspace is not selefra workspace\")\n//\n////// IsSelefra return an error when workspace is not a selefra workspace\n////func IsSelefra() error {\n////\tconfigMap, err := readAllConfig(global.WorkSpace())\n////\tif err != nil {\n////\t\treturn err\n////\t}\n////\tif configMap[SELEFRA] == nil {\n////\t\treturn ErrNotSelefra\n////\t}\n////\treturn nil\n////}\n//\n//// realAllConfig read all yaml file and store it in a map\n//func readAllConfig(dirname string) (section2File2Content, error) {\n//\tvar err error\n//\n//\tcm := make(section2File2Content)\n//\n//\tvar fn func(dirname string)\n//\tfn = func(dirname string) {\n//\t\tfiles, err := os.ReadDir(dirname)\n//\t\tif err != nil {\n//\t\t\terr = err\n//\t\t\treturn\n//\t\t}\n//\t\tfor _, file := range files {\n//\t\t\tif file.IsDir() {\n//\t\t\t\tfn(filepath.Join(dirname, file.Name()))\n//\t\t\t} else {\n//\t\t\t\tif path.Ext(file.Name()) == \".yaml\" {\n//\t\t\t\t\tf, _ := file.Info()\n//\t\t\t\t\t_, err = readConfigFile(dirname, cm, f)\n//\t\t\t\t\tif err != nil {\n//\t\t\t\t\t\terr = err\n//\t\t\t\t\t\tcontinue\n//\t\t\t\t\t}\n//\t\t\t\t}\n//\t\t\t}\n//\t\t}\n//\t}\n//\n//\tfn(dirname)\n//\n//\treturn cm, err\n//}\n//\n//func readConfigFile(dirname string, configMap section2File2Content, file os.FileInfo) (section2File2Content, error) {\n//\tb, err := os.ReadFile(filepath.Join(dirname, file.Name()))\n//\tif err != nil {\n//\t\tcli_ui.Errorln(err)\n//\t\treturn nil, err\n//\t}\n//\tvar node yaml.Node\n//\terr = yaml.Unmarshal(b, &node)\n//\tif len(node.Content) > 0 && node.Content[0] != nil && len(node.Content[0].Content) > 0 {\n//\t\tfor i := range node.Content[0].Content 
{\n//\t\t\tif i%2 != 0 {\n//\t\t\t\tcontinue\n//\t\t\t}\n//\n//\t\t\tsn := sectionName(node.Content[0].Content[i].Value)\n//\t\t\tif typeMap[sn] {\n//\t\t\t\tvar strNode = yaml.Node{\n//\t\t\t\t\tKind: yaml.MappingNode,\n//\t\t\t\t\tContent: []*yaml.Node{\n//\t\t\t\t\t\tnode.Content[0].Content[i],   // k\n//\t\t\t\t\t\tnode.Content[0].Content[i+1], // v\n//\t\t\t\t\t},\n//\t\t\t\t}\n//\n//\t\t\t\tb, e := yaml.Marshal(strNode)\n//\t\t\t\tif e != nil {\n//\t\t\t\t\tcli_ui.Errorln(e)\n//\t\t\t\t\treturn nil, err\n//\t\t\t\t}\n//\t\t\t\tif configMap[sn] == nil {\n//\t\t\t\t\tconfigMap[sn] = make(map[string]string)\n//\t\t\t\t}\n//\t\t\t\tconfigMap[sn][filepath.Join(dirname, file.Name())] = string(b)\n//\t\t\t}\n//\t\t}\n//\t}\n//\treturn configMap, nil\n//}\n//\n//func assembleNode(configMap map[string]string) (*yaml.Node, map[string]*yaml.Node, error) {\n//\tvar baseNode *yaml.Node\n//\tvar nodeMap = make(map[string]*yaml.Node)\n//\tfor strPath, value := range configMap {\n//\t\tif baseNode == nil {\n//\t\t\tbaseNode = new(yaml.Node)\n//\t\t\ttempNode := new(yaml.Node)\n//\t\t\terr := yaml.Unmarshal([]byte(value), baseNode)\n//\t\t\tfmtNodePath(baseNode.Content[0].Content[1].Content, strPath, \"uses\")\n//\t\t\ts, _ := yaml.Marshal(baseNode)\n//\t\t\t_ = yaml.Unmarshal(s, tempNode)\n//\t\t\tnodeMap[strPath] = tempNode\n//\t\t\tif err != nil {\n//\t\t\t\treturn nil, nil, err\n//\t\t\t}\n//\t\t} else {\n//\t\t\tvar tempNode = new(yaml.Node)\n//\t\t\terr := yaml.Unmarshal([]byte(value), tempNode)\n//\t\t\tfmtNodePath(tempNode.Content[0].Content[1].Content, strPath, \"uses\")\n//\t\t\tbaseNode.Content[0].Content[1].Content = append(baseNode.Content[0].Content[1].Content, tempNode.Content[0].Content[1].Content...)\n//\t\t\tnodeMap[strPath] = tempNode\n//\t\t\tif err != nil {\n//\t\t\t\treturn nil, nil, err\n//\t\t\t}\n//\t\t}\n//\n//\t}\n//\n//\treturn baseNode, nodeMap, nil\n//}\n//\n//func fmtNodePath(nodes []*yaml.Node, path string, key string) {\n//\tif key == \"\" 
{\n//\t\treturn\n//\t}\n//\tfor i := range nodes {\n//\t\tfor ii := range nodes[i].Content {\n//\t\t\tif nodes[i].Content[ii].Value == key {\n//\t\t\t\tfor iii := range nodes[i].Content[ii+1].Content {\n//\t\t\t\t\tif strings.HasPrefix(nodes[i].Content[ii+1].Content[iii].Value, \".\") {\n//\t\t\t\t\t\tnodes[i].Content[ii+1].Value = filepath.Join(filepath.Dir(path), nodes[i].Content[ii+1].Value)\n//\t\t\t\t\t}\n//\t\t\t\t}\n//\t\t\t}\n//\t\t}\n//\t}\n//}\n//\n//var NoClient = errors.New(\"There is no selefra configuration！\")\n//\n//func GetClientStr() ([]byte, error) {\n//\tconfigMap, err := readAllConfig(global.WorkSpace())\n//\tif err != nil {\n//\t\treturn nil, err\n//\t}\n//\n//\tif len(configMap[SELEFRA]) == 0 {\n//\t\treturn nil, NoClient\n//\t}\n//\n//\tselefraNode, _, err := assembleNode(configMap[SELEFRA])\n//\tif err != nil {\n//\t\treturn nil, err\n//\t}\n//\n//\tproviderNodes, _, err := assembleNode(configMap[PROVIDERS])\n//\tif err != nil {\n//\t\treturn nil, err\n//\t}\n//\n//\tvariableNodes, _, err := assembleNode(configMap[VARIABLES])\n//\tif err != nil {\n//\t\treturn nil, err\n//\t}\n//\n//\tSelefraStr, err := yaml.Marshal(selefraNode)\n//\tif err != nil {\n//\t\treturn nil, err\n//\t}\n//\tproviderStr, err := yaml.Marshal(providerNodes)\n//\tif err != nil {\n//\t\treturn nil, err\n//\t}\n//\n//\tconfigStr := append(SelefraStr, providerStr...)\n//\tif variableNodes != nil {\n//\t\tvariableStr, err := yaml.Marshal(variableNodes)\n//\t\tif err != nil {\n//\t\t\treturn nil, err\n//\t\t}\n//\t\tconfigStr = append(configStr, variableStr...)\n//\t}\n//\treturn configStr, nil\n//}\n//\n//func GetModulesStr() ([]byte, error) {\n//\tconfigMap, err := readAllConfig(global.WorkSpace())\n//\tif err != nil {\n//\t\treturn nil, err\n//\t}\n//\tvar paths []string\n//\tfor k := range configMap[MODULES] {\n//\t\tpaths = append(paths, k)\n//\t}\n//\tfor i := range paths {\n//\t\tgetAllModules(configMap[MODULES], \"\", paths[i])\n//\t}\n//\t_, moduleMap, err := 
assembleNode(configMap[MODULES])\n//\terr = deepPathModules(moduleMap)\n//\tcyclePathMap, err := makeCyclePathMap(moduleMap)\n//\tif err != nil {\n//\t\treturn nil, err\n//\t}\n//\tfor cyclePath, paths := range cyclePathMap {\n//\t\tvar cyclePathList = []string{cyclePath}\n//\t\tif checkCycle(cyclePathMap, cyclePath, paths, &cyclePathList) {\n//\t\t\tcyclePathStr := strings.Join(cyclePathList, \" -> \")\n//\t\t\treturn nil, errors.New(\"Modules have circular references:\" + cyclePathStr)\n//\t\t}\n//\t}\n//\treturn makeUsesModule(moduleMap)\n//}\n//\n//func checkModuleFile(configMap map[string]string, workspace string, waitUsePath string, file os.FileInfo) error {\n//\tvar b []byte\n//\tvar err error\n//\tif strings.HasSuffix(waitUsePath, \".yaml\") {\n//\t\tb, err = os.ReadFile(waitUsePath)\n//\t} else if strings.HasSuffix(file.Name(), \".yaml\") {\n//\t\twaitUsePath = filepath.Join(waitUsePath, file.Name())\n//\t\tb, err = os.ReadFile(waitUsePath)\n//\t} else {\n//\t\terr = fmt.Errorf(\"the file name is not yaml:%s\", waitUsePath)\n//\t}\n//\tif err != nil {\n//\t\tcli_ui.Errorln(err.Error())\n//\t\treturn err\n//\t}\n//\tif strings.Index(string(b), \"modules:\") > -1 {\n//\t\tconfigMap[waitUsePath] = string(b)\n//\t\tvar module ModuleConfig\n//\t\terr = yaml.Unmarshal(b, &module)\n//\t\tif err != nil {\n//\t\t\tcli_ui.Errorln(err.Error())\n//\t\t\treturn err\n//\t\t}\n//\t\tfor _, module := range module.Modules {\n//\t\t\tfor i := range module.Uses {\n//\t\t\t\tgetAllModules(configMap, workspace, module.Uses[i])\n//\t\t\t}\n//\t\t}\n//\t}\n//\treturn nil\n//}\n//\n//func getAllModules(configMap map[string]string, workspace, path string) {\n//\tvar waitUsePath string\n//\tif strings.HasPrefix(path, \"selefra/\") {\n//\t\tmodulesName := strings.Split(path, \"/\")[1]\n//\t\tmodulePath, err := utils.GetHomeModulesPath(modulesName, \"\")\n//\t\tif err != nil {\n//\t\t\tcli_ui.Errorln(err.Error())\n//\t\t}\n//\t\twaitUsePath = strings.Replace(path, \"selefra\", 
modulePath, 1)\n//\t\tworkspace = modulePath + \"/\" + modulesName\n//\t} else if strings.HasPrefix(path, \"app.selefra.io\") {\n//\t\tmodulesArr := strings.Split(path, \"/\")\n//\t\tmodulesOrg := modulesArr[1]\n//\t\tmodulesName := modulesArr[2]\n//\t\tmodulePath, err := utils.GetHomeModulesPath(modulesName, modulesOrg)\n//\t\tif err != nil {\n//\t\t\tcli_ui.Errorln(err.Error())\n//\t\t}\n//\t\twaitUsePath = strings.Replace(path, strings.Join(modulesArr[:2], \"/\"), modulePath, 1)\n//\t\tworkspace = modulePath + \"/\" + modulesName\n//\t} else {\n//\t\twaitUsePath = filepath.Join(workspace, path)\n//\t\tif workspace == \"\" {\n//\t\t\tworkspace = global.WorkSpace()\n//\t\t}\n//\t}\n//\tfile, err := os.Stat(waitUsePath)\n//\tif err != nil {\n//\t\tcli_ui.Errorln(err.Error())\n//\t\treturn\n//\t}\n//\tif file.IsDir() {\n//\t\tfiles, err := os.ReadDir(waitUsePath)\n//\t\tif err != nil {\n//\t\t\tcli_ui.Errorln(err.Error())\n//\t\t\treturn\n//\t\t}\n//\t\tfor _, file := range files {\n//\t\t\tf, err := file.Info()\n//\t\t\tif err != nil {\n//\t\t\t\tcli_ui.Errorln(err.Error())\n//\t\t\t\tcontinue\n//\t\t\t}\n//\t\t\terr = checkModuleFile(configMap, workspace, waitUsePath, f)\n//\t\t\tif err != nil {\n//\t\t\t\tcli_ui.Errorln(err.Error())\n//\t\t\t\tcontinue\n//\t\t\t}\n//\t\t}\n//\t} else {\n//\t\terr = checkModuleFile(configMap, workspace, waitUsePath, file)\n//\t\tif err != nil {\n//\t\t\tcli_ui.Errorln(err.Error())\n//\t\t\treturn\n//\t\t}\n//\t}\n//}\n//\n//func deepCopyYamlContent(node *yaml.Node) *yaml.Node {\n//\tvar tempNode = new(yaml.Node)\n//\ts, _ := yaml.Marshal(node)\n//\t_ = yaml.Unmarshal(s, tempNode)\n//\treturn tempNode.Content[0]\n//}\n//\n//func deepPathModules(moduleMap map[string]*yaml.Node) error {\n//\tfor excludePath, node := range moduleMap {\n//\t\tfor i := range node.Content[0].Content[1].Content {\n//\t\t\tvar uses string\n//\t\t\tfor i2 := range node.Content[0].Content[1].Content[i].Content {\n//\t\t\t\tif 
node.Content[0].Content[1].Content[i].Content[i2].Value == \"uses\" {\n//\t\t\t\t\tuses = node.Content[0].Content[1].Content[i].Content[i2+1].Value\n//\t\t\t\t}\n//\t\t\t}\n//\t\t\tif uses == \"\" {\n//\t\t\t\treturn errors.New(\"Module configuration error, missing uses\")\n//\t\t\t}\n//\t\t\tfile, err := os.Stat(uses)\n//\t\t\tif os.IsNotExist(err) {\n//\t\t\t\treturn errors.New(\"Module file does not exist:\" + uses)\n//\t\t\t}\n//\t\t\tif file.IsDir() {\n//\t\t\t\tvar paths []string\n//\t\t\t\tfiles, err := os.ReadDir(uses)\n//\t\t\t\tif err != nil {\n//\t\t\t\t\treturn errors.New(\"open dir failed:\" + err.Error())\n//\t\t\t\t}\n//\t\t\t\tfor _, file := range files {\n//\t\t\t\t\tfileName := filepath.Join(uses, file.Name())\n//\t\t\t\t\tif strings.HasSuffix(fileName, \".yaml\") && fileName != excludePath {\n//\t\t\t\t\t\tpaths = append(paths, fileName)\n//\t\t\t\t\t}\n//\t\t\t\t}\n//\t\t\t\tif len(paths) > 0 {\n//\t\t\t\t\ttempNode := deepCopyYamlContent(node.Content[0].Content[1].Content[i])\n//\t\t\t\t\tnode.Content[0].Content[1].Content = append(node.Content[0].Content[1].Content[:i], node.Content[0].Content[1].Content[i+1:]...)\n//\t\t\t\t\tfor _, mPath := range paths {\n//\t\t\t\t\t\twaitAppendNode := deepCopyYamlContent(tempNode)\n//\t\t\t\t\t\tfor i3 := range waitAppendNode.Content {\n//\t\t\t\t\t\t\tif waitAppendNode.Content[i3].Value == \"uses\" {\n//\t\t\t\t\t\t\t\twaitAppendNode.Content[i3+1].Value = mPath\n//\t\t\t\t\t\t\t}\n//\t\t\t\t\t\t}\n//\t\t\t\t\t\tnode.Content[0].Content[1].Content = append(node.Content[0].Content[1].Content, waitAppendNode)\n//\t\t\t\t\t}\n//\t\t\t\t}\n//\t\t\t} else {\n//\t\t\t\tfileName := file.Name()\n//\t\t\t\tif !strings.HasSuffix(fileName, \".yaml\") {\n//\t\t\t\t\treturn errors.New(\"Module file does not yaml:\" + uses)\n//\t\t\t\t}\n//\t\t\t}\n//\t\t}\n//\t}\n//\treturn nil\n//}\n//\n//func makeUsesModule(nodesMap map[string]*yaml.Node) ([]byte, error) {\n//\tvar usedModuleMap = make(map[string]bool)\n//\tvar 
ModulesMap = make(map[string]*ModuleConfig)\n//\tvar resultModules []Module\n//\tfor pathStr, node := range nodesMap {\n//\t\tModulesMap[pathStr] = new(ModuleConfig)\n//\t\tnodeStr, err := yaml.Marshal(node)\n//\t\tif err != nil {\n//\t\t\treturn nil, err\n//\t\t}\n//\t\terr = yaml.Unmarshal(nodeStr, ModulesMap[pathStr])\n//\t\tif err != nil {\n//\t\t\treturn nil, err\n//\t\t}\n//\t}\n//\n//\tfor _, moduleConfig := range ModulesMap {\n//\t\tfor i := range moduleConfig.Modules {\n//\t\t\tfor ii, use := range moduleConfig.Modules[i].Uses {\n//\t\t\t\tif strings.HasPrefix(use, \"selefra\") {\n//\t\t\t\t\tmodulesName := strings.Split(use, \"/\")[1]\n//\t\t\t\t\tmodules, err := utils.GetHomeModulesPath(modulesName, \"\")\n//\t\t\t\t\tif err != nil {\n//\t\t\t\t\t\treturn nil, err\n//\t\t\t\t\t}\n//\t\t\t\t\tmoduleConfig.Modules[i].Uses[ii] = strings.Replace(use, \"selefra\", modules, 1)\n//\t\t\t\t}\n//\t\t\t\tif strings.HasPrefix(use, \"app.selefra.io\") {\n//\t\t\t\t\tmodulesArr := strings.Split(use, \"/\")\n//\t\t\t\t\tmodulesOrg := modulesArr[1]\n//\t\t\t\t\tmodulesName := modulesArr[2]\n//\t\t\t\t\tmodulePath, err := utils.GetHomeModulesPath(modulesName, modulesOrg)\n//\t\t\t\t\tif err != nil {\n//\t\t\t\t\t\tcli_ui.Errorln(err.Error())\n//\t\t\t\t\t}\n//\t\t\t\t\tmoduleConfig.Modules[i].Uses[ii] = strings.Replace(use, strings.Join(modulesArr[:2], \"/\"), modulePath, 1)\n//\t\t\t\t}\n//\t\t\t}\n//\t\t\tfor _, use := range moduleConfig.Modules[i].Uses {\n//\t\t\t\tif ModulesMap[use] != nil {\n//\t\t\t\t\tusedModuleMap[use] = true\n//\t\t\t\t\tif path.IsAbs(use) {\n//\t\t\t\t\t\tfor i2 := range ModulesMap[use].Modules {\n//\t\t\t\t\t\t\tmUses := ModulesMap[use].Modules[i2].Uses\n//\t\t\t\t\t\t\tfor i3 := range mUses {\n//\t\t\t\t\t\t\t\tmUses[i3] = filepath.Join(filepath.Dir(use), mUses[i3])\n//\t\t\t\t\t\t\t}\n//\t\t\t\t\t\t}\n//\t\t\t\t\t}\n//\t\t\t\t\tmoduleConfig.Modules[i].Children = append(moduleConfig.Modules[i].Children, 
ModulesMap[use])\n//\t\t\t\t}\n//\t\t\t}\n//\t\t}\n//\t}\n//\tfor s := range ModulesMap {\n//\t\tif usedModuleMap[s] {\n//\t\t\tcontinue\n//\t\t}\n//\t\tvar tempModules = new(ModuleConfig)\n//\t\tb, err := json.Marshal(ModulesMap[s])\n//\t\tif err != nil {\n//\t\t\treturn nil, err\n//\t\t}\n//\t\terr = json.Unmarshal(b, tempModules)\n//\t\tif err != nil {\n//\t\t\treturn nil, err\n//\t\t}\n//\t\tfor i := range tempModules.Modules {\n//\t\t\tresultModules = append(resultModules, deepFmtModules(&tempModules.Modules[i], usedModuleMap)...)\n//\t\t}\n//\t}\n//\n//\tvar resultM = new(ModuleConfig)\n//\tresultM.Modules = resultModules\n//\treturn yaml.Marshal(resultM)\n//}\n//\n//func deepFmtModules(module *Module, usedModuleMap map[string]bool) []Module {\n//\tvar output []Module\n//\tfor i := 0; i < len(module.Uses); i++ {\n//\t\tif usedModuleMap[module.Uses[i]] {\n//\t\t\tmodule.Uses = append(module.Uses[:i], module.Uses[i+1:]...)\n//\t\t\ti--\n//\t\t}\n//\t}\n//\tif len(module.Children) != 0 {\n//\t\tfor i := range module.Children {\n//\t\t\tfor i2 := range module.Children[i].Modules {\n//\t\t\t\tmodule.Children[i].Modules[i2].Name = module.Name + \".\" + module.Children[i].Modules[i2].Name\n//\t\t\t}\n//\t\t\tfor i3 := range module.Children[i].Modules {\n//\t\t\t\toutput = append(output, deepFmtModules(&module.Children[i].Modules[i3], usedModuleMap)...)\n//\t\t\t}\n//\t\t}\n//\t}\n//\toutput = append(output, *module)\n//\treturn output\n//}\n//\n//func makeCyclePathMap(nodesMap map[string]*yaml.Node) (map[string][]string, error) {\n//\tvar userMap = make(map[string][]string)\n//\tfor modulePath, node := range nodesMap {\n//\t\tuserMap[modulePath] = make([]string, 0)\n//\t\tvar modules ModuleConfig\n//\t\tnodeByte, err := yaml.Marshal(node)\n//\t\tif err != nil {\n//\t\t\treturn nil, err\n//\t\t}\n//\t\terr = yaml.Unmarshal(nodeByte, &modules)\n//\t\tif err != nil {\n//\t\t\treturn nil, err\n//\t\t}\n//\t\tfor _, module := range modules.Modules {\n//\t\t\tfor _, use 
:= range module.Uses {\n//\t\t\t\twaitPath := use\n//\t\t\t\tif nodesMap[waitPath] != nil {\n//\t\t\t\t\tuserMap[modulePath] = append(userMap[modulePath], waitPath)\n//\t\t\t\t}\n//\t\t\t}\n//\t\t}\n//\t}\n//\treturn userMap, nil\n//}\n//\n//func checkCycle(cyclePathMap map[string][]string, path string, pathList []string, cyclePathList *[]string) bool {\n//\tfor _, p := range pathList {\n//\t\t*cyclePathList = append(*cyclePathList, p)\n//\t\tif p == path {\n//\t\t\treturn true\n//\t\t}\n//\t\tif checkCycle(cyclePathMap, path, cyclePathMap[p], cyclePathList) {\n//\t\t\treturn true\n//\t\t}\n//\t\t*cyclePathList = (*cyclePathList)[:len(*cyclePathList)-1]\n//\t}\n//\treturn false\n//}\n//\n//func GetConfigPath() (string, error) {\n//\n//\tconfigMap, err := readAllConfig(global.WorkSpace())\n//\tif err != nil {\n//\t\treturn \"\", err\n//\t}\n//\tif err != nil {\n//\t\treturn \"\", err\n//\t}\n//\n//\tclientMap := configMap[SELEFRA]\n//\tfor cofPath := range clientMap {\n//\t\treturn cofPath, nil\n//\t}\n//\treturn \"\", errors.New(\"No config file found\")\n//}\n//\n//func GetRules() (RuleSet, error) {\n//\tvar rules RuleSet\n//\tconfigMap, err := readAllConfig(global.WorkSpace())\n//\tif err != nil {\n//\t\treturn rules, err\n//\t}\n//\tfor rulePath, rule := range configMap[RULES] {\n//\t\tvar baseRule RuleSet\n//\t\terr := yaml.Unmarshal([]byte(rule), &baseRule)\n//\t\tif err != nil {\n//\t\t\treturn RuleSet{}, err\n//\t\t}\n//\t\tfor i := range baseRule.Rules {\n//\t\t\tbaseRule.Rules[i].Path = rulePath\n//\t\t\tcli_ui.Infof(\"\t%s - Rule %s: loading ... 
\\n\", rulePath, baseRule.Rules[i].Name)\n//\t\t}\n//\t\trules.Rules = append(rules.Rules, baseRule.Rules...)\n//\t}\n//\treturn rules, err\n//}\n//\n//func (c *RootConfig) TestConfigByNode() error {\n//\tconfigMap, err := readAllConfig(global.WorkSpace())\n//\tif err != nil {\n//\t\treturn err\n//\t}\n//\tclientMap := configMap[SELEFRA]\n//\n//\tfor pathStr, configStr := range clientMap {\n//\t\tvar selefraMap = make(map[string]*yaml.Node)\n//\t\tselefraMap[\"cloud\"] = new(yaml.Node)\n//\t\tselefraMap[\"cli_version\"] = nil\n//\t\tselefraMap[\"name\"] = nil\n//\t\tselefraMap[\"connection\"] = new(yaml.Node)\n//\t\tselefraMap[\"log_level\"] = new(yaml.Node)\n//\t\tselefraMap[\"providers\"] = nil\n//\t\tbodyNode := new(yaml.Node)\n//\t\terr := yaml.Unmarshal([]byte(configStr), bodyNode)\n//\t\tif err != nil {\n//\t\t\treturn err\n//\t\t}\n//\t\terr = checkNode(selefraMap, bodyNode.Content[0].Content[1].Content, pathStr, \"selefra:\")\n//\t\tif err != nil {\n//\t\t\treturn err\n//\t\t}\n//\n//\t\tfor index, node := range selefraMap[\"providers\"].Content {\n//\t\t\tvar providersMap = make(map[string]*yaml.Node)\n//\t\t\tprovidersMap[\"name\"] = nil\n//\t\t\tprovidersMap[\"source\"] = nil\n//\t\t\tprovidersMap[\"version\"] = nil\n//\t\t\tprovidersMap[\"path\"] = new(yaml.Node)\n//\t\t\tyamlPath := fmt.Sprintf(\"selefra.providers[%d]:\", index)\n//\t\t\terr = checkNode(providersMap, node.Content, pathStr, yamlPath)\n//\t\t\tif err != nil {\n//\t\t\t\treturn err\n//\t\t\t}\n//\t\t}\n//\n//\t}\n//\n//\tmodulesMap := configMap[MODULES]\n//\n//\tfor pathStr, modulesStr := range modulesMap {\n//\t\tvar modulesNode = new(yaml.Node)\n//\t\terr := yaml.Unmarshal([]byte(modulesStr), modulesNode)\n//\t\tif err != nil {\n//\t\t\treturn err\n//\t\t}\n//\t\tfor _, node := range modulesNode.Content[0].Content[1].Content {\n//\t\t\tvar ModuleMap = make(map[string]*yaml.Node)\n//\t\t\tModuleMap[\"name\"] = nil\n//\t\t\tModuleMap[\"uses\"] = nil\n//\t\t\tModuleMap[\"input\"] = 
new(yaml.Node)\n//\n//\t\t\terr = checkNode(ModuleMap, node.Content, pathStr, \"modules:\")\n//\t\t\tif err != nil {\n//\t\t\t\treturn err\n//\t\t\t}\n//\t\t}\n//\t}\n//\n//\trulesMap := configMap[RULES]\n//\tfor pathStr, rulesStr := range rulesMap {\n//\t\tvar rulesNode = new(yaml.Node)\n//\t\terr := yaml.Unmarshal([]byte(rulesStr), rulesNode)\n//\t\tif err != nil {\n//\t\t\treturn err\n//\t\t}\n//\t\tfor index, node := range rulesNode.Content[0].Content[1].Content {\n//\t\t\tvar ruleMap = make(map[string]*yaml.Node)\n//\t\t\truleMap[\"name\"] = nil\n//\t\t\truleMap[\"input\"] = new(yaml.Node)\n//\t\t\truleMap[\"query\"] = nil\n//\t\t\truleMap[\"labels\"] = nil\n//\t\t\truleMap[\"interval\"] = new(yaml.Node)\n//\t\t\truleMap[\"metadata\"] = nil\n//\t\t\truleMap[\"output\"] = nil\n//\t\t\tyamlPath := fmt.Sprintf(\"rules[%d]\", index)\n//\t\t\terr = checkNode(ruleMap, node.Content, pathStr, yamlPath+\":\")\n//\n//\t\t\tif err != nil {\n//\t\t\t\treturn err\n//\t\t\t}\n//\n//\t\t\tfor i := range ruleMap[\"input\"].Content {\n//\t\t\t\tif i%2 != 0 {\n//\t\t\t\t\tvar ruleInputMap = make(map[string]*yaml.Node)\n//\t\t\t\t\truleInputMap[\"type\"] = nil\n//\t\t\t\t\truleInputMap[\"description\"] = nil\n//\t\t\t\t\truleInputMap[\"default\"] = nil\n//\t\t\t\t\terr = checkNode(ruleInputMap, ruleMap[\"input\"].Content[i].Content, pathStr, yamlPath+\"input:\")\n//\t\t\t\t\tif err != nil {\n//\t\t\t\t\t\treturn err\n//\t\t\t\t\t}\n//\t\t\t\t}\n//\t\t\t}\n//\n//\t\t\tfor i := range ruleMap[\"metadata\"].Content {\n//\t\t\t\tif i%2 != 0 {\n//\t\t\t\t\tvar ruleMetadataMap = make(map[string]*yaml.Node)\n//\t\t\t\t\truleMetadataMap[\"id\"] = nil\n//\t\t\t\t\truleMetadataMap[\"severity\"] = nil\n//\t\t\t\t\truleMetadataMap[\"provider\"] = nil\n//\t\t\t\t\truleMetadataMap[\"tags\"] = new(yaml.Node)\n//\t\t\t\t\truleMetadataMap[\"remediation\"] = nil\n//\t\t\t\t\truleMetadataMap[\"title\"] = nil\n//\t\t\t\t\truleMetadataMap[\"author\"] = 
nil\n//\t\t\t\t\truleMetadataMap[\"description\"] = nil\n//\t\t\t\t\terr = checkNode(ruleMetadataMap, ruleMap[\"metadata\"].Content, pathStr, yamlPath+\"metadata:\")\n//\t\t\t\t\tif err != nil {\n//\t\t\t\t\t\treturn err\n//\t\t\t\t\t}\n//\t\t\t\t}\n//\t\t\t}\n//\n//\t\t}\n//\t}\n//\n//\treturn nil\n//}\n//\n//func hasKeys(key string, keys []string) bool {\n//\tfor _, v := range keys {\n//\t\tif v == key {\n//\t\t\treturn true\n//\t\t}\n//\t}\n//\treturn false\n//}\n//\n//func checkNode(configMap map[string]*yaml.Node, bodyNode []*yaml.Node, pathStr string, yamlPath string) error {\n//\tvar keys []string\n//\tfor s := range configMap {\n//\t\tkeys = append(keys, s)\n//\t}\n//\tfor i := range bodyNode {\n//\t\tif i == len(bodyNode)-1 || i%2 != 0 {\n//\t\t\tcontinue\n//\t\t}\n//\n//\t\tif !hasKeys(bodyNode[i].Value, keys) {\n//\t\t\terrStr := fmt.Sprintf(\"Illegal configuration exists %s,Occurrence location %s %d:%d\", bodyNode[i].Value, pathStr, bodyNode[i].Line, bodyNode[i].Column)\n//\t\t\treturn errors.New(errStr)\n//\t\t}\n//\t\tconfigMap[bodyNode[i].Value] = bodyNode[i+1]\n//\t}\n//\tfor key, node := range configMap {\n//\t\tif node == nil {\n//\t\t\terrStr := fmt.Sprintf(\"%s %s Missing configuration %s\", pathStr, yamlPath, key)\n//\t\t\treturn errors.New(errStr)\n//\t\t}\n//\t}\n//\treturn nil\n//}\n//\n//func (c *RootConfig) GetConfigWithViper() (*viper.Viper, error) {\n//\tconfig := viper.New()\n//\tconfig.SetConfigType(\"yaml\")\n//\tclientByte, err := GetClientStr()\n//\tif err != nil {\n//\t\treturn nil, err\n//\t}\n//\terr = config.ReadConfig(bytes.NewBuffer(clientByte))\n//\tif err != nil {\n//\t\treturn config, err\n//\t}\n//\terr = yaml.Unmarshal(clientByte, &c)\n//\tif err != nil {\n//\t\treturn nil, err\n//\t}\n//\tglobal.SetLogLevel(c.Selefra.LogLevel)\n//\tglobal.SERVER = c.Selefra.GetHostName()\n//\treturn config, nil\n//}\n//\n//func GetModules() ([]Module, error) {\n//\tvar modules ModuleConfig\n//\tmodulesStr, err := GetModulesStr()\n//\tif 
err != nil {\n//\t\treturn modules.Modules, err\n//\t}\n//\terr = yaml.Unmarshal(modulesStr, &modules)\n//\tif err != nil {\n//\t\treturn modules.Modules, err\n//\t}\n//\n//\treturn modules.Modules, nil\n//}\n//\n//// ------------------------------------------------- --------------------------------------------------------------------\n//\n//// IsSelefraWorkspace Determine whether the current path is the workspace of Selefra\n//func IsSelefraWorkspace() error {\n//\tconfigMap, err := readDirectoryAllConfigWithoutRecursion(*global.WORKSPACE)\n//\tif err != nil {\n//\t\treturn err\n//\t}\n//\tif configMap[SELEFRA] == nil {\n//\t\treturn fmt.Errorf(\"the path %s is not a valid Selefra workspace. yaml files in a valid workspace must contain 'selefra' block\", pointer.FromStringPointer(global.WORKSPACE))\n//\t}\n//\treturn nil\n//}\n//\n//// Read all the yaml configuration files in the given folder\n//// ConfigMap: map[string]map[string]string <--- map[blockName]map[yamlFilePath]blockStringContent\n//func readDirectoryAllConfigWithoutRecursion(dirname string) (ConfigMap, error) {\n//\tconfigMap := make(ConfigMap)\n//\tfiles, err := os.ReadDir(dirname)\n//\tif err != nil {\n//\t\treturn nil, err\n//\t}\n//\tfor _, file := range files {\n//\t\tif file.IsDir() || !isYamlFileByExtension(path.Ext(file.Name())) {\n//\t\t\tcontinue\n//\t\t}\n//\t\tf, _ := file.Info()\n//\t\terr = readConfigFile(dirname, configMap, f)\n//\t\tif err != nil {\n//\t\t\treturn nil, err\n//\t\t}\n//\t}\n//\treturn configMap, nil\n//}\n//\n//// IsSelefraWorkspace Determine whether the current path is the workspace of Selefra\n//func IsSelefraWorkspace() error {\n//\tconfigMap, err := readDirectoryAllConfigWithoutRecursion(*global.WORKSPACE)\n//\tif err != nil {\n//\t\treturn err\n//\t}\n//\tif configMap[SELEFRA] == nil {\n//\t\treturn fmt.Errorf(\"the path %s is not a valid Selefra workspace. 
yaml files in a valid workspace must contain 'selefra' block\", pointer.FromStringPointer(global.WORKSPACE))\n//\t}\n//\treturn nil\n//}\n\n// ------------------------------------------------- --------------------------------------------------------------------\n\nconst HomeSelefraRCConfigFileName = \"selefra.rc\"\n\ntype HomeSelefraRCConfig struct {\n\tDownloadCacheDirectory string `yaml:\"download-cache-directory\" json:\"download-cache-directory\"`\n}\n\nfunc GetHomeSelefraRCConfigPath() (string, error) {\n\tworkspacePath, err := GetSelefraHomeWorkspacePath()\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\treturn filepath.Join(workspacePath, HomeSelefraRCConfigFileName), nil\n}\n\nfunc ReadHomeSelefraRCConfig() (*HomeSelefraRCConfig, error) {\n\tconfigPath, err := GetHomeSelefraRCConfigPath()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tfileBytes, err := os.ReadFile(configPath)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tc := new(HomeSelefraRCConfig)\n\terr = yaml.Unmarshal(fileBytes, &c)\n\tif err != nil {\n\t\treturn nil, err\n\t} else {\n\t\treturn c, nil\n\t}\n}\n\n// ------------------------------------------------- --------------------------------------------------------------------\n\nfunc GetDefaultDownloadCacheDirectory() (string, error) {\n\n\t// 1. first read from selefra.rc\n\tselefraConfig, _ := ReadHomeSelefraRCConfig()\n\tif selefraConfig != nil && selefraConfig.DownloadCacheDirectory != \"\" {\n\t\treturn selefraConfig.DownloadCacheDirectory, nil\n\t}\n\n\t// 2. 
use default download path\n\tworkspacePath, err := GetSelefraHomeWorkspacePath()\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\treturn filepath.Join(workspacePath, \"downloads\"), nil\n}\n\nfunc initHomeSelefraRCConfig() error {\n\tconfigPath, err := GetHomeSelefraRCConfigPath()\n\tif err != nil {\n\t\treturn err\n\t}\n\t// TODO Put the documentation in the configuration file\n\thomeSelefraRCConfigInitContent := `\n# You can specify the directory in which selefra downloads files. If not, the default is ~/.selefra/download\n# download-cache-directory: /data1/mnt\n`\n\treturn utils.EnsureFileExists(configPath, []byte(homeSelefraRCConfigInitContent))\n}\n\n// ------------------------------------------------- --------------------------------------------------------------------\n\nconst SelefraHomeWorkspaceDirectoryName = \".selefra\"\n\n// GetSelefraHomeWorkspacePath selefra will store temporary files in home directory, in its own separate fixed path\nfunc GetSelefraHomeWorkspacePath() (string, error) {\n\thome, err := homedir.Dir()\n\tif err != nil {\n\t\treturn \"\", fmt.Errorf(\"get home dir error: %s\", err.Error())\n\t}\n\treturn filepath.Join(home, SelefraHomeWorkspaceDirectoryName), nil\n}\n\n// ------------------------------------------------- --------------------------------------------------------------------\n"
  },
  {
    "path": "config/config_test.go",
    "content": "package config\n//\n//import (\n//\t\"github.com/selefra/selefra/global\"\n//\t\"testing\"\n//)\n//\n//func TestGetAllConfig(t *testing.T) {\n//\tglobal.Init(\"\", global.WithWorkspace(\"../tests/workspace/offline\"))\n//\n//\tfileMap, err := FileMap(global.WorkSpace())\n//\tif err != nil {\n//\t\tt.Error(err)\n//\t}\n//\tif len(fileMap) == 0 {\n//\t\tt.Error(\"fileMap is empty\")\n//\t}\n//}\n//\n//func TestIsSelefra(t *testing.T) {\n//\tglobal.Init(\"\", global.WithWorkspace(\"../tests/workspace/offline\"))\n//\terr := IsSelefra()\n//\tif err != nil {\n//\t\tt.Error(err)\n//\t}\n//}\n//\n//func TestGetModulesByPath(t *testing.T) {\n//\tglobal.Init(\"\", global.WithWorkspace(\"../tests/workspace/offline\"))\n//\tmodules, err := GetModules()\n//\tif err != nil {\n//\t\tt.Error(err)\n//\t}\n//\tif len(modules) == 0 {\n//\t\tt.Error(\"modules is empty\")\n//\t}\n//}\n//\n//func TestGetConfigPath(t *testing.T) {\n//\tglobal.Init(\"\", global.WithWorkspace(\"../tests/workspace/offline\"))\n//\tpath, err := GetConfigPath()\n//\tif err != nil {\n//\t\tt.Error(err)\n//\t}\n//\tif len(path) == 0 {\n//\t\tt.Error(\"path is empty\")\n//\t}\n//}\n//\n//func TestGetClientStr(t *testing.T) {\n//\tglobal.Init(\"\", global.WithWorkspace(\"../tests/workspace/offline\"))\n//\tclientStr, err := GetClientStr()\n//\tif err != nil {\n//\t\tt.Error(err)\n//\t}\n//\tif len(clientStr) == 0 {\n//\t\tt.Error(\"clientStr is empty\")\n//\t}\n//}\n//\n//func TestGetModulesStr(t *testing.T) {\n//\tglobal.Init(\"\", global.WithWorkspace(\"../tests/workspace/offline\"))\n//\tmodulesStr, err := GetModulesStr()\n//\tif err != nil {\n//\t\tt.Error(err)\n//\t}\n//\tif len(modulesStr) == 0 {\n//\t\tt.Error(\"modulesStr is empty\")\n//\t}\n//}\n//\n//func TestGetRules(t *testing.T) {\n//\tglobal.Init(\"\", global.WithWorkspace(\"../tests/workspace/offline\"))\n//\trules, err := GetRules()\n//\tif err != nil {\n//\t\tt.Error(err)\n//\t}\n//\tfor i := range rules.Rules {\n//\t\tif 
len(rules.Rules[i].Name) == 0 {\n//\t\t\tt.Error(\"rule name is empty\")\n//\t\t}\n//\t}\n//}\n"
  },
  {
    "path": "config/downloader.go",
    "content": "package config\n\n//import (\n//\t\"fmt\"\n//\t\"io\"\n//\t\"net/http\"\n//\t\"net/url\"\n//)\n//\n//type Downloader struct {\n//\tUrl string `json:\"url\" yaml:\"url\"`\n//}\n//\n//func (d *Downloader) Get() ([]byte, error) {\n//\tvar ruleBytes []byte\n//\tu, err := url.Parse(d.Url)\n//\tif err != nil {\n//\t\treturn nil, fmt.Errorf(\"download error: %s\", err.Error())\n//\t}\n//\tswitch u.Scheme {\n//\tcase \"http\", \"https\":\n//\t\tresp, err := http.Get(d.Url)\n//\t\tif err != nil {\n//\t\t\treturn nil, fmt.Errorf(\"download error: %s\", err.Error())\n//\t\t}\n//\t\tdefer resp.Body.Close()\n//\t\truleBytes, err = io.ReadAll(resp.Body)\n//\t\tif err != nil {\n//\t\t\treturn nil, fmt.Errorf(\"download error: %s\", err.Error())\n//\t\t}\n//\tcase \"s3\":\n//\t\t//query := u.Query()\n//\t\t//sess := session.Must(session.NewSession(&aws.Config{\n//\t\t//\tRegion: aws.String(query.Get(\"region\")),\n//\t\t//}))\n//\t\t//service := s3.New(sess)\n//\t\t//\n//\t\t//ctx, cancel := context.WithTimeout(context.Background(), time.Duration(30)*time.Second)\n//\t\t//defer cancel()\n//\t\t//bucket := u.Host\n//\t\t//key := u.Path\n//\t\t//out, err := service.GetObjectWithContext(ctx, &s3.GetObjectInput{\n//\t\t//\tBucket: aws.String(bucket),\n//\t\t//\tKey:    aws.String(key),\n//\t\t//})\n//\t\t//if err != nil {\n//\t\t//\treturn nil, fmt.Errorf(\"download error:%s\", err.Error())\n//\t\t//}\n//\t\t//defer out.Body.Close()\n//\t\t//ruleBytes, err = io.ReadAll(out.Body)\n//\t\t//if err != nil {\n//\t\t//\treturn nil, fmt.Errorf(\"download error:%s\", err.Error())\n//\t\t//}\n//\t}\n//\treturn ruleBytes, nil\n//}\n"
  },
  {
    "path": "config/downloader_test.go",
    "content": "package config\n\n//import (\n//\t\"fmt\"\n//\t\"testing\"\n//)\n//\n//func TestDownloader_Get(t *testing.T) {\n//\td := Downloader{Url: \"s3://static-picture/job3.yaml?region=us-east-1\"}\n//\tb, err := d.Get()\n//\tif err != nil {\n//\t\tt.Error(err)\n//\t}\n//\tfmt.Println(b)\n//\td1 := Downloader{\n//\t\tUrl: \"https://static-picture.s3.amazonaws.com/job3.yaml\",\n//\t}\n//\tb1, err1 := d1.Get()\n//\tif err1 != nil {\n//\t\tt.Error(err1)\n//\t}\n//\tfmt.Println(b1)\n//}\n"
  },
  {
    "path": "global/flag.go",
    "content": "package global\n\nimport (\n\t\"os\"\n\t\"strings\"\n\t\"sync\"\n\n\t\"github.com/spf13/cobra\"\n)\n\ntype Option func(variable *Variable)\n\n// Variable store some global variable\ntype Variable struct {\n\treadOnlyVariable\n\n\tmux sync.RWMutex\n\n\t// token is not empty when user is login\n\ttoken string\n\n\t// orgName is selefra cloud organization name\n\torgName string\n\n\t// stage is the build stage for current project\n\tstage string\n\n\t// projectName is local project name\n\tprojectName string\n\n\t// relvPrjName is the name of selefra cloud project name which is relevant to local project\n\trelvPrjName string\n\n\tlogLevel string\n\n\tserver string\n}\n\n// readOnlyVariable will only be set when programmer started\ntype readOnlyVariable struct {\n\tonce sync.Once\n\n\t// workspace store where selefra worked\n\tworkspace string\n\n\t// cmd store what command is running\n\tcmd string\n}\n\nvar g = Variable{\n\treadOnlyVariable: readOnlyVariable{\n\t\tonce: sync.Once{},\n\t},\n\tmux: sync.RWMutex{},\n}\n\nfunc WithWorkspace(workspace string) Option {\n\treturn func(variable *Variable) {\n\t\tvariable.workspace = workspace\n\t}\n}\n\n// Init the global variables with cmd and some options\nfunc Init(cmd string, opts ...Option) {\n\tg.once.Do(func() {\n\t\tg.cmd = cmd\n\n\t\tcwd, err := os.Getwd()\n\t\tif err != nil {\n\t\t\tos.Exit(1)\n\t\t}\n\n\t\tg.workspace = cwd\n\n\t\tfor _, opt := range opts {\n\t\t\topt(&g)\n\t\t}\n\n\t})\n}\n\n// WrappedInit wrapper the Init function to a cobra func\nfunc WrappedInit(workspace string) func(cmd *cobra.Command, args []string) {\n\treturn func(cmd *cobra.Command, args []string) {\n\t\tInit(parentCmdNames(cmd), WithWorkspace(workspace))\n\t}\n}\n\n// DefaultWrappedInit is a cobra func that will use default value to init Variable\nfunc DefaultWrappedInit() func(cmd *cobra.Command, args []string) {\n\treturn func(cmd *cobra.Command, args []string) {\n\t\tInit(parentCmdNames(cmd))\n\t}\n}\n\n// 
parentCmdNames find cmd's parent cmd name and join their name\nfunc parentCmdNames(cmd *cobra.Command) string {\n\tnames := make([]string, 0)\n\tvar fn func(cmd *cobra.Command)\n\tfn = func(cmd *cobra.Command) {\n\t\tif cmd.Parent() != nil {\n\t\t\tfn(cmd.Parent())\n\t\t}\n\n\t\tnames = append(names, cmd.Name())\n\t}\n\n\tfn(cmd)\n\n\treturn strings.Join(names, \" \")\n}\n\nfunc SetToken(token string) {\n\tg.mux.Lock()\n\tdefer g.mux.Unlock()\n\n\tg.token = token\n}\n\nfunc SetStage(stage string) {\n\tg.mux.Lock()\n\tdefer g.mux.Unlock()\n\n\tg.stage = stage\n}\n\nfunc SetOrgName(orgName string) {\n\tg.mux.Lock()\n\tdefer g.mux.Unlock()\n\n\tg.orgName = orgName\n}\n\nfunc SetProjectName(prjName string) {\n\tg.mux.Lock()\n\tdefer g.mux.Unlock()\n\n\tg.projectName = prjName\n}\n\nfunc ProjectName() string {\n\tg.mux.RLock()\n\tdefer g.mux.RUnlock()\n\n\treturn g.projectName\n}\n\nfunc SetRelvPrjName(name string) {\n\tg.mux.Lock()\n\tdefer g.mux.Unlock()\n\n\tg.relvPrjName = name\n}\n\nfunc RelvPrjName() string {\n\tg.mux.RLock()\n\tdefer g.mux.RUnlock()\n\n\treturn g.relvPrjName\n}\n\nfunc SetLogLevel(level string) {\n\tg.mux.Lock()\n\tdefer g.mux.Unlock()\n\n\tg.logLevel = level\n}\n\nfunc WorkSpace() string {\n\treturn g.workspace\n}\n\nfunc Token() string {\n\tg.mux.RLock()\n\tdefer g.mux.RUnlock()\n\n\treturn g.token\n}\n\nfunc OrgName() string {\n\tg.mux.RLock()\n\tdefer g.mux.RUnlock()\n\n\treturn g.orgName\n}\n\nfunc Cmd() string {\n\treturn g.cmd\n}\n\nfunc Stage() string {\n\tg.mux.RLock()\n\tdefer g.mux.RUnlock()\n\n\treturn g.stage\n}\n\nfunc LogLevel() string {\n\tg.mux.RLock()\n\tdefer g.mux.RUnlock()\n\n\treturn g.logLevel\n}\n\nconst PkgBasePath = \"ghcr.io/selefra/postgre_\"\nconst PkgTag = \":latest\"\n\nvar SERVER = \"main-api.selefra.io\"\n"
  },
  {
    "path": "global/flag_test.go",
    "content": "package global\n\nimport (\n\t\"github.com/spf13/cobra\"\n\t\"github.com/stretchr/testify/require\"\n\t\"testing\"\n)\n\nfunc Test_Variable(t *testing.T) {\n\tInit(\"cmd\", WithWorkspace(\"/path/to/workspace\"))\n\n\trequire.Equal(t, \"cmd\", Cmd())\n\n\trequire.Equal(t, \"/path/to/workspace\", WorkSpace())\n\n\t// Init only do once\n\tInit(\"cmd1\", WithWorkspace(\"/fake/workspace\"))\n\n\trequire.Equal(t, \"cmd\", Cmd())\n\n\trequire.Equal(t, \"/path/to/workspace\", WorkSpace())\n}\n\nfunc Test_parentCmdNames(t *testing.T) {\n\tcmd_a := &cobra.Command{\n\t\tUse: \"a\",\n\t}\n\n\tcmd_b := &cobra.Command{\n\t\tUse: \"b\",\n\t}\n\n\t//cmd_c := &cobra.Command{\n\t//\tUse: \"c\",\n\t//}\n\n\tcmd_a.AddCommand(cmd_b)\n\n\trequire.Equal(t, []string{\"a\", \"b\"}, parentCmdNames(cmd_b))\n}\n"
  },
  {
    "path": "go.mod",
    "content": "module github.com/selefra/selefra\n\ngo 1.19\n\nrequire (\n\tgithub.com/aws/aws-sdk-go v1.44.149 // indirect\n\tgithub.com/c-bata/go-prompt v0.2.6\n\tgithub.com/containerd/containerd v1.6.18\n\tgithub.com/fatih/color v1.14.1\n\tgithub.com/gizak/termui/v3 v3.1.0\n\tgithub.com/google/uuid v1.3.0\n\tgithub.com/hashicorp/go-getter v1.7.0\n\tgithub.com/hashicorp/go-hclog v1.3.1\n\tgithub.com/hashicorp/go-plugin v1.4.6\n\tgithub.com/mitchellh/go-homedir v1.1.0\n\tgithub.com/natefinch/lumberjack v2.0.0+incompatible\n\tgithub.com/olekukonko/tablewriter v0.0.5\n\tgithub.com/selefra/selefra-provider-sdk v0.0.23-0.20230818075347-cef95b1e16a5\n\tgithub.com/selefra/selefra-utils v0.0.4\n\tgithub.com/songzhibin97/gkit v1.2.7\n\tgithub.com/spf13/cobra v1.6.1\n\tgithub.com/spf13/viper v1.14.0 // indirect\n\tgithub.com/stretchr/testify v1.8.4\n\tgithub.com/vbauerster/mpb/v7 v7.5.3\n\tgo.uber.org/zap v1.23.0\n\tgoogle.golang.org/grpc v1.51.0\n\tgoogle.golang.org/protobuf v1.28.1\n\tgopkg.in/yaml.v3 v3.0.1\n\toras.land/oras-go v1.2.1\n)\n\nrequire (\n\tcloud.google.com/go v0.107.0 // indirect\n\tcloud.google.com/go/compute v1.12.1 // indirect\n\tcloud.google.com/go/compute/metadata v0.2.1 // indirect\n\tcloud.google.com/go/iam v0.7.0 // indirect\n\tcloud.google.com/go/storage v1.28.0 // indirect\n\tgithub.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1 // indirect\n\tgithub.com/Masterminds/squirrel v1.5.3 // indirect\n\tgithub.com/Microsoft/go-winio v0.6.0 // indirect\n\tgithub.com/VividCortex/ewma v1.2.0 // indirect\n\tgithub.com/acarl005/stripansi v0.0.0-20180116102854-5a71ef0e047d // indirect\n\tgithub.com/beorn7/perks v1.0.1 // indirect\n\tgithub.com/bgentry/go-netrc v0.0.0-20140422174119-9fd32a8b3d3d // indirect\n\tgithub.com/cespare/xxhash/v2 v2.1.2 // indirect\n\tgithub.com/davecgh/go-spew v1.1.1 // indirect\n\tgithub.com/docker/cli v20.10.21+incompatible // indirect\n\tgithub.com/docker/distribution v2.8.1+incompatible // 
indirect\n\tgithub.com/docker/docker v20.10.21+incompatible // indirect\n\tgithub.com/docker/docker-credential-helpers v0.7.0 // indirect\n\tgithub.com/docker/go-connections v0.4.0 // indirect\n\tgithub.com/docker/go-metrics v0.0.1 // indirect\n\tgithub.com/docker/go-units v0.5.0 // indirect\n\tgithub.com/doug-martin/goqu/v9 v9.18.0 // indirect\n\tgithub.com/emirpasic/gods v1.18.1 // indirect\n\tgithub.com/fsnotify/fsnotify v1.6.0 // indirect\n\tgithub.com/gogo/protobuf v1.3.2 // indirect\n\tgithub.com/golang-infrastructure/go-heap v0.0.2 // indirect\n\tgithub.com/golang-infrastructure/go-queue v0.0.0-20221128180429-701892f44bcc // indirect\n\tgithub.com/golang-infrastructure/go-reflect-utils v0.0.0-20221130143747-965ef2eb09c3 // indirect\n\tgithub.com/golang-infrastructure/go-stack v0.0.2 // indirect\n\tgithub.com/golang-infrastructure/go-tuple v0.0.0-20221215155811-4ed54fe7d579 // indirect\n\tgithub.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect\n\tgithub.com/golang/protobuf v1.5.2 // indirect\n\tgithub.com/google/go-cmp v0.5.9 // indirect\n\tgithub.com/googleapis/enterprise-certificate-proxy v0.2.0 // indirect\n\tgithub.com/googleapis/gax-go/v2 v2.7.0 // indirect\n\tgithub.com/gorilla/mux v1.8.0 // indirect\n\tgithub.com/hashicorp/go-cleanhttp v0.5.2 // indirect\n\tgithub.com/hashicorp/go-safetemp v1.0.0 // indirect\n\tgithub.com/hashicorp/hcl v1.0.0 // indirect\n\tgithub.com/hashicorp/yamux v0.1.1 // indirect\n\tgithub.com/inconshreveable/mousetrap v1.0.1 // indirect\n\tgithub.com/jackc/chunkreader/v2 v2.0.1 // indirect\n\tgithub.com/jackc/pgconn v1.13.0 // indirect\n\tgithub.com/jackc/pgio v1.0.0 // indirect\n\tgithub.com/jackc/pgpassfile v1.0.0 // indirect\n\tgithub.com/jackc/pgproto3/v2 v2.3.1 // indirect\n\tgithub.com/jackc/pgservicefile v0.0.0-20200714003250-2b9c44734f2b // indirect\n\tgithub.com/jackc/pgtype v1.12.0 // indirect\n\tgithub.com/jackc/pgx/v4 v4.17.2 // indirect\n\tgithub.com/jackc/puddle v1.3.0 // 
indirect\n\tgithub.com/jmespath/go-jmespath v0.4.0 // indirect\n\tgithub.com/json-iterator/go v1.1.12 // indirect\n\tgithub.com/klauspost/compress v1.15.12 // indirect\n\tgithub.com/lann/builder v0.0.0-20180802200727-47ae307949d0 // indirect\n\tgithub.com/lann/ps v0.0.0-20150810152359-62de8c46ede0 // indirect\n\tgithub.com/magiconair/properties v1.8.6 // indirect\n\tgithub.com/mattn/go-colorable v0.1.13 // indirect\n\tgithub.com/mattn/go-isatty v0.0.17 // indirect\n\tgithub.com/mattn/go-runewidth v0.0.14 // indirect\n\tgithub.com/mattn/go-tty v0.0.4 // indirect\n\tgithub.com/matttproud/golang_protobuf_extensions v1.0.4 // indirect\n\tgithub.com/mitchellh/go-testing-interface v1.14.1 // indirect\n\tgithub.com/mitchellh/go-wordwrap v1.0.1 // indirect\n\tgithub.com/mitchellh/mapstructure v1.5.0 // indirect\n\tgithub.com/moby/locker v1.0.1 // indirect\n\tgithub.com/moby/term v0.0.0-20221120202655-abb19827d345 // indirect\n\tgithub.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect\n\tgithub.com/modern-go/reflect2 v1.0.2 // indirect\n\tgithub.com/morikuni/aec v1.0.0 // indirect\n\tgithub.com/nsf/termbox-go v1.1.1 // indirect\n\tgithub.com/oklog/run v1.1.0 // indirect\n\tgithub.com/opencontainers/go-digest v1.0.0 // indirect\n\tgithub.com/opencontainers/image-spec v1.1.0-rc2 // indirect\n\tgithub.com/pelletier/go-toml v1.9.5 // indirect\n\tgithub.com/pelletier/go-toml/v2 v2.0.6 // indirect\n\tgithub.com/pkg/errors v0.9.1 // indirect\n\tgithub.com/pkg/term v1.2.0-beta.2 // indirect\n\tgithub.com/pmezard/go-difflib v1.0.0 // indirect\n\tgithub.com/prometheus/client_golang v1.14.0 // indirect\n\tgithub.com/prometheus/client_model v0.3.0 // indirect\n\tgithub.com/prometheus/common v0.37.0 // indirect\n\tgithub.com/prometheus/procfs v0.8.0 // indirect\n\tgithub.com/rivo/uniseg v0.4.3 // indirect\n\tgithub.com/satori/go.uuid v1.2.0 // indirect\n\tgithub.com/segmentio/backo-go v1.0.1 // indirect\n\tgithub.com/sirupsen/logrus v1.9.0 // 
indirect\n\tgithub.com/songzhibin97/go-ognl v0.0.3-0.20220831071354-b4f21269f576 // indirect\n\tgithub.com/spf13/afero v1.9.3 // indirect\n\tgithub.com/spf13/cast v1.5.0 // indirect\n\tgithub.com/spf13/jwalterweatherman v1.1.0 // indirect\n\tgithub.com/spf13/pflag v1.0.5 // indirect\n\tgithub.com/subosito/gotenv v1.4.1 // indirect\n\tgithub.com/tidwall/gjson v1.14.4 // indirect\n\tgithub.com/tidwall/match v1.1.1 // indirect\n\tgithub.com/tidwall/pretty v1.2.1 // indirect\n\tgithub.com/ulikunitz/xz v0.5.10 // indirect\n\tgo.opencensus.io v0.24.0 // indirect\n\tgo.uber.org/atomic v1.10.0 // indirect\n\tgo.uber.org/goleak v1.1.12 // indirect\n\tgo.uber.org/multierr v1.8.0 // indirect\n\tgolang.org/x/crypto v0.7.0 // indirect\n\tgolang.org/x/net v0.8.0 // indirect\n\tgolang.org/x/oauth2 v0.2.0 // indirect\n\tgolang.org/x/sync v0.1.0 // indirect\n\tgolang.org/x/text v0.8.0 // indirect\n\tgolang.org/x/xerrors v0.0.0-20220907171357-04be3eba64a2 // indirect\n\tgoogle.golang.org/api v0.103.0 // indirect\n\tgoogle.golang.org/appengine v1.6.7 // indirect\n\tgoogle.golang.org/genproto v0.0.0-20221118155620-16455021b5e6 // indirect\n\tgopkg.in/ini.v1 v1.67.0 // indirect\n\tgopkg.in/yaml.v2 v2.4.0 // indirect\n)\n\nrequire (\n\tgithub.com/golang-infrastructure/go-trie v0.0.0-20230204150600-10750ecebaec\n\tgithub.com/hashicorp/go-version v1.6.0\n\tgithub.com/rudderlabs/analytics-go/v4 v4.1.0\n\tgithub.com/sashabaranov/go-openai v1.5.7\n\tgolang.org/x/sys v0.10.0\n)\n"
  },
  {
    "path": "go.sum",
    "content": "cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=\ncloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=\ncloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU=\ncloud.google.com/go v0.44.1/go.mod h1:iSa0KzasP4Uvy3f1mN/7PiObzGgflwredwwASm/v6AU=\ncloud.google.com/go v0.44.2/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY=\ncloud.google.com/go v0.44.3/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY=\ncloud.google.com/go v0.45.1/go.mod h1:RpBamKRgapWJb87xiFSdk4g1CME7QZg3uwTez+TSTjc=\ncloud.google.com/go v0.46.3/go.mod h1:a6bKKbmY7er1mI7TEI4lsAkts/mkhTSZK8w33B4RAg0=\ncloud.google.com/go v0.50.0/go.mod h1:r9sluTvynVuxRIOHXQEHMFffphuXHOMZMycpNR5e6To=\ncloud.google.com/go v0.52.0/go.mod h1:pXajvRH/6o3+F9jDHZWQ5PbGhn+o8w9qiu/CffaVdO4=\ncloud.google.com/go v0.53.0/go.mod h1:fp/UouUEsRkN6ryDKNW/Upv/JBKnv6WDthjR6+vze6M=\ncloud.google.com/go v0.54.0/go.mod h1:1rq2OEkV3YMf6n/9ZvGWI3GWw0VoqH/1x2nd8Is/bPc=\ncloud.google.com/go v0.56.0/go.mod h1:jr7tqZxxKOVYizybht9+26Z/gUq7tiRzu+ACVAMbKVk=\ncloud.google.com/go v0.57.0/go.mod h1:oXiQ6Rzq3RAkkY7N6t3TcE6jE+CIBBbA36lwQ1JyzZs=\ncloud.google.com/go v0.62.0/go.mod h1:jmCYTdRCQuc1PHIIJ/maLInMho30T/Y0M4hTdTShOYc=\ncloud.google.com/go v0.65.0/go.mod h1:O5N8zS7uWy9vkA9vayVHs65eM1ubvY4h553ofrNHObY=\ncloud.google.com/go v0.72.0/go.mod h1:M+5Vjvlc2wnp6tjzE102Dw08nGShTscUx2nZMufOKPI=\ncloud.google.com/go v0.74.0/go.mod h1:VV1xSbzvo+9QJOxLDaJfTjx5e+MePCpCWwvftOeQmWk=\ncloud.google.com/go v0.75.0/go.mod h1:VGuuCn7PG0dwsd5XPVm2Mm3wlh3EL55/79EKB6hlPTY=\ncloud.google.com/go v0.78.0/go.mod h1:QjdrLG0uq+YwhjoVOLsS1t7TW8fs36kLs4XO5R5ECHg=\ncloud.google.com/go v0.79.0/go.mod h1:3bzgcEeQlzbuEAYu4mrWhKqWjmpprinYgKJLgKHnbb8=\ncloud.google.com/go v0.81.0/go.mod h1:mk/AM35KwGk/Nm2YSeZbxXdrNK3KZOYHmLkOqC2V6E0=\ncloud.google.com/go v0.83.0/go.mod h1:Z7MJUsANfY0pYPdw0lbnivPx4/vhy/e2FEkSkF7vAVY=\ncloud.google.com/go v0.84.0/go.mod 
h1:RazrYuxIK6Kb7YrzzhPoLmCVzl7Sup4NrbKPg8KHSUM=\ncloud.google.com/go v0.87.0/go.mod h1:TpDYlFy7vuLzZMMZ+B6iRiELaY7z/gJPaqbMx6mlWcY=\ncloud.google.com/go v0.90.0/go.mod h1:kRX0mNRHe0e2rC6oNakvwQqzyDmg57xJ+SZU1eT2aDQ=\ncloud.google.com/go v0.93.3/go.mod h1:8utlLll2EF5XMAV15woO4lSbWQlk8rer9aLOfLh7+YI=\ncloud.google.com/go v0.94.1/go.mod h1:qAlAugsXlC+JWO+Bke5vCtc9ONxjQT3drlTTnAplMW4=\ncloud.google.com/go v0.97.0/go.mod h1:GF7l59pYBVlXQIBLx3a761cZ41F9bBH3JUlihCt2Udc=\ncloud.google.com/go v0.99.0/go.mod h1:w0Xx2nLzqWJPuozYQX+hFfCSI8WioryfRDzkoI/Y2ZA=\ncloud.google.com/go v0.100.2/go.mod h1:4Xra9TjzAeYHrl5+oeLlzbM2k3mjVhZh4UqTZ//w99A=\ncloud.google.com/go v0.102.0/go.mod h1:oWcCzKlqJ5zgHQt9YsaeTY9KzIvjyy0ArmiBUgpQ+nc=\ncloud.google.com/go v0.102.1/go.mod h1:XZ77E9qnTEnrgEOvr4xzfdX5TRo7fB4T2F4O6+34hIU=\ncloud.google.com/go v0.104.0/go.mod h1:OO6xxXdJyvuJPcEPBLN9BJPD+jep5G1+2U5B5gkRYtA=\ncloud.google.com/go v0.107.0 h1:qkj22L7bgkl6vIeZDlOY2po43Mx/TIa2Wsa7VR+PEww=\ncloud.google.com/go v0.107.0/go.mod h1:wpc2eNrD7hXUTy8EKS10jkxpZBjASrORK7goS+3YX2I=\ncloud.google.com/go/aiplatform v1.22.0/go.mod h1:ig5Nct50bZlzV6NvKaTwmplLLddFx0YReh9WfTO5jKw=\ncloud.google.com/go/aiplatform v1.24.0/go.mod h1:67UUvRBKG6GTayHKV8DBv2RtR1t93YRu5B1P3x99mYY=\ncloud.google.com/go/analytics v0.11.0/go.mod h1:DjEWCu41bVbYcKyvlws9Er60YE4a//bK6mnhWvQeFNI=\ncloud.google.com/go/analytics v0.12.0/go.mod h1:gkfj9h6XRf9+TS4bmuhPEShsh3hH8PAZzm/41OOhQd4=\ncloud.google.com/go/area120 v0.5.0/go.mod h1:DE/n4mp+iqVyvxHN41Vf1CR602GiHQjFPusMFW6bGR4=\ncloud.google.com/go/area120 v0.6.0/go.mod h1:39yFJqWVgm0UZqWTOdqkLhjoC7uFfgXRC8g/ZegeAh0=\ncloud.google.com/go/artifactregistry v1.6.0/go.mod h1:IYt0oBPSAGYj/kprzsBjZ/4LnG/zOcHyFHjWPCi6SAQ=\ncloud.google.com/go/artifactregistry v1.7.0/go.mod h1:mqTOFOnGZx8EtSqK/ZWcsm/4U8B77rbcLP6ruDU2Ixk=\ncloud.google.com/go/asset v1.5.0/go.mod h1:5mfs8UvcM5wHhqtSv8J1CtxxaQq3AdBxxQi2jGW/K4o=\ncloud.google.com/go/asset v1.7.0/go.mod 
h1:YbENsRK4+xTiL+Ofoj5Ckf+O17kJtgp3Y3nn4uzZz5s=\ncloud.google.com/go/asset v1.8.0/go.mod h1:mUNGKhiqIdbr8X7KNayoYvyc4HbbFO9URsjbytpUaW0=\ncloud.google.com/go/assuredworkloads v1.5.0/go.mod h1:n8HOZ6pff6re5KYfBXcFvSViQjDwxFkAkmUFffJRbbY=\ncloud.google.com/go/assuredworkloads v1.6.0/go.mod h1:yo2YOk37Yc89Rsd5QMVECvjaMKymF9OP+QXWlKXUkXw=\ncloud.google.com/go/assuredworkloads v1.7.0/go.mod h1:z/736/oNmtGAyU47reJgGN+KVoYoxeLBoj4XkKYscNI=\ncloud.google.com/go/automl v1.5.0/go.mod h1:34EjfoFGMZ5sgJ9EoLsRtdPSNZLcfflJR39VbVNS2M0=\ncloud.google.com/go/automl v1.6.0/go.mod h1:ugf8a6Fx+zP0D59WLhqgTDsQI9w07o64uf/Is3Nh5p8=\ncloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o=\ncloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE=\ncloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc=\ncloud.google.com/go/bigquery v1.5.0/go.mod h1:snEHRnqQbz117VIFhE8bmtwIDY80NLUZUMb4Nv6dBIg=\ncloud.google.com/go/bigquery v1.7.0/go.mod h1://okPTzCYNXSlb24MZs83e2Do+h+VXtc4gLoIoXIAPc=\ncloud.google.com/go/bigquery v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM77hZzJN/fQ=\ncloud.google.com/go/bigquery v1.42.0/go.mod h1:8dRTJxhtG+vwBKzE5OseQn/hiydoQN3EedCaOdYmxRA=\ncloud.google.com/go/billing v1.4.0/go.mod h1:g9IdKBEFlItS8bTtlrZdVLWSSdSyFUZKXNS02zKMOZY=\ncloud.google.com/go/billing v1.5.0/go.mod h1:mztb1tBc3QekhjSgmpf/CV4LzWXLzCArwpLmP2Gm88s=\ncloud.google.com/go/binaryauthorization v1.1.0/go.mod h1:xwnoWu3Y84jbuHa0zd526MJYmtnVXn0syOjaJgy4+dM=\ncloud.google.com/go/binaryauthorization v1.2.0/go.mod h1:86WKkJHtRcv5ViNABtYMhhNWRrD1Vpi//uKEy7aYEfI=\ncloud.google.com/go/cloudtasks v1.5.0/go.mod h1:fD92REy1x5woxkKEkLdvavGnPJGEn8Uic9nWuLzqCpY=\ncloud.google.com/go/cloudtasks v1.6.0/go.mod h1:C6Io+sxuke9/KNRkbQpihnW93SWDU3uXt92nu85HkYI=\ncloud.google.com/go/compute v0.1.0/go.mod h1:GAesmwr110a34z04OlxYkATPBEfVhkymfTBXtfbBFow=\ncloud.google.com/go/compute v1.3.0/go.mod 
h1:cCZiE1NHEtai4wiufUhW8I8S1JKkAnhnQJWM7YD99wM=\ncloud.google.com/go/compute v1.5.0/go.mod h1:9SMHyhJlzhlkJqrPAc839t2BZFTSk6Jdj6mkzQJeu0M=\ncloud.google.com/go/compute v1.6.0/go.mod h1:T29tfhtVbq1wvAPo0E3+7vhgmkOYeXjhFvz/FMzPu0s=\ncloud.google.com/go/compute v1.6.1/go.mod h1:g85FgpzFvNULZ+S8AYq87axRKuf2Kh7deLqV/jJ3thU=\ncloud.google.com/go/compute v1.7.0/go.mod h1:435lt8av5oL9P3fv1OEzSbSUe+ybHXGMPQHHZWZxy9U=\ncloud.google.com/go/compute v1.10.0/go.mod h1:ER5CLbMxl90o2jtNbGSbtfOpQKR0t15FOtRsugnLrlU=\ncloud.google.com/go/compute v1.12.1 h1:gKVJMEyqV5c/UnpzjjQbo3Rjvvqpr9B1DFSbJC4OXr0=\ncloud.google.com/go/compute v1.12.1/go.mod h1:e8yNOBcBONZU1vJKCvCoDw/4JQsA0dpM4x/6PIIOocU=\ncloud.google.com/go/compute/metadata v0.2.1 h1:efOwf5ymceDhK6PKMnnrTHP4pppY5L22mle96M1yP48=\ncloud.google.com/go/compute/metadata v0.2.1/go.mod h1:jgHgmJd2RKBGzXqF5LR2EZMGxBkeanZ9wwa75XHJgOM=\ncloud.google.com/go/containeranalysis v0.5.1/go.mod h1:1D92jd8gRR/c0fGMlymRgxWD3Qw9C1ff6/T7mLgVL8I=\ncloud.google.com/go/containeranalysis v0.6.0/go.mod h1:HEJoiEIu+lEXM+k7+qLCci0h33lX3ZqoYFdmPcoO7s4=\ncloud.google.com/go/datacatalog v1.3.0/go.mod h1:g9svFY6tuR+j+hrTw3J2dNcmI0dzmSiyOzm8kpLq0a0=\ncloud.google.com/go/datacatalog v1.5.0/go.mod h1:M7GPLNQeLfWqeIm3iuiruhPzkt65+Bx8dAKvScX8jvs=\ncloud.google.com/go/datacatalog v1.6.0/go.mod h1:+aEyF8JKg+uXcIdAmmaMUmZ3q1b/lKLtXCmXdnc0lbc=\ncloud.google.com/go/dataflow v0.6.0/go.mod h1:9QwV89cGoxjjSR9/r7eFDqqjtvbKxAK2BaYU6PVk9UM=\ncloud.google.com/go/dataflow v0.7.0/go.mod h1:PX526vb4ijFMesO1o202EaUmouZKBpjHsTlCtB4parQ=\ncloud.google.com/go/dataform v0.3.0/go.mod h1:cj8uNliRlHpa6L3yVhDOBrUXH+BPAO1+KFMQQNSThKo=\ncloud.google.com/go/dataform v0.4.0/go.mod h1:fwV6Y4Ty2yIFL89huYlEkwUPtS7YZinZbzzj5S9FzCE=\ncloud.google.com/go/datalabeling v0.5.0/go.mod h1:TGcJ0G2NzcsXSE/97yWjIZO0bXj0KbVlINXMG9ud42I=\ncloud.google.com/go/datalabeling v0.6.0/go.mod h1:WqdISuk/+WIGeMkpw/1q7bK/tFEZxsrFJOJdY2bXvTQ=\ncloud.google.com/go/dataqna v0.5.0/go.mod 
h1:90Hyk596ft3zUQ8NkFfvICSIfHFh1Bc7C4cK3vbhkeo=\ncloud.google.com/go/dataqna v0.6.0/go.mod h1:1lqNpM7rqNLVgWBJyk5NF6Uen2PHym0jtVJonplVsDA=\ncloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE=\ncloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk=\ncloud.google.com/go/datastream v1.2.0/go.mod h1:i/uTP8/fZwgATHS/XFu0TcNUhuA0twZxxQ3EyCUQMwo=\ncloud.google.com/go/datastream v1.3.0/go.mod h1:cqlOX8xlyYF/uxhiKn6Hbv6WjwPPuI9W2M9SAXwaLLQ=\ncloud.google.com/go/dialogflow v1.15.0/go.mod h1:HbHDWs33WOGJgn6rfzBW1Kv807BE3O1+xGbn59zZWI4=\ncloud.google.com/go/dialogflow v1.16.1/go.mod h1:po6LlzGfK+smoSmTBnbkIZY2w8ffjz/RcGSS+sh1el0=\ncloud.google.com/go/dialogflow v1.17.0/go.mod h1:YNP09C/kXA1aZdBgC/VtXX74G/TKn7XVCcVumTflA+8=\ncloud.google.com/go/documentai v1.7.0/go.mod h1:lJvftZB5NRiFSX4moiye1SMxHx0Bc3x1+p9e/RfXYiU=\ncloud.google.com/go/documentai v1.8.0/go.mod h1:xGHNEB7CtsnySCNrCFdCyyMz44RhFEEX2Q7UD0c5IhU=\ncloud.google.com/go/domains v0.6.0/go.mod h1:T9Rz3GasrpYk6mEGHh4rymIhjlnIuB4ofT1wTxDeT4Y=\ncloud.google.com/go/domains v0.7.0/go.mod h1:PtZeqS1xjnXuRPKE/88Iru/LdfoRyEHYA9nFQf4UKpg=\ncloud.google.com/go/edgecontainer v0.1.0/go.mod h1:WgkZ9tp10bFxqO8BLPqv2LlfmQF1X8lZqwW4r1BTajk=\ncloud.google.com/go/edgecontainer v0.2.0/go.mod h1:RTmLijy+lGpQ7BXuTDa4C4ssxyXT34NIuHIgKuP4s5w=\ncloud.google.com/go/functions v1.6.0/go.mod h1:3H1UA3qiIPRWD7PeZKLvHZ9SaQhR26XIJcC0A5GbvAk=\ncloud.google.com/go/functions v1.7.0/go.mod h1:+d+QBcWM+RsrgZfV9xo6KfA1GlzJfxcfZcRPEhDDfzg=\ncloud.google.com/go/gaming v1.5.0/go.mod h1:ol7rGcxP/qHTRQE/RO4bxkXq+Fix0j6D4LFPzYTIrDM=\ncloud.google.com/go/gaming v1.6.0/go.mod h1:YMU1GEvA39Qt3zWGyAVA9bpYz/yAhTvaQ1t2sK4KPUA=\ncloud.google.com/go/gkeconnect v0.5.0/go.mod h1:c5lsNAg5EwAy7fkqX/+goqFsU1Da/jQFqArp+wGNr/o=\ncloud.google.com/go/gkeconnect v0.6.0/go.mod h1:Mln67KyU/sHJEBY8kFZ0xTeyPtzbq9StAVvEULYK16A=\ncloud.google.com/go/gkehub v0.9.0/go.mod 
h1:WYHN6WG8w9bXU0hqNxt8rm5uxnk8IH+lPY9J2TV7BK0=\ncloud.google.com/go/gkehub v0.10.0/go.mod h1:UIPwxI0DsrpsVoWpLB0stwKCP+WFVG9+y977wO+hBH0=\ncloud.google.com/go/grafeas v0.2.0/go.mod h1:KhxgtF2hb0P191HlY5besjYm6MqTSTj3LSI+M+ByZHc=\ncloud.google.com/go/iam v0.3.0/go.mod h1:XzJPvDayI+9zsASAFO68Hk07u3z+f+JrT2xXNdp4bnY=\ncloud.google.com/go/iam v0.5.0/go.mod h1:wPU9Vt0P4UmCux7mqtRu6jcpPAb74cP1fh50J3QpkUc=\ncloud.google.com/go/iam v0.7.0 h1:k4MuwOsS7zGJJ+QfZ5vBK8SgHBAvYN/23BWsiihJ1vs=\ncloud.google.com/go/iam v0.7.0/go.mod h1:H5Br8wRaDGNc8XP3keLc4unfUUZeyH3Sfl9XpQEYOeg=\ncloud.google.com/go/language v1.4.0/go.mod h1:F9dRpNFQmJbkaop6g0JhSBXCNlO90e1KWx5iDdxbWic=\ncloud.google.com/go/language v1.6.0/go.mod h1:6dJ8t3B+lUYfStgls25GusK04NLh3eDLQnWM3mdEbhI=\ncloud.google.com/go/lifesciences v0.5.0/go.mod h1:3oIKy8ycWGPUyZDR/8RNnTOYevhaMLqh5vLUXs9zvT8=\ncloud.google.com/go/lifesciences v0.6.0/go.mod h1:ddj6tSX/7BOnhxCSd3ZcETvtNr8NZ6t/iPhY2Tyfu08=\ncloud.google.com/go/longrunning v0.3.0 h1:NjljC+FYPV3uh5/OwWT6pVU+doBqMg2x/rZlE+CamDs=\ncloud.google.com/go/mediatranslation v0.5.0/go.mod h1:jGPUhGTybqsPQn91pNXw0xVHfuJ3leR1wj37oU3y1f4=\ncloud.google.com/go/mediatranslation v0.6.0/go.mod h1:hHdBCTYNigsBxshbznuIMFNe5QXEowAuNmmC7h8pu5w=\ncloud.google.com/go/memcache v1.4.0/go.mod h1:rTOfiGZtJX1AaFUrOgsMHX5kAzaTQ8azHiuDoTPzNsE=\ncloud.google.com/go/memcache v1.5.0/go.mod h1:dk3fCK7dVo0cUU2c36jKb4VqKPS22BTkf81Xq617aWM=\ncloud.google.com/go/metastore v1.5.0/go.mod h1:2ZNrDcQwghfdtCwJ33nM0+GrBGlVuh8rakL3vdPY3XY=\ncloud.google.com/go/metastore v1.6.0/go.mod h1:6cyQTls8CWXzk45G55x57DVQ9gWg7RiH65+YgPsNh9s=\ncloud.google.com/go/networkconnectivity v1.4.0/go.mod h1:nOl7YL8odKyAOtzNX73/M5/mGZgqqMeryi6UPZTk/rA=\ncloud.google.com/go/networkconnectivity v1.5.0/go.mod h1:3GzqJx7uhtlM3kln0+x5wyFvuVH1pIBJjhCpjzSt75o=\ncloud.google.com/go/networksecurity v0.5.0/go.mod h1:xS6fOCoqpVC5zx15Z/MqkfDwH4+m/61A3ODiDV1xmiQ=\ncloud.google.com/go/networksecurity v0.6.0/go.mod 
h1:Q5fjhTr9WMI5mbpRYEbiexTzROf7ZbDzvzCrNl14nyU=\ncloud.google.com/go/notebooks v1.2.0/go.mod h1:9+wtppMfVPUeJ8fIWPOq1UnATHISkGXGqTkxeieQ6UY=\ncloud.google.com/go/notebooks v1.3.0/go.mod h1:bFR5lj07DtCPC7YAAJ//vHskFBxA5JzYlH68kXVdk34=\ncloud.google.com/go/osconfig v1.7.0/go.mod h1:oVHeCeZELfJP7XLxcBGTMBvRO+1nQ5tFG9VQTmYS2Fs=\ncloud.google.com/go/osconfig v1.8.0/go.mod h1:EQqZLu5w5XA7eKizepumcvWx+m8mJUhEwiPqWiZeEdg=\ncloud.google.com/go/oslogin v1.4.0/go.mod h1:YdgMXWRaElXz/lDk1Na6Fh5orF7gvmJ0FGLIs9LId4E=\ncloud.google.com/go/oslogin v1.5.0/go.mod h1:D260Qj11W2qx/HVF29zBg+0fd6YCSjSqLUkY/qEenQU=\ncloud.google.com/go/phishingprotection v0.5.0/go.mod h1:Y3HZknsK9bc9dMi+oE8Bim0lczMU6hrX0UpADuMefr0=\ncloud.google.com/go/phishingprotection v0.6.0/go.mod h1:9Y3LBLgy0kDTcYET8ZH3bq/7qni15yVUoAxiFxnlSUA=\ncloud.google.com/go/privatecatalog v0.5.0/go.mod h1:XgosMUvvPyxDjAVNDYxJ7wBW8//hLDDYmnsNcMGq1K0=\ncloud.google.com/go/privatecatalog v0.6.0/go.mod h1:i/fbkZR0hLN29eEWiiwue8Pb+GforiEIBnV9yrRUOKI=\ncloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I=\ncloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw=\ncloud.google.com/go/pubsub v1.2.0/go.mod h1:jhfEVHT8odbXTkndysNHCcx0awwzvfOlguIAii9o8iA=\ncloud.google.com/go/pubsub v1.3.1/go.mod h1:i+ucay31+CNRpDW4Lu78I4xXG+O1r/MAHgjpRVR+TSU=\ncloud.google.com/go/recaptchaenterprise v1.3.1/go.mod h1:OdD+q+y4XGeAlxRaMn1Y7/GveP6zmq76byL6tjPE7d4=\ncloud.google.com/go/recaptchaenterprise/v2 v2.1.0/go.mod h1:w9yVqajwroDNTfGuhmOjPDN//rZGySaf6PtFVcSCa7o=\ncloud.google.com/go/recaptchaenterprise/v2 v2.2.0/go.mod h1:/Zu5jisWGeERrd5HnlS3EUGb/D335f9k51B/FVil0jk=\ncloud.google.com/go/recaptchaenterprise/v2 v2.3.0/go.mod h1:O9LwGCjrhGHBQET5CA7dd5NwwNQUErSgEDit1DLNTdo=\ncloud.google.com/go/recommendationengine v0.5.0/go.mod h1:E5756pJcVFeVgaQv3WNpImkFP8a+RptV6dDLGPILjvg=\ncloud.google.com/go/recommendationengine v0.6.0/go.mod 
h1:08mq2umu9oIqc7tDy8sx+MNJdLG0fUi3vaSVbztHgJ4=\ncloud.google.com/go/recommender v1.5.0/go.mod h1:jdoeiBIVrJe9gQjwd759ecLJbxCDED4A6p+mqoqDvTg=\ncloud.google.com/go/recommender v1.6.0/go.mod h1:+yETpm25mcoiECKh9DEScGzIRyDKpZ0cEhWGo+8bo+c=\ncloud.google.com/go/redis v1.7.0/go.mod h1:V3x5Jq1jzUcg+UNsRvdmsfuFnit1cfe3Z/PGyq/lm4Y=\ncloud.google.com/go/redis v1.8.0/go.mod h1:Fm2szCDavWzBk2cDKxrkmWBqoCiL1+Ctwq7EyqBCA/A=\ncloud.google.com/go/retail v1.8.0/go.mod h1:QblKS8waDmNUhghY2TI9O3JLlFk8jybHeV4BF19FrE4=\ncloud.google.com/go/retail v1.9.0/go.mod h1:g6jb6mKuCS1QKnH/dpu7isX253absFl6iE92nHwlBUY=\ncloud.google.com/go/scheduler v1.4.0/go.mod h1:drcJBmxF3aqZJRhmkHQ9b3uSSpQoltBPGPxGAWROx6s=\ncloud.google.com/go/scheduler v1.5.0/go.mod h1:ri073ym49NW3AfT6DZi21vLZrG07GXr5p3H1KxN5QlI=\ncloud.google.com/go/secretmanager v1.6.0/go.mod h1:awVa/OXF6IiyaU1wQ34inzQNc4ISIDIrId8qE5QGgKA=\ncloud.google.com/go/security v1.5.0/go.mod h1:lgxGdyOKKjHL4YG3/YwIL2zLqMFCKs0UbQwgyZmfJl4=\ncloud.google.com/go/security v1.7.0/go.mod h1:mZklORHl6Bg7CNnnjLH//0UlAlaXqiG7Lb9PsPXLfD0=\ncloud.google.com/go/security v1.8.0/go.mod h1:hAQOwgmaHhztFhiQ41CjDODdWP0+AE1B3sX4OFlq+GU=\ncloud.google.com/go/securitycenter v1.13.0/go.mod h1:cv5qNAqjY84FCN6Y9z28WlkKXyWsgLO832YiWwkCWcU=\ncloud.google.com/go/securitycenter v1.14.0/go.mod h1:gZLAhtyKv85n52XYWt6RmeBdydyxfPeTrpToDPw4Auc=\ncloud.google.com/go/servicedirectory v1.4.0/go.mod h1:gH1MUaZCgtP7qQiI+F+A+OpeKF/HQWgtAddhTbhL2bs=\ncloud.google.com/go/servicedirectory v1.5.0/go.mod h1:QMKFL0NUySbpZJ1UZs3oFAmdvVxhhxB6eJ/Vlp73dfg=\ncloud.google.com/go/speech v1.6.0/go.mod h1:79tcr4FHCimOp56lwC01xnt/WPJZc4v3gzyT7FoBkCM=\ncloud.google.com/go/speech v1.7.0/go.mod h1:KptqL+BAQIhMsj1kOP2la5DSEEerPDuOP/2mmkhHhZQ=\ncloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw=\ncloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0ZeosJ0Rtdos=\ncloud.google.com/go/storage v1.6.0/go.mod 
h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohlUTyfDhBk=\ncloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RXyy7KQOVs=\ncloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0=\ncloud.google.com/go/storage v1.14.0/go.mod h1:GrKmX003DSIwi9o29oFT7YDnHYwZoctc3fOKtUw0Xmo=\ncloud.google.com/go/storage v1.22.1/go.mod h1:S8N1cAStu7BOeFfE8KAQzmyyLkK8p/vmRq6kuBTW58Y=\ncloud.google.com/go/storage v1.23.0/go.mod h1:vOEEDNFnciUMhBeT6hsJIn3ieU5cFRmzeLgDvXzfIXc=\ncloud.google.com/go/storage v1.27.0/go.mod h1:x9DOL8TK/ygDUMieqwfhdpQryTeEkhGKMi80i/iqR2s=\ncloud.google.com/go/storage v1.28.0 h1:DLrIZ6xkeZX6K70fU/boWx5INJumt6f+nwwWSHXzzGY=\ncloud.google.com/go/storage v1.28.0/go.mod h1:qlgZML35PXA3zoEnIkiPLY4/TOkUleufRlu6qmcf7sI=\ncloud.google.com/go/talent v1.1.0/go.mod h1:Vl4pt9jiHKvOgF9KoZo6Kob9oV4lwd/ZD5Cto54zDRw=\ncloud.google.com/go/talent v1.2.0/go.mod h1:MoNF9bhFQbiJ6eFD3uSsg0uBALw4n4gaCaEjBw9zo8g=\ncloud.google.com/go/videointelligence v1.6.0/go.mod h1:w0DIDlVRKtwPCn/C4iwZIJdvC69yInhW0cfi+p546uU=\ncloud.google.com/go/videointelligence v1.7.0/go.mod h1:k8pI/1wAhjznARtVT9U1llUaFNPh7muw8QyOUpavru4=\ncloud.google.com/go/vision v1.2.0/go.mod h1:SmNwgObm5DpFBme2xpyOyasvBc1aPdjvMk2bBk0tKD0=\ncloud.google.com/go/vision/v2 v2.2.0/go.mod h1:uCdV4PpN1S0jyCyq8sIM42v2Y6zOLkZs+4R9LrGYwFo=\ncloud.google.com/go/vision/v2 v2.3.0/go.mod h1:UO61abBx9QRMFkNBbf1D8B1LXdS2cGiiCRx0vSpZoUo=\ncloud.google.com/go/webrisk v1.4.0/go.mod h1:Hn8X6Zr+ziE2aNd8SliSDWpEnSS1u4R9+xXZmFiHmGE=\ncloud.google.com/go/webrisk v1.5.0/go.mod h1:iPG6fr52Tv7sGk0H6qUFzmL3HHZev1htXuWDEEsqMTg=\ncloud.google.com/go/workflows v1.6.0/go.mod h1:6t9F5h/unJz41YqfBmqSASJSXccBLtD1Vwf+KmJENM0=\ncloud.google.com/go/workflows v1.7.0/go.mod h1:JhSrZuVZWuiDfKEFxU0/F1PQjmpnpcoISEXH2bcHC3M=\ndmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU=\ngithub.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1 
h1:UQHMgLO+TxOElx5B5HZ4hJQsoJ/PvUvKRhJHDQXO8P8=\ngithub.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1/go.mod h1:xomTg63KZ2rFqZQzSB4Vz2SUXa1BpHTVz9L5PTmPC4E=\ngithub.com/BurntSushi/toml v0.3.1 h1:WXkYYl6Yr3qBf1K79EBnL4mak0OimBfB0XUf9Vl28OQ=\ngithub.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=\ngithub.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo=\ngithub.com/DATA-DOG/go-sqlmock v1.5.0 h1:Shsta01QNfFxHCfpW6YH2STWB0MudeXXEWMr20OEh60=\ngithub.com/DATA-DOG/go-sqlmock v1.5.0/go.mod h1:f/Ixk793poVmq4qj/V1dPUg2JEAKC73Q5eFN3EC/SaM=\ngithub.com/Masterminds/semver/v3 v3.1.1/go.mod h1:VPu/7SZ7ePZ3QOrcuXROw5FAcLl4a0cBrbBpGY/8hQs=\ngithub.com/Masterminds/squirrel v1.5.3 h1:YPpoceAcxuzIljlr5iWpNKaql7hLeG1KLSrhvdHpkZc=\ngithub.com/Masterminds/squirrel v1.5.3/go.mod h1:NNaOrjSoIDfDA40n7sr2tPNZRfjzjA400rg+riTZj10=\ngithub.com/Microsoft/go-winio v0.6.0 h1:slsWYD/zyx7lCXoZVlvQrj0hPTM1HI4+v1sIda2yDvg=\ngithub.com/Microsoft/go-winio v0.6.0/go.mod h1:cTAf44im0RAYeL23bpB+fzCyDH2MJiz2BO69KH/soAE=\ngithub.com/Microsoft/hcsshim v0.9.6 h1:VwnDOgLeoi2du6dAznfmspNqTiwczvjv4K7NxuY9jsY=\ngithub.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU=\ngithub.com/Shopify/logrus-bugsnag v0.0.0-20171204204709-577dee27f20d h1:UrqY+r/OJnIp5u0s1SbQ8dVfLCZJsnvazdBP5hS4iRs=\ngithub.com/VividCortex/ewma v1.2.0 h1:f58SaIzcDXrSy3kWaHNvuJgJ3Nmz59Zji6XoJR/q1ow=\ngithub.com/VividCortex/ewma v1.2.0/go.mod h1:nz4BbCtbLyFDeC9SUHbtcT5644juEuWfUAUnGx7j5l4=\ngithub.com/acarl005/stripansi v0.0.0-20180116102854-5a71ef0e047d h1:licZJFw2RwpHMqeKTCYkitsPqHNxTmd4SNR5r94FGM8=\ngithub.com/acarl005/stripansi v0.0.0-20180116102854-5a71ef0e047d/go.mod h1:asat636LX7Bqt5lYEZ27JNDcqxfjdBQuJ/MM4CN/Lzo=\ngithub.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=\ngithub.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod 
h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=\ngithub.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=\ngithub.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=\ngithub.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d/go.mod h1:rBZYJk541a8SKzHPHnH3zbiI+7dagKZ0cgpgrD7Fyho=\ngithub.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY=\ngithub.com/aws/aws-sdk-go v1.44.122/go.mod h1:y4AeaBuwd2Lk+GepC1E9v0qOiTws0MIWAX4oIKwKHZo=\ngithub.com/aws/aws-sdk-go v1.44.149 h1:zTWaUTbSjgMHvwhaQ91s/6ER8wMb3mA8M1GCZFO9QIo=\ngithub.com/aws/aws-sdk-go v1.44.149/go.mod h1:aVsgQcEevwlmQ7qHE9I3h+dtQgpqhFB+i8Phjh7fkwI=\ngithub.com/benbjohnson/clock v1.1.0 h1:Q92kusRqC1XV2MjkWETPvjJVqKetz1OzxZB7mHJLju8=\ngithub.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=\ngithub.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8=\ngithub.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=\ngithub.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw=\ngithub.com/bgentry/go-netrc v0.0.0-20140422174119-9fd32a8b3d3d h1:xDfNPAt8lFiC1UJrqV3uuy861HCTo708pDMbjHHdCas=\ngithub.com/bgentry/go-netrc v0.0.0-20140422174119-9fd32a8b3d3d/go.mod h1:6QX/PXZ00z/TKoufEY6K/a0k6AhaJrQKdFe6OfVXsa4=\ngithub.com/bmizerany/assert v0.0.0-20160611221934-b7ed37b82869 h1:DDGfHa7BWjL4YnC6+E63dPcxHo2sUxDIu8g3QgEJdRY=\ngithub.com/bshuster-repo/logrus-logstash-hook v1.0.0 h1:e+C0SB5R1pu//O4MQ3f9cFuPGoOVeF2fE4Og9otCc70=\ngithub.com/bugsnag/bugsnag-go v0.0.0-20141110184014-b1d153021fcd h1:rFt+Y/IK1aEZkEHchZRSq9OQbsSzIT/OrI8YFFmRIng=\ngithub.com/bugsnag/osext v0.0.0-20130617224835-0dd3f918b21b h1:otBG+dV+YK+Soembjv71DPz3uX/V/6MMlSyD9JBQ6kQ=\ngithub.com/bugsnag/panicwrap v0.0.0-20151223152923-e2c28503fcd0 
h1:nvj0OLI3YqYXer/kZD8Ri1aaunCxIEsOst1BVJswV0o=\ngithub.com/c-bata/go-prompt v0.2.6 h1:POP+nrHE+DfLYx370bedwNhsqmpCUynWPxuHi0C5vZI=\ngithub.com/c-bata/go-prompt v0.2.6/go.mod h1:/LMAke8wD2FsNu9EXNdHxNLbd9MedkPnCdfpU9wwHfY=\ngithub.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=\ngithub.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc=\ngithub.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=\ngithub.com/cespare/xxhash/v2 v2.1.2 h1:YRXhKfTDauu4ajMg1TPgFO5jnlC2HCbmLXMcTG5cbYE=\ngithub.com/cespare/xxhash/v2 v2.1.2/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=\ngithub.com/cheggaaa/pb v1.0.27/go.mod h1:pQciLPpbU0oxA0h+VJYYLxO+XeDQb5pZijXscXHm81s=\ngithub.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI=\ngithub.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI=\ngithub.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU=\ngithub.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=\ngithub.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc=\ngithub.com/cncf/udpa/go v0.0.0-20200629203442-efcf912fb354/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk=\ngithub.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk=\ngithub.com/cncf/udpa/go v0.0.0-20210930031921-04548b0d99d4/go.mod h1:6pvJx4me5XPnfI9Z40ddWsdw2W/uZgQLFXToKeRcDiI=\ngithub.com/cncf/xds/go v0.0.0-20210312221358-fbca930ec8ed/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=\ngithub.com/cncf/xds/go v0.0.0-20210805033703-aa0b78936158/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=\ngithub.com/cncf/xds/go v0.0.0-20210922020428-25de7278fc84/go.mod 
h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=\ngithub.com/cncf/xds/go v0.0.0-20211001041855-01bcc9b48dfe/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=\ngithub.com/cncf/xds/go v0.0.0-20211011173535-cb28da3451f1/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=\ngithub.com/cockroachdb/apd v1.1.0 h1:3LFP3629v+1aKXU5Q37mxmRxX/pIu1nijXydLShEq5I=\ngithub.com/cockroachdb/apd v1.1.0/go.mod h1:8Sl8LxpKi29FqWXR16WEFZRNSz3SoPzUzeMeY4+DwBQ=\ngithub.com/containerd/cgroups v1.0.4 h1:jN/mbWBEaz+T1pi5OFtnkQ+8qnmEbAr1Oo1FRm5B0dA=\ngithub.com/containerd/containerd v1.6.18 h1:qZbsLvmyu+Vlty0/Ex5xc0z2YtKpIsb5n45mAMI+2Ns=\ngithub.com/containerd/containerd v1.6.18/go.mod h1:1RdCUu95+gc2v9t3IL+zIlpClSmew7/0YS8O5eQZrOw=\ngithub.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4=\ngithub.com/coreos/go-systemd v0.0.0-20190719114852-fd7a80b32e1f/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4=\ngithub.com/cpuguy83/go-md2man/v2 v2.0.2/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o=\ngithub.com/creack/pty v1.1.7/go.mod h1:lj5s0c3V2DBrqTV7llrYr5NG6My20zk30Fl46Y7DoTY=\ngithub.com/creack/pty v1.1.11 h1:07n33Z8lZxZ2qwegKbObQohDhXDQxiMMz1NOUGYlesw=\ngithub.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=\ngithub.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=\ngithub.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=\ngithub.com/denisenkom/go-mssqldb v0.10.0/go.mod h1:xbL0rPBG9cCiLr28tMa8zpbdarY27NDyej4t/EjAShU=\ngithub.com/distribution/distribution/v3 v3.0.0-20220526142353-ffbd94cbe269 h1:hbCT8ZPPMqefiAWD2ZKjn7ypokIGViTvBBg/ExLSdCk=\ngithub.com/docker/cli v20.10.21+incompatible h1:qVkgyYUnOLQ98LtXBrwd/duVqPT2X4SHndOuGsfwyhU=\ngithub.com/docker/cli v20.10.21+incompatible/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8=\ngithub.com/docker/distribution v2.8.1+incompatible 
h1:Q50tZOPR6T/hjNsyc9g8/syEs6bk8XXApsHjKukMl68=\ngithub.com/docker/distribution v2.8.1+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w=\ngithub.com/docker/docker v20.10.21+incompatible h1:UTLdBmHk3bEY+w8qeO5KttOhy6OmXWsl/FEet9Uswog=\ngithub.com/docker/docker v20.10.21+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=\ngithub.com/docker/docker-credential-helpers v0.7.0 h1:xtCHsjxogADNZcdv1pKUHXryefjlVRqWqIhk/uXJp0A=\ngithub.com/docker/docker-credential-helpers v0.7.0/go.mod h1:rETQfLdHNT3foU5kuNkFR1R1V12OJRRO5lzt2D1b5X0=\ngithub.com/docker/go-connections v0.4.0 h1:El9xVISelRB7BuFusrZozjnkIM5YnzCViNKohAFqRJQ=\ngithub.com/docker/go-connections v0.4.0/go.mod h1:Gbd7IOopHjR8Iph03tsViu4nIes5XhDvyHbTtUxmeec=\ngithub.com/docker/go-events v0.0.0-20190806004212-e31b211e4f1c h1:+pKlWGMw7gf6bQ+oDZB4KHQFypsfjYlq/C4rfL7D3g8=\ngithub.com/docker/go-metrics v0.0.1 h1:AgB/0SvBxihN0X8OR4SjsblXkbMvalQ8cjmtKQ2rQV8=\ngithub.com/docker/go-metrics v0.0.1/go.mod h1:cG1hvH2utMXtqgqqYE9plW6lDxS3/5ayHzueweSI3Vw=\ngithub.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4=\ngithub.com/docker/go-units v0.5.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk=\ngithub.com/docker/libtrust v0.0.0-20150114040149-fa567046d9b1 h1:ZClxb8laGDf5arXfYcAtECDFgAgHklGI8CxgjHnXKJ4=\ngithub.com/doug-martin/goqu/v9 v9.18.0 h1:/6bcuEtAe6nsSMVK/M+fOiXUNfyFF3yYtE07DBPFMYY=\ngithub.com/doug-martin/goqu/v9 v9.18.0/go.mod h1:nf0Wc2/hV3gYK9LiyqIrzBEVGlI8qW3GuDCEobC4wBQ=\ngithub.com/emirpasic/gods v1.18.1 h1:FXtiHYKDGKCW2KzwZKx0iC0PQmdlorYgdFG9jPXJ1Bc=\ngithub.com/emirpasic/gods v1.18.1/go.mod h1:8tpGGwCnJ5H4r6BWwaV6OrWmMoPhUl5jm/FMNAnJvWQ=\ngithub.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=\ngithub.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=\ngithub.com/envoyproxy/go-control-plane v0.9.4/go.mod 
h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98=\ngithub.com/envoyproxy/go-control-plane v0.9.7/go.mod h1:cwu0lG7PUMfa9snN8LXBig5ynNVH9qI8YYLbd1fK2po=\ngithub.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk=\ngithub.com/envoyproxy/go-control-plane v0.9.9-0.20210217033140-668b12f5399d/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk=\ngithub.com/envoyproxy/go-control-plane v0.9.9-0.20210512163311-63b5d3c536b0/go.mod h1:hliV/p42l8fGbc6Y9bQ70uLwIvmJyVE5k4iMKlh8wCQ=\ngithub.com/envoyproxy/go-control-plane v0.9.10-0.20210907150352-cf90f659a021/go.mod h1:AFq3mo9L8Lqqiid3OhADV3RfLJnjiw63cSpi+fDTRC0=\ngithub.com/envoyproxy/go-control-plane v0.10.2-0.20220325020618-49ff273808a1/go.mod h1:KJwIaB5Mv44NWtYuAOFCVOjcI94vtpEz2JU/D2v6IjE=\ngithub.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c=\ngithub.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4=\ngithub.com/fatih/color v1.13.0/go.mod h1:kLAiJbzzSOZDVNGyDpeOxJ47H46qBXwg5ILebYFFOfk=\ngithub.com/fatih/color v1.14.1 h1:qfhVLaG5s+nCROl1zJsZRxFeYrHLqWroPOQ8BWiNb4w=\ngithub.com/fatih/color v1.14.1/go.mod h1:2oHN61fhTpgcxD3TSWCgKDiH1+x4OiDVVGH8WlgGZGg=\ngithub.com/felixge/httpsnoop v1.0.1 h1:lvB5Jl89CsZtGIWuTcDM1E/vkVs49/Ml7JJe07l8SPQ=\ngithub.com/frankban/quicktest v1.14.3 h1:FJKSZTDHjyhriyC81FLQ0LY93eSai0ZyR/ZIkd3ZUKE=\ngithub.com/fsnotify/fsnotify v1.6.0 h1:n+5WquG0fcWoWp6xPWfHdbskMCQaFnG6PfBrh1Ky4HY=\ngithub.com/fsnotify/fsnotify v1.6.0/go.mod h1:sl3t1tCWJFWoRz9R8WJCbQihKKwmorjAbSClcnxKAGw=\ngithub.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=\ngithub.com/gizak/termui/v3 v3.1.0 h1:ZZmVDgwHl7gR7elfKf1xc4IudXZ5qqfDh4wExk4Iajc=\ngithub.com/gizak/termui/v3 v3.1.0/go.mod h1:bXQEBkJpzxUAKf0+xq9MSWAvWZlE7c+aidmyFlkYTrY=\ngithub.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod 
h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU=\ngithub.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8=\ngithub.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8=\ngithub.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=\ngithub.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=\ngithub.com/go-kit/log v0.1.0/go.mod h1:zbhenjAZHb184qTLMA9ZjW7ThYL0H2mk7Q6pNt4vbaY=\ngithub.com/go-kit/log v0.2.0/go.mod h1:NwTd00d/i8cPZ3xOwwiv2PO5MOcx78fFErGNcVmBjv0=\ngithub.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE=\ngithub.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk=\ngithub.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG1KdI/P7A=\ngithub.com/go-logfmt/logfmt v0.5.1/go.mod h1:WYhtIu8zTZfxdn5+rREduYbwxfcBr/Vr6KEVveWlfTs=\ngithub.com/go-sql-driver/mysql v1.6.0/go.mod h1:DCzpHaOWr8IXmIStZouvnhqoel9Qv2LBy8hT2VhHyBg=\ngithub.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY=\ngithub.com/gofrs/uuid v4.0.0+incompatible h1:1SD/1F5pU8p29ybwgQSwpQk+mwdRrXCYuPhW6m+TnJw=\ngithub.com/gofrs/uuid v4.0.0+incompatible/go.mod h1:b2aQJv3Z4Fp6yNu3cdSllBxTCLRxnplIgP/c0N/04lM=\ngithub.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=\ngithub.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q=\ngithub.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q=\ngithub.com/golang-infrastructure/go-heap v0.0.2 h1:o3u5Wr8W/MO2PjtczBOEwb7HdUgK5zV12aAdp7T28IA=\ngithub.com/golang-infrastructure/go-heap v0.0.2/go.mod h1:crBY8pM89fprjRYrEGalpJ7xEzhD+XtxiFAuF9JnCuM=\ngithub.com/golang-infrastructure/go-queue v0.0.0-20221128180429-701892f44bcc h1:+i4Y16ygfCIRKnEfwSd7cfQ+KvSMx/ALePIf8wTFRuc=\ngithub.com/golang-infrastructure/go-queue 
v0.0.0-20221128180429-701892f44bcc/go.mod h1:Ype8CrMpjePHOJq+wRoIN3hx10H/WQsZ5SNvHdiSsBQ=\ngithub.com/golang-infrastructure/go-reflect-utils v0.0.0-20221130143747-965ef2eb09c3 h1:jJ7AdpNdLQudsx1hiXY9iwmauHARV4/UB52KnBh9Se0=\ngithub.com/golang-infrastructure/go-reflect-utils v0.0.0-20221130143747-965ef2eb09c3/go.mod h1:zqXYxqOBa1mL2ilBK6PuH/Wb/Iego7en6XhiKWdZQHI=\ngithub.com/golang-infrastructure/go-stack v0.0.2 h1:1F+F4/s2YlAW9NI8zc4jl90WrqLlXsWfCstBRpPvVl4=\ngithub.com/golang-infrastructure/go-stack v0.0.2/go.mod h1:m7RdZjsmlvHogbsiovh1uxvxVhwzJdNsrYh1BxdcFrQ=\ngithub.com/golang-infrastructure/go-trie v0.0.0-20230204150600-10750ecebaec h1:uXN0FFGQDbrsFXIEvAy2yjRswTM5rbt0rM/gxA1Ssbw=\ngithub.com/golang-infrastructure/go-trie v0.0.0-20230204150600-10750ecebaec/go.mod h1:ilcrzsB1h8U6nQfSFq+7hO4FxWg1Pgh/miU2k0CUd/A=\ngithub.com/golang-infrastructure/go-tuple v0.0.0-20221215155811-4ed54fe7d579 h1:pQV2/ichhyLoR3aJSNXByuxtdPM2y229Rq5x9DGl5OU=\ngithub.com/golang-infrastructure/go-tuple v0.0.0-20221215155811-4ed54fe7d579/go.mod h1:cn8fHK0Sjxh7nSrnNpRa9wi1wIsmBLsjOip4LTjQz+Q=\ngithub.com/golang-sql/civil v0.0.0-20190719163853-cb61b32ac6fe/go.mod h1:8vg3r2VgvsThLBIFL93Qb5yWzgyZWhEmBwUJWevAkK0=\ngithub.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=\ngithub.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=\ngithub.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=\ngithub.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=\ngithub.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da h1:oI5xCqsCo564l8iNU+DwB5epxmsaqB+rhGL0m5jtYqE=\ngithub.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=\ngithub.com/golang/mock v1.1.1/go.mod 
h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=\ngithub.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=\ngithub.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y=\ngithub.com/golang/mock v1.4.0/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw=\ngithub.com/golang/mock v1.4.1/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw=\ngithub.com/golang/mock v1.4.3/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw=\ngithub.com/golang/mock v1.4.4/go.mod h1:l3mdAwkq5BuhzHwde/uurv3sEJeZMXNpwsxVWU71h+4=\ngithub.com/golang/mock v1.5.0/go.mod h1:CWnOUgYIOo4TcNZ0wHX3YZCqsaM1I1Jvs6v3mP3KVu8=\ngithub.com/golang/mock v1.6.0/go.mod h1:p6yTPP+5HYm5mzsMV8JkE6ZKdX+/wYM6Hr+LicevLPs=\ngithub.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=\ngithub.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=\ngithub.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=\ngithub.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw=\ngithub.com/golang/protobuf v1.3.4/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw=\ngithub.com/golang/protobuf v1.3.5/go.mod h1:6O5/vntMXwX2lRkT1hjjk0nAC1IDOTvTlVgjlRvqsdk=\ngithub.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8=\ngithub.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA=\ngithub.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs=\ngithub.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w=\ngithub.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0=\ngithub.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8=\ngithub.com/golang/protobuf v1.4.2/go.mod 
h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=\ngithub.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=\ngithub.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk=\ngithub.com/golang/protobuf v1.5.1/go.mod h1:DopwsBzvsk0Fs44TXzsVbJyPhcCPeIwnvohx4u74HPM=\ngithub.com/golang/protobuf v1.5.2 h1:ROPKBNFfQgOUMifHyP+KYbvpjbdoFNs+aK7DXlji0Tw=\ngithub.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY=\ngithub.com/golang/snappy v0.0.3/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=\ngithub.com/gomodule/redigo v1.8.2 h1:H5XSIre1MB5NbPYFp+i1NBbb5qN1W8Y8YAQoAYbkm8k=\ngithub.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=\ngithub.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=\ngithub.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=\ngithub.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=\ngithub.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=\ngithub.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=\ngithub.com/google/go-cmp v0.4.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=\ngithub.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=\ngithub.com/google/go-cmp v0.5.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=\ngithub.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=\ngithub.com/google/go-cmp v0.5.3/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=\ngithub.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=\ngithub.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=\ngithub.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=\ngithub.com/google/go-cmp v0.5.7/go.mod 
h1:n+brtR0CgQNWTVd5ZUFpTBC8YFBDLK/h/bpaJ8/DtOE=\ngithub.com/google/go-cmp v0.5.8/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=\ngithub.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38=\ngithub.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=\ngithub.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=\ngithub.com/google/martian v2.1.0+incompatible h1:/CP5g8u/VJHijgedC/Legn3BAbAaWPgecwXBIDzw5no=\ngithub.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs=\ngithub.com/google/martian/v3 v3.0.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0=\ngithub.com/google/martian/v3 v3.1.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0=\ngithub.com/google/martian/v3 v3.2.1 h1:d8MncMlErDFTwQGBK1xhv026j9kqhvw1Qv9IbWT1VLQ=\ngithub.com/google/martian/v3 v3.2.1/go.mod h1:oBOf6HBosgwRXnUGWUB05QECsc6uvmMiJ3+6W4l/CUk=\ngithub.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=\ngithub.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=\ngithub.com/google/pprof v0.0.0-20191218002539-d4f498aebedc/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=\ngithub.com/google/pprof v0.0.0-20200212024743-f11f1df84d12/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=\ngithub.com/google/pprof v0.0.0-20200229191704-1ebb73c60ed3/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=\ngithub.com/google/pprof v0.0.0-20200430221834-fc25d7d30c6d/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=\ngithub.com/google/pprof v0.0.0-20200708004538-1a94d8640e99/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=\ngithub.com/google/pprof v0.0.0-20201023163331-3e6fc7fc9c4c/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=\ngithub.com/google/pprof v0.0.0-20201203190320-1bf35d6f28c2/go.mod 
h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=\ngithub.com/google/pprof v0.0.0-20201218002935-b9804c9f04c2/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=\ngithub.com/google/pprof v0.0.0-20210122040257-d980be63207e/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=\ngithub.com/google/pprof v0.0.0-20210226084205-cbba55b83ad5/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=\ngithub.com/google/pprof v0.0.0-20210601050228-01bbb1931b22/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=\ngithub.com/google/pprof v0.0.0-20210609004039-a478d1d731e9/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=\ngithub.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=\ngithub.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI=\ngithub.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=\ngithub.com/google/uuid v1.3.0 h1:t6JiXgmwXMjEs8VusXIJk2BXHsn+wx8BZdTaoZ5fu7I=\ngithub.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=\ngithub.com/googleapis/enterprise-certificate-proxy v0.0.0-20220520183353-fd19c99a87aa/go.mod h1:17drOmN3MwGY7t0e+Ei9b45FFGA3fBs3x36SsCg1hq8=\ngithub.com/googleapis/enterprise-certificate-proxy v0.1.0/go.mod h1:17drOmN3MwGY7t0e+Ei9b45FFGA3fBs3x36SsCg1hq8=\ngithub.com/googleapis/enterprise-certificate-proxy v0.2.0 h1:y8Yozv7SZtlU//QXbezB6QkpuE6jMD2/gfzk4AftXjs=\ngithub.com/googleapis/enterprise-certificate-proxy v0.2.0/go.mod h1:8C0jb7/mgJe/9KK8Lm7X9ctZC2t60YyIpYEI16jx0Qg=\ngithub.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg=\ngithub.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk=\ngithub.com/googleapis/gax-go/v2 v2.1.0/go.mod h1:Q3nei7sK6ybPYH7twZdmQpAd1MKb7pfu6SK+H1/DsU0=\ngithub.com/googleapis/gax-go/v2 v2.1.1/go.mod h1:hddJymUZASv3XPyGkUpKj8pPO47Rmb0eJc8R6ouapiM=\ngithub.com/googleapis/gax-go/v2 v2.2.0/go.mod 
h1:as02EH8zWkzwUoLbBaFeQ+arQaj/OthfcblKl4IGNaM=\ngithub.com/googleapis/gax-go/v2 v2.3.0/go.mod h1:b8LNqSzNabLiUpXKkY7HAR5jr6bIT99EXz9pXxye9YM=\ngithub.com/googleapis/gax-go/v2 v2.4.0/go.mod h1:XOTVJ59hdnfJLIP/dh8n5CGryZR2LxK9wbMD5+iXC6c=\ngithub.com/googleapis/gax-go/v2 v2.5.1/go.mod h1:h6B0KMMFNtI2ddbGJn3T3ZbwkeT6yqEF02fYlzkUCyo=\ngithub.com/googleapis/gax-go/v2 v2.6.0/go.mod h1:1mjbznJAPHFpesgE5ucqfYEscaz5kMdcIDwU/6+DDoY=\ngithub.com/googleapis/gax-go/v2 v2.7.0 h1:IcsPKeInNvYi7eqSaDjiZqDDKu5rsmunY0Y1YupQSSQ=\ngithub.com/googleapis/gax-go/v2 v2.7.0/go.mod h1:TEop28CZZQ2y+c0VxMUmu1lV+fQx57QpBWsYpwqHJx8=\ngithub.com/googleapis/go-type-adapters v1.0.0/go.mod h1:zHW75FOG2aur7gAO2B+MLby+cLsWGBF62rFAi7WjWO4=\ngithub.com/googleapis/google-cloud-go-testing v0.0.0-20200911160855-bcd43fbb19e8/go.mod h1:dvDLG8qkwmyD9a/MJJN3XJcT3xFxOKAvTZGvuZmac9g=\ngithub.com/gorilla/handlers v1.5.1 h1:9lRY6j8DEeeBT10CvO9hGW0gmky0BprnvDI5vfhUHH4=\ngithub.com/gorilla/mux v1.8.0 h1:i40aqfkR1h2SlN9hojwV5ZA91wcXFOvkdNIeFDP5koI=\ngithub.com/gorilla/mux v1.8.0/go.mod h1:DVbg23sWSpFRCP0SfiEN6jmj59UnW/n46BH5rLB71So=\ngithub.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw=\ngithub.com/hashicorp/go-cleanhttp v0.5.2 h1:035FKYIWjmULyFRBKPs8TBQoi0x6d9G4xc9neXJWAZQ=\ngithub.com/hashicorp/go-cleanhttp v0.5.2/go.mod h1:kO/YDlP8L1346E6Sodw+PrpBSV4/SoxCXGY6BqNFT48=\ngithub.com/hashicorp/go-getter v1.7.0 h1:bzrYP+qu/gMrL1au7/aDvkoOVGUJpeKBgbqRHACAFDY=\ngithub.com/hashicorp/go-getter v1.7.0/go.mod h1:W7TalhMmbPmsSMdNjD0ZskARur/9GJ17cfHTRtXV744=\ngithub.com/hashicorp/go-hclog v1.3.1 h1:vDwF1DFNZhntP4DAjuTpOw3uEgMUpXh1pB5fW9DqHpo=\ngithub.com/hashicorp/go-hclog v1.3.1/go.mod h1:W4Qnvbt70Wk/zYJryRzDRU/4r0kIg0PVHBcfoyhpF5M=\ngithub.com/hashicorp/go-plugin v1.4.6 h1:MDV3UrKQBM3du3G7MApDGvOsMYy3JQJ4exhSoKBAeVA=\ngithub.com/hashicorp/go-plugin v1.4.6/go.mod h1:viDMjcLJuDui6pXb8U4HVfb8AamCWhHGUjr2IrTF67s=\ngithub.com/hashicorp/go-safetemp v1.0.0 
h1:2HR189eFNrjHQyENnQMMpCiBAsRxzbTMIgBhEyExpmo=\ngithub.com/hashicorp/go-safetemp v1.0.0/go.mod h1:oaerMy3BhqiTbVye6QuFhFtIceqFoDHxNAB65b+Rj1I=\ngithub.com/hashicorp/go-version v1.6.0 h1:feTTfFNnjP967rlCxM/I9g701jU+RN74YKx2mOkIeek=\ngithub.com/hashicorp/go-version v1.6.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA=\ngithub.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=\ngithub.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=\ngithub.com/hashicorp/hcl v1.0.0 h1:0Anlzjpi4vEasTeNFn2mLJgTSwt0+6sfsiTG8qcWGx4=\ngithub.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ=\ngithub.com/hashicorp/yamux v0.1.1 h1:yrQxtgseBDrq9Y652vSRDvsKCJKOUD+GzTS4Y0Y8pvE=\ngithub.com/hashicorp/yamux v0.1.1/go.mod h1:CtWFDAQgb7dxtzFs4tWbplKIe2jSi3+5vKbgIO0SLnQ=\ngithub.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc=\ngithub.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc=\ngithub.com/inconshreveable/mousetrap v1.0.1 h1:U3uMjPSQEBMNp1lFxmllqCPM6P5u/Xq7Pgzkat/bFNc=\ngithub.com/inconshreveable/mousetrap v1.0.1/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw=\ngithub.com/jackc/chunkreader v1.0.0/go.mod h1:RT6O25fNZIuasFJRyZ4R/Y2BbhasbmZXF9QQ7T3kePo=\ngithub.com/jackc/chunkreader/v2 v2.0.0/go.mod h1:odVSm741yZoC3dpHEUXIqA9tQRhFrgOHwnPIn9lDKlk=\ngithub.com/jackc/chunkreader/v2 v2.0.1 h1:i+RDz65UE+mmpjTfyz0MoVTnzeYxroil2G82ki7MGG8=\ngithub.com/jackc/chunkreader/v2 v2.0.1/go.mod h1:odVSm741yZoC3dpHEUXIqA9tQRhFrgOHwnPIn9lDKlk=\ngithub.com/jackc/pgconn v0.0.0-20190420214824-7e0022ef6ba3/go.mod h1:jkELnwuX+w9qN5YIfX0fl88Ehu4XC3keFuOJJk9pcnA=\ngithub.com/jackc/pgconn v0.0.0-20190824142844-760dd75542eb/go.mod h1:lLjNuW/+OfW9/pnVKPazfWOgNfH2aPem8YQ7ilXGvJE=\ngithub.com/jackc/pgconn v0.0.0-20190831204454-2fabfa3c18b7/go.mod 
h1:ZJKsE/KZfsUgOEh9hBm+xYTstcNHg7UPMVJqRfQxq4s=\ngithub.com/jackc/pgconn v1.8.0/go.mod h1:1C2Pb36bGIP9QHGBYCjnyhqu7Rv3sGshaQUvmfGIB/o=\ngithub.com/jackc/pgconn v1.9.0/go.mod h1:YctiPyvzfU11JFxoXokUOOKQXQmDMoJL9vJzHH8/2JY=\ngithub.com/jackc/pgconn v1.9.1-0.20210724152538-d89c8390a530/go.mod h1:4z2w8XhRbP1hYxkpTuBjTS3ne3J48K83+u0zoyvg2pI=\ngithub.com/jackc/pgconn v1.13.0 h1:3L1XMNV2Zvca/8BYhzcRFS70Lr0WlDg16Di6SFGAbys=\ngithub.com/jackc/pgconn v1.13.0/go.mod h1:AnowpAqO4CMIIJNZl2VJp+KrkAZciAkhEl0W0JIobpI=\ngithub.com/jackc/pgio v1.0.0 h1:g12B9UwVnzGhueNavwioyEEpAmqMe1E/BN9ES+8ovkE=\ngithub.com/jackc/pgio v1.0.0/go.mod h1:oP+2QK2wFfUWgr+gxjoBH9KGBb31Eio69xUb0w5bYf8=\ngithub.com/jackc/pgmock v0.0.0-20190831213851-13a1b77aafa2/go.mod h1:fGZlG77KXmcq05nJLRkk0+p82V8B8Dw8KN2/V9c/OAE=\ngithub.com/jackc/pgmock v0.0.0-20201204152224-4fe30f7445fd/go.mod h1:hrBW0Enj2AZTNpt/7Y5rr2xe/9Mn757Wtb2xeBzPv2c=\ngithub.com/jackc/pgmock v0.0.0-20210724152146-4ad1a8207f65 h1:DadwsjnMwFjfWc9y5Wi/+Zz7xoE5ALHsRQlOctkOiHc=\ngithub.com/jackc/pgmock v0.0.0-20210724152146-4ad1a8207f65/go.mod h1:5R2h2EEX+qri8jOWMbJCtaPWkrrNc7OHwsp2TCqp7ak=\ngithub.com/jackc/pgpassfile v1.0.0 h1:/6Hmqy13Ss2zCq62VdNG8tM1wchn8zjSGOBJ6icpsIM=\ngithub.com/jackc/pgpassfile v1.0.0/go.mod h1:CEx0iS5ambNFdcRtxPj5JhEz+xB6uRky5eyVu/W2HEg=\ngithub.com/jackc/pgproto3 v1.1.0/go.mod h1:eR5FA3leWg7p9aeAqi37XOTgTIbkABlvcPB3E5rlc78=\ngithub.com/jackc/pgproto3/v2 v2.0.0-alpha1.0.20190420180111-c116219b62db/go.mod h1:bhq50y+xrl9n5mRYyCBFKkpRVTLYJVWeCc+mEAI3yXA=\ngithub.com/jackc/pgproto3/v2 v2.0.0-alpha1.0.20190609003834-432c2951c711/go.mod h1:uH0AWtUmuShn0bcesswc4aBTWGvw0cAxIJp+6OB//Wg=\ngithub.com/jackc/pgproto3/v2 v2.0.0-rc3/go.mod h1:ryONWYqW6dqSg1Lw6vXNMXoBJhpzvWKnT95C46ckYeM=\ngithub.com/jackc/pgproto3/v2 v2.0.0-rc3.0.20190831210041-4c03ce451f29/go.mod h1:ryONWYqW6dqSg1Lw6vXNMXoBJhpzvWKnT95C46ckYeM=\ngithub.com/jackc/pgproto3/v2 v2.0.6/go.mod h1:WfJCnwN3HIg9Ish/j3sgWXnAfK8A9Y0bwXYU5xKaEdA=\ngithub.com/jackc/pgproto3/v2 
v2.1.1/go.mod h1:WfJCnwN3HIg9Ish/j3sgWXnAfK8A9Y0bwXYU5xKaEdA=\ngithub.com/jackc/pgproto3/v2 v2.3.1 h1:nwj7qwf0S+Q7ISFfBndqeLwSwxs+4DPsbRFjECT1Y4Y=\ngithub.com/jackc/pgproto3/v2 v2.3.1/go.mod h1:WfJCnwN3HIg9Ish/j3sgWXnAfK8A9Y0bwXYU5xKaEdA=\ngithub.com/jackc/pgservicefile v0.0.0-20200714003250-2b9c44734f2b h1:C8S2+VttkHFdOOCXJe+YGfa4vHYwlt4Zx+IVXQ97jYg=\ngithub.com/jackc/pgservicefile v0.0.0-20200714003250-2b9c44734f2b/go.mod h1:vsD4gTJCa9TptPL8sPkXrLZ+hDuNrZCnj29CQpr4X1E=\ngithub.com/jackc/pgtype v0.0.0-20190421001408-4ed0de4755e0/go.mod h1:hdSHsc1V01CGwFsrv11mJRHWJ6aifDLfdV3aVjFF0zg=\ngithub.com/jackc/pgtype v0.0.0-20190824184912-ab885b375b90/go.mod h1:KcahbBH1nCMSo2DXpzsoWOAfFkdEtEJpPbVLq8eE+mc=\ngithub.com/jackc/pgtype v0.0.0-20190828014616-a8802b16cc59/go.mod h1:MWlu30kVJrUS8lot6TQqcg7mtthZ9T0EoIBFiJcmcyw=\ngithub.com/jackc/pgtype v1.8.1-0.20210724151600-32e20a603178/go.mod h1:C516IlIV9NKqfsMCXTdChteoXmwgUceqaLfjg2e3NlM=\ngithub.com/jackc/pgtype v1.12.0 h1:Dlq8Qvcch7kiehm8wPGIW0W3KsCCHJnRacKW0UM8n5w=\ngithub.com/jackc/pgtype v1.12.0/go.mod h1:LUMuVrfsFfdKGLw+AFFVv6KtHOFMwRgDDzBt76IqCA4=\ngithub.com/jackc/pgx/v4 v4.0.0-20190420224344-cc3461e65d96/go.mod h1:mdxmSJJuR08CZQyj1PVQBHy9XOp5p8/SHH6a0psbY9Y=\ngithub.com/jackc/pgx/v4 v4.0.0-20190421002000-1b8f0016e912/go.mod h1:no/Y67Jkk/9WuGR0JG/JseM9irFbnEPbuWV2EELPNuM=\ngithub.com/jackc/pgx/v4 v4.0.0-pre1.0.20190824185557-6972a5742186/go.mod h1:X+GQnOEnf1dqHGpw7JmHqHc1NxDoalibchSk9/RWuDc=\ngithub.com/jackc/pgx/v4 v4.12.1-0.20210724153913-640aa07df17c/go.mod h1:1QD0+tgSXP7iUjYm9C1NxKhny7lq6ee99u/z+IHFcgs=\ngithub.com/jackc/pgx/v4 v4.17.2 h1:0Ut0rpeKwvIVbMQ1KbMBU4h6wxehBI535LK6Flheh8E=\ngithub.com/jackc/pgx/v4 v4.17.2/go.mod h1:lcxIZN44yMIrWI78a5CpucdD14hX0SBDbNRvjDBItsw=\ngithub.com/jackc/puddle v0.0.0-20190413234325-e4ced69a3a2b/go.mod h1:m4B5Dj62Y0fbyuIc15OsIqK0+JU8nkqQjsgx7dvjSWk=\ngithub.com/jackc/puddle v0.0.0-20190608224051-11cab39313c9/go.mod 
h1:m4B5Dj62Y0fbyuIc15OsIqK0+JU8nkqQjsgx7dvjSWk=\ngithub.com/jackc/puddle v1.1.3/go.mod h1:m4B5Dj62Y0fbyuIc15OsIqK0+JU8nkqQjsgx7dvjSWk=\ngithub.com/jackc/puddle v1.3.0 h1:eHK/5clGOatcjX3oWGBO/MpxpbHzSwud5EWTSCI+MX0=\ngithub.com/jackc/puddle v1.3.0/go.mod h1:m4B5Dj62Y0fbyuIc15OsIqK0+JU8nkqQjsgx7dvjSWk=\ngithub.com/jhump/protoreflect v1.6.0 h1:h5jfMVslIg6l29nsMs0D8Wj17RDVdNYti0vDN/PZZoE=\ngithub.com/jmespath/go-jmespath v0.4.0 h1:BEgLn5cpjn8UN1mAw4NjwDrS35OdebyEtFe+9YPoQUg=\ngithub.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo=\ngithub.com/jmespath/go-jmespath/internal/testify v1.5.1 h1:shLQSRRSCCPj3f2gpwzGwWFoC7ycTf1rcQZHOlsJ6N8=\ngithub.com/jmespath/go-jmespath/internal/testify v1.5.1/go.mod h1:L3OGu8Wl2/fWfCI6z80xFu9LTZmf1ZRjMHUOPmWr69U=\ngithub.com/jpillora/backoff v1.0.0/go.mod h1:J/6gKK9jxlEcS3zixgDgUAsiuZ7yrSoa/FX5e0EB2j4=\ngithub.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU=\ngithub.com/json-iterator/go v1.1.7/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=\ngithub.com/json-iterator/go v1.1.10/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=\ngithub.com/json-iterator/go v1.1.11/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=\ngithub.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM=\ngithub.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo=\ngithub.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU=\ngithub.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk=\ngithub.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w=\ngithub.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8IZAc4RVcycCCAKdM=\ngithub.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8=\ngithub.com/kisielk/gotool v1.0.0/go.mod 
h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=\ngithub.com/klauspost/compress v1.15.11/go.mod h1:QPwzmACJjUTFsnSHH934V6woptycfrDDJnH7hvFVbGM=\ngithub.com/klauspost/compress v1.15.12 h1:YClS/PImqYbn+UILDnqxQCZ3RehC9N318SU3kElDUEM=\ngithub.com/klauspost/compress v1.15.12/go.mod h1:QPwzmACJjUTFsnSHH934V6woptycfrDDJnH7hvFVbGM=\ngithub.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=\ngithub.com/konsorten/go-windows-terminal-sequences v1.0.2/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=\ngithub.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=\ngithub.com/kr/fs v0.1.0/go.mod h1:FFnZGqtBN9Gxj7eW1uZ42v5BccTP0vu6NEaFoC2HwRg=\ngithub.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc=\ngithub.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=\ngithub.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE=\ngithub.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=\ngithub.com/kr/pty v1.1.8/go.mod h1:O1sed60cT9XZ5uDucP5qwvh+TE3NnUj51EiZO/lmSfw=\ngithub.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=\ngithub.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=\ngithub.com/lann/builder v0.0.0-20180802200727-47ae307949d0 h1:SOEGU9fKiNWd/HOJuq6+3iTQz8KNCLtVX6idSoTLdUw=\ngithub.com/lann/builder v0.0.0-20180802200727-47ae307949d0/go.mod h1:dXGbAdH5GtBTC4WfIxhKZfyBF/HBFgRZSWwZ9g/He9o=\ngithub.com/lann/ps v0.0.0-20150810152359-62de8c46ede0 h1:P6pPBnrTSX3DEVR4fDembhRWSsG5rVo6hYhAB/ADZrk=\ngithub.com/lann/ps v0.0.0-20150810152359-62de8c46ede0/go.mod h1:vmVJ0l/dxyfGW6FmdpVm2joNMFikkuWg0EoCKLGUMNw=\ngithub.com/lib/pq v1.0.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo=\ngithub.com/lib/pq v1.1.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo=\ngithub.com/lib/pq v1.2.0/go.mod 
h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo=\ngithub.com/lib/pq v1.10.1/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o=\ngithub.com/lib/pq v1.10.2 h1:AqzbZs4ZoCBp+GtejcpCpcxM3zlSMx29dXbUSeVtJb8=\ngithub.com/lib/pq v1.10.2/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o=\ngithub.com/magiconair/properties v1.8.6 h1:5ibWZ6iY0NctNGWo87LalDlEZ6R41TqbbDamhfG/Qzo=\ngithub.com/magiconair/properties v1.8.6/go.mod h1:y3VJvCyxH9uVvJTWEGAELF3aiYNyPKd5NZ3oSwXrF60=\ngithub.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU=\ngithub.com/mattn/go-colorable v0.1.1/go.mod h1:FuOcm+DKB9mbwrcAfNl7/TZVBZ6rcnceauSikq3lYCQ=\ngithub.com/mattn/go-colorable v0.1.4/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE=\ngithub.com/mattn/go-colorable v0.1.6/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc=\ngithub.com/mattn/go-colorable v0.1.7/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc=\ngithub.com/mattn/go-colorable v0.1.9/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc=\ngithub.com/mattn/go-colorable v0.1.12/go.mod h1:u5H1YNBxpqRaxsYJYSkiCWKzEfiAb1Gb520KVy5xxl4=\ngithub.com/mattn/go-colorable v0.1.13 h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxecdEvA=\ngithub.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovkB8vQcUbaXHg=\ngithub.com/mattn/go-isatty v0.0.4/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4=\ngithub.com/mattn/go-isatty v0.0.5/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s=\ngithub.com/mattn/go-isatty v0.0.7/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s=\ngithub.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s=\ngithub.com/mattn/go-isatty v0.0.10/go.mod h1:qgIWMr58cqv1PHHyhnkY9lrL7etaEgOFcMEpPG5Rm84=\ngithub.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU=\ngithub.com/mattn/go-isatty v0.0.14/go.mod h1:7GGIvUiUoEMVVmxf/4nioHXj79iQHKdU27kJ6hsGG94=\ngithub.com/mattn/go-isatty v0.0.16/go.mod 
h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM=\ngithub.com/mattn/go-isatty v0.0.17 h1:BTarxUcIeDqL27Mc+vyvdWYSL28zpIhv3RoTdsLMPng=\ngithub.com/mattn/go-isatty v0.0.17/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM=\ngithub.com/mattn/go-runewidth v0.0.2/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU=\ngithub.com/mattn/go-runewidth v0.0.4/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU=\ngithub.com/mattn/go-runewidth v0.0.6/go.mod h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m2gUSrubnMI=\ngithub.com/mattn/go-runewidth v0.0.7/go.mod h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m2gUSrubnMI=\ngithub.com/mattn/go-runewidth v0.0.9/go.mod h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m2gUSrubnMI=\ngithub.com/mattn/go-runewidth v0.0.13/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh153qUoGf23w=\ngithub.com/mattn/go-runewidth v0.0.14 h1:+xnbZSEeDbOIg5/mE6JF0w6n9duR1l3/WmbinWVwUuU=\ngithub.com/mattn/go-runewidth v0.0.14/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh153qUoGf23w=\ngithub.com/mattn/go-sqlite3 v1.14.7/go.mod h1:NyWgC/yNuGj7Q9rpYnZvas74GogHl5/Z4A/KQRfk6bU=\ngithub.com/mattn/go-tty v0.0.3/go.mod h1:ihxohKRERHTVzN+aSVRwACLCeqIoZAWpoICkkvrWyR0=\ngithub.com/mattn/go-tty v0.0.4 h1:NVikla9X8MN0SQAqCYzpGyXv0jY7MNl3HOWD2dkle7E=\ngithub.com/mattn/go-tty v0.0.4/go.mod h1:u5GGXBtZU6RQoKV8gY5W6UhMudbR5vXnUe7j3pxse28=\ngithub.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=\ngithub.com/matttproud/golang_protobuf_extensions v1.0.4 h1:mmDVorXM7PCGKw94cs5zkfA9PSy5pEvNWRP0ET0TIVo=\ngithub.com/matttproud/golang_protobuf_extensions v1.0.4/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4=\ngithub.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y=\ngithub.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0=\ngithub.com/mitchellh/go-testing-interface v1.14.1 h1:jrgshOhYAUVNMAJiKbEu7EqAwgJJ2JqpQmpLJOu07cU=\ngithub.com/mitchellh/go-testing-interface v1.14.1/go.mod 
h1:gfgS7OtZj6MA4U1UrDRp04twqAjfvlZyCfX3sDjEym8=\ngithub.com/mitchellh/go-wordwrap v0.0.0-20150314170334-ad45545899c7/go.mod h1:ZXFpozHsX6DPmq2I0TCekCxypsnAUbP2oI0UX1GXzOo=\ngithub.com/mitchellh/go-wordwrap v1.0.1 h1:TLuKupo69TCn6TQSyGxwI1EblZZEsQ0vMlAFQflz0v0=\ngithub.com/mitchellh/go-wordwrap v1.0.1/go.mod h1:R62XHJLzvMFRBbcrT7m7WgmE1eOyTSsCt+hzestvNj0=\ngithub.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY=\ngithub.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo=\ngithub.com/moby/locker v1.0.1 h1:fOXqR41zeveg4fFODix+1Ch4mj/gT0NE1XJbp/epuBg=\ngithub.com/moby/locker v1.0.1/go.mod h1:S7SDdo5zpBK84bzzVlKr2V0hz+7x9hWbYC/kq7oQppc=\ngithub.com/moby/sys/mountinfo v0.5.0 h1:2Ks8/r6lopsxWi9m58nlwjaeSzUX9iiL1vj5qB/9ObI=\ngithub.com/moby/term v0.0.0-20221120202655-abb19827d345 h1:J9c53/kxIH+2nTKBEfZYFMlhghtHpIHSXpm5VRGHSnU=\ngithub.com/moby/term v0.0.0-20221120202655-abb19827d345/go.mod h1:15ce4BGCFxt7I5NQKT+HV0yEDxmf6fSysfEDiVo3zFM=\ngithub.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=\ngithub.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg=\ngithub.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=\ngithub.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=\ngithub.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=\ngithub.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M=\ngithub.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk=\ngithub.com/morikuni/aec v1.0.0 h1:nP9CBfwrvYnBRgY6qfDQkygYDmYwOilePFkwzv4dU8A=\ngithub.com/morikuni/aec v1.0.0/go.mod h1:BbKIizmSmc5MMPqRYbxO4ZU0S0+P200+tUnFx7PXmsc=\ngithub.com/mwitkow/go-conntrack 
v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=\ngithub.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=\ngithub.com/natefinch/lumberjack v2.0.0+incompatible h1:4QJd3OLAMgj7ph+yZTuX13Ld4UpgHp07nNdFX7mqFfM=\ngithub.com/natefinch/lumberjack v2.0.0+incompatible/go.mod h1:Wi9p2TTF5DG5oU+6YfsmYQpsTIOm0B1VNzQg9Mw6nPk=\ngithub.com/nsf/termbox-go v0.0.0-20190121233118-02980233997d/go.mod h1:IuKpRQcYE1Tfu+oAQqaLisqDeXgjyyltCfsaoYN18NQ=\ngithub.com/nsf/termbox-go v1.1.1 h1:nksUPLCb73Q++DwbYUBEglYBRPZyoXJdrj5L+TkjyZY=\ngithub.com/nsf/termbox-go v1.1.1/go.mod h1:T0cTdVuOwf7pHQNtfhnEbzHbcNyCEcVU4YPpouCbVxo=\ngithub.com/oklog/run v1.1.0 h1:GEenZ1cK0+q0+wsJew9qUg/DyD8k3JzYsZAi5gYi2mA=\ngithub.com/oklog/run v1.1.0/go.mod h1:sVPdnTZT1zYwAJeCMu2Th4T21pA3FPOQRfWjQlk7DVU=\ngithub.com/olekukonko/tablewriter v0.0.5 h1:P2Ga83D34wi1o9J6Wh1mRuqd4mF/x/lgBS7N7AbDhec=\ngithub.com/olekukonko/tablewriter v0.0.5/go.mod h1:hPp6KlRPjbx+hW8ykQs1w3UBbZlj6HuIJcUGPhkA7kY=\ngithub.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U=\ngithub.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM=\ngithub.com/opencontainers/image-spec v1.1.0-rc2 h1:2zx/Stx4Wc5pIPDvIxHXvXtQFW/7XWJGmnM7r3wg034=\ngithub.com/opencontainers/image-spec v1.1.0-rc2/go.mod h1:3OVijpioIKYWTqjiG0zfF6wvoJ4fAXGbjdZuI2NgsRQ=\ngithub.com/pelletier/go-toml v1.9.5 h1:4yBQzkHv+7BHq2PQUZF3Mx0IYxG7LsP222s7Agd3ve8=\ngithub.com/pelletier/go-toml v1.9.5/go.mod h1:u1nR/EPcESfeI/szUZKdtJ0xRNbUoANCkoOuaOx1Y+c=\ngithub.com/pelletier/go-toml/v2 v2.0.6 h1:nrzqCb7j9cDFj2coyLNLaZuJTLjWjlaz6nvTvIwycIU=\ngithub.com/pelletier/go-toml/v2 v2.0.6/go.mod h1:eumQOmlWiOPt5WriQQqoM5y18pDHwha2N+QD+EUNTek=\ngithub.com/phayes/freeport v0.0.0-20220201140144-74d24b5ae9f5 h1:Ii+DKncOVM8Cu1Hc+ETb5K+23HdAMvESYE3ZJ5b5cMI=\ngithub.com/pkg/errors v0.8.0/go.mod 
h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=\ngithub.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=\ngithub.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=\ngithub.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=\ngithub.com/pkg/sftp v1.13.1/go.mod h1:3HaPG6Dq1ILlpPZRO0HVMrsydcdLt6HRDccSgb87qRg=\ngithub.com/pkg/term v1.2.0-beta.2 h1:L3y/h2jkuBVFdWiJvNfYfKmzcCnILw7mJWm2JQuMppw=\ngithub.com/pkg/term v1.2.0-beta.2/go.mod h1:E25nymQcrSllhX42Ok8MRm1+hyBdHY0dCeiKZ9jpNGw=\ngithub.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=\ngithub.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=\ngithub.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw=\ngithub.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo=\ngithub.com/prometheus/client_golang v1.1.0/go.mod h1:I1FGZT9+L76gKKOs5djB6ezCbFQP1xR9D75/vuwEF3g=\ngithub.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M=\ngithub.com/prometheus/client_golang v1.11.0/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0=\ngithub.com/prometheus/client_golang v1.12.1/go.mod h1:3Z9XVyYiZYEO+YQWt3RD2R3jrbd179Rt297l4aS6nDY=\ngithub.com/prometheus/client_golang v1.14.0 h1:nJdhIvne2eSX/XRAFV9PcvFFRbrjbcTUj0VP62TMhnw=\ngithub.com/prometheus/client_golang v1.14.0/go.mod h1:8vpkKitgIVNcqrRBWh1C4TIUQgYNtG/XQE4E/Zae36Y=\ngithub.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=\ngithub.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=\ngithub.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=\ngithub.com/prometheus/client_model v0.2.0/go.mod 
h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=\ngithub.com/prometheus/client_model v0.3.0 h1:UBgGFHqYdG/TPFD1B1ogZywDqEkwp3fBMvqdiQ7Xew4=\ngithub.com/prometheus/client_model v0.3.0/go.mod h1:LDGWKZIo7rky3hgvBe+caln+Dr3dPggB5dvjtD7w9+w=\ngithub.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=\ngithub.com/prometheus/common v0.6.0/go.mod h1:eBmuwkDJBwy6iBfxCBob6t6dR6ENT/y+J+Zk0j9GMYc=\ngithub.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo=\ngithub.com/prometheus/common v0.26.0/go.mod h1:M7rCNAaPfAosfx8veZJCuw84e35h3Cfd9VFqTh1DIvc=\ngithub.com/prometheus/common v0.32.1/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls=\ngithub.com/prometheus/common v0.37.0 h1:ccBbHCgIiT9uSoFY0vX8H3zsNR5eLt17/RQLUvn8pXE=\ngithub.com/prometheus/common v0.37.0/go.mod h1:phzohg0JFMnBEFGxTDbfu3QyL5GI8gTQJFhYO5B3mfA=\ngithub.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=\ngithub.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=\ngithub.com/prometheus/procfs v0.0.3/go.mod h1:4A/X28fw3Fc593LaREMrKMqOKvUAntwMDaekg4FpcdQ=\ngithub.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU=\ngithub.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA=\ngithub.com/prometheus/procfs v0.7.3/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA=\ngithub.com/prometheus/procfs v0.8.0 h1:ODq8ZFEaYeCaZOJlZZdJA2AbQR98dSHSM1KW/You5mo=\ngithub.com/prometheus/procfs v0.8.0/go.mod h1:z7EfXMXOkbkqb9IINtpCn86r/to3BnA0uaxHdg830/4=\ngithub.com/rivo/uniseg v0.2.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc=\ngithub.com/rivo/uniseg v0.4.3 h1:utMvzDsuh3suAEnhH0RdHmoPbU648o6CvXxTx4SBMOw=\ngithub.com/rivo/uniseg v0.4.3/go.mod h1:FN3SvrM+Zdj16jyLfmOkMNblXMcoc8DfTHruCPUcx88=\ngithub.com/rogpeppe/fastuuid v1.2.0/go.mod 
h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ=\ngithub.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=\ngithub.com/rogpeppe/go-internal v1.6.1 h1:/FiVV8dS/e+YqF2JvO3yXRFbBLTIuSDkuC7aBOAvL+k=\ngithub.com/rs/xid v1.2.1/go.mod h1:+uKXf+4Djp6Md1KODXJxgGQPKngRmWyn10oCKFzNHOQ=\ngithub.com/rs/zerolog v1.13.0/go.mod h1:YbFCdg8HfsridGWAh22vktObvhZbQsZXe4/zB0OKkWU=\ngithub.com/rs/zerolog v1.15.0/go.mod h1:xYTKnLHcpfU2225ny5qZjxnj9NvkumZYjJHlAThCjNc=\ngithub.com/rudderlabs/analytics-go/v4 v4.1.0 h1:9fanGq85Vt7zhxJ3U9wTEEI6lK3zKbEqJf2j81XbINs=\ngithub.com/rudderlabs/analytics-go/v4 v4.1.0/go.mod h1:/kXZkGO7S0of698Z62p8Y6KPH1nFaok32di9WPZMiE4=\ngithub.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=\ngithub.com/sashabaranov/go-openai v1.5.7 h1:8DGgRG+P7yWixte5j720y6yiXgY3Hlgcd0gcpHdltfo=\ngithub.com/sashabaranov/go-openai v1.5.7/go.mod h1:lj5b/K+zjTSFxVLijLSTDZuP7adOgerWeFyZLUhAKRg=\ngithub.com/satori/go.uuid v1.2.0 h1:0uYX9dsZ2yD7q2RtLRtPSdGDWzjeM3TbMJP9utgA0ww=\ngithub.com/satori/go.uuid v1.2.0/go.mod h1:dA0hQrYB0VpLJoorglMZABFdXlWrHn1NEOzdhQKdks0=\ngithub.com/segmentio/backo-go v1.0.1 h1:68RQccglxZeyURy93ASB/2kc9QudzgIDexJ927N++y4=\ngithub.com/segmentio/backo-go v1.0.1/go.mod h1:9/Rh6yILuLysoQnZ2oNooD2g7aBnvM7r/fNVxRNWfBc=\ngithub.com/selefra/selefra-provider-sdk v0.0.21 h1:y+wsndma5yi3k7NaJa/baoEMUz7Lyk83ENdA4ph4ba4=\ngithub.com/selefra/selefra-provider-sdk v0.0.21/go.mod h1:7kxOCmokgSZu0PznEV9vuin9nU3dz8SV6bht8yPolbk=\ngithub.com/selefra/selefra-provider-sdk v0.0.23-0.20230818060159-5a522673a95e h1:dhEH7B/rrNlUlnAK4b5vn44qNJvi1JKjllpkLH5pisY=\ngithub.com/selefra/selefra-provider-sdk v0.0.23-0.20230818060159-5a522673a95e/go.mod h1:7kxOCmokgSZu0PznEV9vuin9nU3dz8SV6bht8yPolbk=\ngithub.com/selefra/selefra-provider-sdk v0.0.23-0.20230818075347-cef95b1e16a5 h1:BnvuSslYicIHciIPe8Klr+2AG700SpwHePW45Q/5srY=\ngithub.com/selefra/selefra-provider-sdk v0.0.23-0.20230818075347-cef95b1e16a5/go.mod 
h1:9fVOT6k/EY9O8xkfNtnoyZl/YeN45JDixGpyDu4ONp0=\ngithub.com/selefra/selefra-utils v0.0.4 h1:NJ1d8qr0I4Gse9lxk2oll0QlTGtuNdBlqRPoViHXhBA=\ngithub.com/selefra/selefra-utils v0.0.4/go.mod h1:LPY6RNU12IniRH3xz7yS7xF10YH3emQ6NWrn6UuBjEk=\ngithub.com/shopspring/decimal v0.0.0-20180709203117-cd690d0c9e24/go.mod h1:M+9NzErvs504Cn4c5DxATwIqPbtswREoFCre64PpcG4=\ngithub.com/shopspring/decimal v1.2.0 h1:abSATXmQEYyShuxI4/vyW3tV1MrKAJzCZ/0zLUXYbsQ=\ngithub.com/shopspring/decimal v1.2.0/go.mod h1:DKyhrW/HYNuLGql+MJL6WCR6knT2jwCFRcu2hWCYk4o=\ngithub.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo=\ngithub.com/sirupsen/logrus v1.4.1/go.mod h1:ni0Sbl8bgC9z8RoU9G6nDWqqs/fq4eDPysMBDgk/93Q=\ngithub.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE=\ngithub.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88=\ngithub.com/sirupsen/logrus v1.9.0 h1:trlNQbNUG3OdDrDil03MCb1H2o9nJ1x4/5LYw7byDE0=\ngithub.com/sirupsen/logrus v1.9.0/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ=\ngithub.com/songzhibin97/gkit v1.2.7 h1:Z5GJs8jysk4pEer/bSeo49Xh/iAGXz9dZfiZSPyfRy8=\ngithub.com/songzhibin97/gkit v1.2.7/go.mod h1:fqP08z5X5p1rdYCoxTtovvDQ91wcWsablOuMPVqQHEc=\ngithub.com/songzhibin97/go-ognl v0.0.3-0.20220831071354-b4f21269f576 h1:qOM453XbsWqNGCz0rKpCK852Ut8h9VCMdZqhfAf8KSw=\ngithub.com/songzhibin97/go-ognl v0.0.3-0.20220831071354-b4f21269f576/go.mod h1:0tuH6BQ4cHgQaQ2Ch4XpJILNqvQDnGcE9NObPY6rFOM=\ngithub.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA=\ngithub.com/spf13/afero v1.9.3 h1:41FoI0fD7OR7mGcKE/aOiLkGreyf8ifIOQmJANWogMk=\ngithub.com/spf13/afero v1.9.3/go.mod h1:iUV7ddyEEZPO5gA3zD4fJt6iStLlL+Lg4m2cihcDf8Y=\ngithub.com/spf13/cast v1.5.0 h1:rj3WzYc11XZaIZMPKmwP96zkFEnnAmV8s6XbB2aY32w=\ngithub.com/spf13/cast v1.5.0/go.mod h1:SpXXQ5YoyJw6s3/6cMTQuxvgRl3PCJiyaX9p6b155UU=\ngithub.com/spf13/cobra v1.6.1 
h1:o94oiPyS4KD1mPy2fmcYYHHfCxLqYjJOhGsCHFZtEzA=\ngithub.com/spf13/cobra v1.6.1/go.mod h1:IOw/AERYS7UzyrGinqmz6HLUo219MORXGxhbaJUqzrY=\ngithub.com/spf13/jwalterweatherman v1.1.0 h1:ue6voC5bR5F8YxI5S67j9i582FU4Qvo2bmqnqMYADFk=\ngithub.com/spf13/jwalterweatherman v1.1.0/go.mod h1:aNWZUN0dPAAO/Ljvb5BEdw96iTZ0EXowPYD95IqWIGo=\ngithub.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA=\ngithub.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=\ngithub.com/spf13/viper v1.14.0 h1:Rg7d3Lo706X9tHsJMUjdiwMpHB7W8WnSVOssIY+JElU=\ngithub.com/spf13/viper v1.14.0/go.mod h1:WT//axPky3FdvXHzGw33dNdXXXfFQqmEalje+egj8As=\ngithub.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=\ngithub.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=\ngithub.com/stretchr/objx v0.2.0/go.mod h1:qt09Ya8vawLte6SNmTgCsAVtYtaKzEcn8ATUoHMkEqE=\ngithub.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw=\ngithub.com/stretchr/objx v0.5.0 h1:1zr/of2m5FGMsad5YfcqgdqdWrIhu+EBEJRhR1U7z/c=\ngithub.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo=\ngithub.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=\ngithub.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=\ngithub.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=\ngithub.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA=\ngithub.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=\ngithub.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=\ngithub.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=\ngithub.com/stretchr/testify v1.7.2/go.mod h1:R6va5+xMeoiuVRoj+gSkQ7d3FALtqAAGI1FQKckRals=\ngithub.com/stretchr/testify v1.8.0/go.mod 
h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=\ngithub.com/stretchr/testify v1.8.1 h1:w7B6lhMri9wdJUVmEZPGGhZzrYTPvgJArz7wNPgYKsk=\ngithub.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=\ngithub.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo=\ngithub.com/subosito/gotenv v1.4.1 h1:jyEFiXpy21Wm81FBN71l9VoMMV8H8jG+qIK3GCpY6Qs=\ngithub.com/subosito/gotenv v1.4.1/go.mod h1:ayKnFf/c6rvx/2iiLrJUk1e6plDbT3edrFNGqEflhK0=\ngithub.com/tidwall/gjson v1.14.4 h1:uo0p8EbA09J7RQaflQ1aBRffTR7xedD2bcIVSYxLnkM=\ngithub.com/tidwall/gjson v1.14.4/go.mod h1:/wbyibRr2FHMks5tjHJ5F8dMZh3AcwJEMf5vlfC0lxk=\ngithub.com/tidwall/match v1.1.1 h1:+Ho715JplO36QYgwN9PGYNhgZvoUSc9X2c80KVTi+GA=\ngithub.com/tidwall/match v1.1.1/go.mod h1:eRSPERbgtNPcGhD8UCthc6PmLEQXEWd3PRB5JTxsfmM=\ngithub.com/tidwall/pretty v1.2.0/go.mod h1:ITEVvHYasfjBbM0u2Pg8T2nJnzm8xPwvNhhsoaGGjNU=\ngithub.com/tidwall/pretty v1.2.1 h1:qjsOFOWWQl+N3RsoF5/ssm1pHmJJwhjlSbZ51I6wMl4=\ngithub.com/tidwall/pretty v1.2.1/go.mod h1:ITEVvHYasfjBbM0u2Pg8T2nJnzm8xPwvNhhsoaGGjNU=\ngithub.com/ulikunitz/xz v0.5.10 h1:t92gobL9l3HE202wg3rlk19F6X+JOxl9BBrCCMYEYd8=\ngithub.com/ulikunitz/xz v0.5.10/go.mod h1:nbz6k7qbPmH4IRqmfOplQw/tblSgqTqBwxkY0oWt/14=\ngithub.com/vbauerster/mpb/v7 v7.5.3 h1:BkGfmb6nMrrBQDFECR/Q7RkKCw7ylMetCb4079CGs4w=\ngithub.com/vbauerster/mpb/v7 v7.5.3/go.mod h1:i+h4QY6lmLvBNK2ah1fSreiw3ajskRlBp9AhY/PnuOE=\ngithub.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=\ngithub.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=\ngithub.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=\ngithub.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=\ngithub.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k=\ngithub.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY=\ngithub.com/yvasiyarov/go-metrics 
v0.0.0-20140926110328-57bccd1ccd43 h1:+lm10QQTNSBd8DVTNGHx7o/IKu9HYDvLMffDhbyLccI=\ngithub.com/yvasiyarov/gorelic v0.0.0-20141212073537-a9bba5b9ab50 h1:hlE8//ciYMztlGpl/VA+Zm1AcTPHYkHJPbHqE6WJUXE=\ngithub.com/yvasiyarov/newrelic_platform_go v0.0.0-20140908184405-b21fdbd4370f h1:ERexzlUfuTvpE74urLSbIQW0Z/6hF9t8U4NsJLaioAY=\ngithub.com/zenazn/goji v0.9.0/go.mod h1:7S9M489iMyHBNxwZnk9/EHS098H4/F6TATF2mIxtB1Q=\ngo.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU=\ngo.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8=\ngo.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=\ngo.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=\ngo.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=\ngo.opencensus.io v0.22.5/go.mod h1:5pWMHQbX5EPX2/62yrJeAkowc+lfs/XD7Uxpq3pI6kk=\ngo.opencensus.io v0.23.0/go.mod h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E=\ngo.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0=\ngo.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo=\ngo.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI=\ngo.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE=\ngo.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE=\ngo.uber.org/atomic v1.5.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ=\ngo.uber.org/atomic v1.6.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ=\ngo.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc=\ngo.uber.org/atomic v1.10.0 h1:9qC72Qh0+3MqyJbAn8YU5xVq1frD8bn3JtD2oXtafVQ=\ngo.uber.org/atomic v1.10.0/go.mod h1:LUxbIzbOniOlMKjJjyPfpl4v+PKK2cNJn91OQbhoJI0=\ngo.uber.org/goleak v1.1.12 h1:gZAh5/EyT/HQwlpkCy6wTpqfH9H8Lz8zbm3dZh+OyzA=\ngo.uber.org/goleak v1.1.12/go.mod h1:cwTWslyiVhfpKIDGSZEM2HlOvcqm+tG4zioyIeLoqMQ=\ngo.uber.org/multierr v1.1.0/go.mod 
h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0=\ngo.uber.org/multierr v1.3.0/go.mod h1:VgVr7evmIr6uPjLBxg28wmKNXyqE9akIJ5XnfpiKl+4=\ngo.uber.org/multierr v1.5.0/go.mod h1:FeouvMocqHpRaaGuG9EjoKcStLC43Zu/fmqdUMPcKYU=\ngo.uber.org/multierr v1.8.0 h1:dg6GjLku4EH+249NNmoIciG9N/jURbDG+pFlTkhzIC8=\ngo.uber.org/multierr v1.8.0/go.mod h1:7EAYxJLBy9rStEaz58O2t4Uvip6FSURkq8/ppBp95ak=\ngo.uber.org/tools v0.0.0-20190618225709-2cfd321de3ee/go.mod h1:vJERXedbb3MVM5f9Ejo0C68/HhF8uaILCdgjnY+goOA=\ngo.uber.org/zap v1.9.1/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q=\ngo.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q=\ngo.uber.org/zap v1.13.0/go.mod h1:zwrFLgMcdUuIBviXEYEH1YKNaOBnKXsx2IPda5bBwHM=\ngo.uber.org/zap v1.23.0 h1:OjGQ5KQDEUawVHxNwQgPpiypGHOxo2mNZsOqTak4fFY=\ngo.uber.org/zap v1.23.0/go.mod h1:D+nX8jyLsMHMYrln8A0rJjFt/T/9/bGgIhAqxv5URuY=\ngolang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=\ngolang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=\ngolang.org/x/crypto v0.0.0-20190325154230-a5d413f7728c/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=\ngolang.org/x/crypto v0.0.0-20190411191339-88737f569e3a/go.mod h1:WFFai1msRO1wXaEeE5yQxYXgSfI8pQAWXbQop6sCtWE=\ngolang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=\ngolang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=\ngolang.org/x/crypto v0.0.0-20190820162420-60c769a6c586/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=\ngolang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=\ngolang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=\ngolang.org/x/crypto v0.0.0-20201203163018-be400aefbc4c/go.mod 
h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I=\ngolang.org/x/crypto v0.0.0-20210421170649-83a5a9bb288b/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4=\ngolang.org/x/crypto v0.0.0-20210616213533-5ff15b29337e/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=\ngolang.org/x/crypto v0.0.0-20210711020723-a769d52b0f97/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=\ngolang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=\ngolang.org/x/crypto v0.0.0-20211108221036-ceb1ce70b4fa/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=\ngolang.org/x/crypto v0.0.0-20220722155217-630584e8d5aa/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=\ngolang.org/x/crypto v0.7.0 h1:AvwMYaRytfdeVt3u6mLaxYtErKYjxA2OXjJ1HHq6t3A=\ngolang.org/x/crypto v0.7.0/go.mod h1:pYwdfH91IfpZVANVyUOhSIPZaFoJGxTFbZhFTx+dXZU=\ngolang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=\ngolang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=\ngolang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8=\ngolang.org/x/exp v0.0.0-20190829153037-c13cbed26979/go.mod h1:86+5VVa7VpoJ4kLfm080zCjGlMRFzhUhsZKEZO7MGek=\ngolang.org/x/exp v0.0.0-20191030013958-a1ab85dbe136/go.mod h1:JXzH8nQsPlswgeRAPE3MuO9GYsAcnJvJ4vnMwN/5qkY=\ngolang.org/x/exp v0.0.0-20191129062945-2f5052295587/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4=\ngolang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4=\ngolang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4=\ngolang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM=\ngolang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU=\ngolang.org/x/image 
v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js=\ngolang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0=\ngolang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=\ngolang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU=\ngolang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=\ngolang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=\ngolang.org/x/lint v0.0.0-20190409202823-959b441ac422/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=\ngolang.org/x/lint v0.0.0-20190909230951-414d861bb4ac/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=\ngolang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=\ngolang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f/go.mod h1:5qLYkcX4OjUUV8bRuDixDT3tpyyb+LUpUlRWLxfhWrs=\ngolang.org/x/lint v0.0.0-20200130185559-910be7a94367/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY=\ngolang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY=\ngolang.org/x/lint v0.0.0-20201208152925-83fdc39ff7b5/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY=\ngolang.org/x/lint v0.0.0-20210508222113-6edffad5e616/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY=\ngolang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE=\ngolang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o=\ngolang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc=\ngolang.org/x/mod v0.1.0/go.mod h1:0QHyrYULN0/3qlju5TqG8bIK38QM8yzMo5ekMj3DlcY=\ngolang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod 
h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg=\ngolang.org/x/mod v0.1.1-0.20191107180719-034126e5016b/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg=\ngolang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=\ngolang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=\ngolang.org/x/mod v0.4.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=\ngolang.org/x/mod v0.4.1/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=\ngolang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=\ngolang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4=\ngolang.org/x/mod v0.8.0 h1:LUYupSeNrTNCGzR/hVBk2NHZO4hXcVaW1k4Qx7rjPx8=\ngolang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=\ngolang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=\ngolang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=\ngolang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=\ngolang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=\ngolang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=\ngolang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=\ngolang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=\ngolang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=\ngolang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks=\ngolang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=\ngolang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod 
h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=\ngolang.org/x/net v0.0.0-20190628185345-da137c7871d7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=\ngolang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=\ngolang.org/x/net v0.0.0-20190813141303-74dc4d7220e7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=\ngolang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=\ngolang.org/x/net v0.0.0-20200114155413-6afb5195e5aa/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=\ngolang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=\ngolang.org/x/net v0.0.0-20200222125558-5a598a2470a0/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=\ngolang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=\ngolang.org/x/net v0.0.0-20200301022130-244492dfa37a/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=\ngolang.org/x/net v0.0.0-20200324143707-d3edc9973b7e/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=\ngolang.org/x/net v0.0.0-20200501053045-e0ff5e5a1de5/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=\ngolang.org/x/net v0.0.0-20200506145744-7e3656a0809f/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=\ngolang.org/x/net v0.0.0-20200513185701-a91f0712d120/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=\ngolang.org/x/net v0.0.0-20200520182314-0ba52f642ac2/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=\ngolang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=\ngolang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=\ngolang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=\ngolang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod 
h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=\ngolang.org/x/net v0.0.0-20201031054903-ff519b6c9102/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=\ngolang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=\ngolang.org/x/net v0.0.0-20201209123823-ac852fbbde11/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=\ngolang.org/x/net v0.0.0-20201224014010-6772e930b67b/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=\ngolang.org/x/net v0.0.0-20210119194325-5f4716e94777/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=\ngolang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=\ngolang.org/x/net v0.0.0-20210316092652-d523dce5a7f4/go.mod h1:RBQZq4jEuRlivfhVLdyRGr576XBO4/greRjx4P4O3yc=\ngolang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM=\ngolang.org/x/net v0.0.0-20210503060351-7fd8e65b6420/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=\ngolang.org/x/net v0.0.0-20210525063256-abc453219eb5/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=\ngolang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=\ngolang.org/x/net v0.0.0-20220127200216-cd36cc0744dd/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk=\ngolang.org/x/net v0.0.0-20220225172249-27dd8689420f/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk=\ngolang.org/x/net v0.0.0-20220325170049-de3da57026de/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk=\ngolang.org/x/net v0.0.0-20220412020605-290c469a71a5/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk=\ngolang.org/x/net v0.0.0-20220425223048-2871e0cb64e4/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk=\ngolang.org/x/net v0.0.0-20220607020251-c690dde0001d/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=\ngolang.org/x/net v0.0.0-20220617184016-355a448f1bc9/go.mod 
h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=\ngolang.org/x/net v0.0.0-20220624214902-1bab6f366d9e/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=\ngolang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=\ngolang.org/x/net v0.0.0-20220909164309-bea034e7d591/go.mod h1:YDH+HFinaLZZlnHAfSS6ZXJJ9M9t4Dl22yv3iI2vPwk=\ngolang.org/x/net v0.0.0-20221014081412-f15817d10f9b/go.mod h1:YDH+HFinaLZZlnHAfSS6ZXJJ9M9t4Dl22yv3iI2vPwk=\ngolang.org/x/net v0.1.0/go.mod h1:Cx3nUiGt4eDBEyega/BKRp+/AlGL8hYe7U9odMt2Cco=\ngolang.org/x/net v0.8.0 h1:Zrh2ngAOFYneWTAIAPethzeaQLuHwhuBkuV6ZiRnUaQ=\ngolang.org/x/net v0.8.0/go.mod h1:QVkue5JL9kW//ek3r6jTKnTFis1tRmNAW2P1shuFdJc=\ngolang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=\ngolang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=\ngolang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=\ngolang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=\ngolang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=\ngolang.org/x/oauth2 v0.0.0-20200902213428-5d25da1a8d43/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=\ngolang.org/x/oauth2 v0.0.0-20201109201403-9fd604954f58/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=\ngolang.org/x/oauth2 v0.0.0-20201208152858-08078c50e5b5/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=\ngolang.org/x/oauth2 v0.0.0-20210218202405-ba52d332ba99/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=\ngolang.org/x/oauth2 v0.0.0-20210220000619-9bb904979d93/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=\ngolang.org/x/oauth2 v0.0.0-20210313182246-cd4f82c27b84/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=\ngolang.org/x/oauth2 v0.0.0-20210514164344-f6687ab2804c/go.mod 
h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=\ngolang.org/x/oauth2 v0.0.0-20210628180205-a41e5a781914/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=\ngolang.org/x/oauth2 v0.0.0-20210805134026-6f1e6394065a/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=\ngolang.org/x/oauth2 v0.0.0-20210819190943-2bc19b11175f/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=\ngolang.org/x/oauth2 v0.0.0-20211104180415-d3ed0bb246c8/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=\ngolang.org/x/oauth2 v0.0.0-20220223155221-ee480838109b/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc=\ngolang.org/x/oauth2 v0.0.0-20220309155454-6242fa91716a/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc=\ngolang.org/x/oauth2 v0.0.0-20220411215720-9780585627b5/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc=\ngolang.org/x/oauth2 v0.0.0-20220608161450-d0670ef3b1eb/go.mod h1:jaDAt6Dkxork7LmZnYtzbRWj0W47D86a3TGe0YHBvmE=\ngolang.org/x/oauth2 v0.0.0-20220622183110-fd043fe589d2/go.mod h1:jaDAt6Dkxork7LmZnYtzbRWj0W47D86a3TGe0YHBvmE=\ngolang.org/x/oauth2 v0.0.0-20220822191816-0ebed06d0094/go.mod h1:h4gKUeWbJ4rQPri7E0u6Gs4e9Ri2zaLxzw5DI5XGrYg=\ngolang.org/x/oauth2 v0.0.0-20220909003341-f21342109be1/go.mod h1:h4gKUeWbJ4rQPri7E0u6Gs4e9Ri2zaLxzw5DI5XGrYg=\ngolang.org/x/oauth2 v0.0.0-20221014153046-6fdb5e3db783/go.mod h1:h4gKUeWbJ4rQPri7E0u6Gs4e9Ri2zaLxzw5DI5XGrYg=\ngolang.org/x/oauth2 v0.1.0/go.mod h1:G9FE4dLTsbXUu90h/Pf85g4w1D+SSAgR+q46nJZ8M4A=\ngolang.org/x/oauth2 v0.2.0 h1:GtQkldQ9m7yvzCL1V+LrYow3Khe0eJH0w7RbX/VbaIU=\ngolang.org/x/oauth2 v0.2.0/go.mod h1:Cwn6afJ8jrQwYMxQDTpISoXmXW9I6qF6vDeuuoX3Ibs=\ngolang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=\ngolang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=\ngolang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=\ngolang.org/x/sync 
v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=\ngolang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=\ngolang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=\ngolang.org/x/sync v0.0.0-20200317015054-43a5402ce75a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=\ngolang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=\ngolang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=\ngolang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=\ngolang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=\ngolang.org/x/sync v0.0.0-20220601150217-0de741cfad7f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=\ngolang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=\ngolang.org/x/sync v0.0.0-20220929204114-8fcdb60fdcc0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=\ngolang.org/x/sync v0.1.0 h1:wsuoTGHzEhffawBOhz5CYhcrV4IdKZbEyZjBMuTp12o=\ngolang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=\ngolang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=\ngolang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=\ngolang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=\ngolang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=\ngolang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=\ngolang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod 
h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=\ngolang.org/x/sys v0.0.0-20190403152447-81d4e9dc473e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=\ngolang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=\ngolang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=\ngolang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=\ngolang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=\ngolang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=\ngolang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=\ngolang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=\ngolang.org/x/sys v0.0.0-20190801041406-cbf593c0f2f3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=\ngolang.org/x/sys v0.0.0-20190813064441-fde4db37ae7a/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=\ngolang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=\ngolang.org/x/sys v0.0.0-20191008105621-543471e840be/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=\ngolang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=\ngolang.org/x/sys v0.0.0-20191120155948-bd437916bb0e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=\ngolang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=\ngolang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=\ngolang.org/x/sys v0.0.0-20200106162015-b016eb3dc98e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=\ngolang.org/x/sys v0.0.0-20200113162924-86b910548bc1/go.mod 
h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=\ngolang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=\ngolang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=\ngolang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=\ngolang.org/x/sys v0.0.0-20200212091648-12a6c2dcc1e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=\ngolang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=\ngolang.org/x/sys v0.0.0-20200302150141-5c8b2ff67527/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=\ngolang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=\ngolang.org/x/sys v0.0.0-20200331124033-c3d80250170d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=\ngolang.org/x/sys v0.0.0-20200501052902-10377860bb8e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=\ngolang.org/x/sys v0.0.0-20200511232937-7e40ca221e25/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=\ngolang.org/x/sys v0.0.0-20200515095857-1151b9dac4a9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=\ngolang.org/x/sys v0.0.0-20200523222454-059865788121/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=\ngolang.org/x/sys v0.0.0-20200615200032-f1bc736245b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=\ngolang.org/x/sys v0.0.0-20200625212154-ddb9806d33ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=\ngolang.org/x/sys v0.0.0-20200803210538-64077c9b5642/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=\ngolang.org/x/sys v0.0.0-20200905004654-be1d3432aa8f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=\ngolang.org/x/sys v0.0.0-20200909081042-eff7692f9009/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=\ngolang.org/x/sys v0.0.0-20200918174421-af09f7315aff/go.mod 
h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=\ngolang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=\ngolang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=\ngolang.org/x/sys v0.0.0-20201201145000-ef89a241ccb3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=\ngolang.org/x/sys v0.0.0-20210104204734-6f8348627aad/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=\ngolang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=\ngolang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=\ngolang.org/x/sys v0.0.0-20210220050731-9a76102bfb43/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=\ngolang.org/x/sys v0.0.0-20210225134936-a50acf3fe073/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=\ngolang.org/x/sys v0.0.0-20210305230114-8fe3ee5dd75b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=\ngolang.org/x/sys v0.0.0-20210315160823-c6e025ad8005/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=\ngolang.org/x/sys v0.0.0-20210320140829-1e4c9ba3b0c4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=\ngolang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=\ngolang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=\ngolang.org/x/sys v0.0.0-20210423185535-09eb48e85fd7/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=\ngolang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=\ngolang.org/x/sys v0.0.0-20210514084401-e8d321eab015/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=\ngolang.org/x/sys v0.0.0-20210603081109-ebe580a85c40/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=\ngolang.org/x/sys v0.0.0-20210603125802-9665404d3644/go.mod 
h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=\ngolang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=\ngolang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=\ngolang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=\ngolang.org/x/sys v0.0.0-20210806184541-e5e7981a1069/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=\ngolang.org/x/sys v0.0.0-20210823070655-63515b42dcdf/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=\ngolang.org/x/sys v0.0.0-20210908233432-aa78b53d3365/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=\ngolang.org/x/sys v0.0.0-20210927094055-39ccf1dd6fa6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=\ngolang.org/x/sys v0.0.0-20211124211545-fe61309f8881/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=\ngolang.org/x/sys v0.0.0-20211210111614-af8b64212486/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=\ngolang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=\ngolang.org/x/sys v0.0.0-20220114195835-da31bd327af9/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=\ngolang.org/x/sys v0.0.0-20220128215802-99c3d69c2c27/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=\ngolang.org/x/sys v0.0.0-20220209214540-3681064d5158/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=\ngolang.org/x/sys v0.0.0-20220227234510-4e6760a101f9/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=\ngolang.org/x/sys v0.0.0-20220328115105-d36c6a25d886/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=\ngolang.org/x/sys v0.0.0-20220412211240-33da011f77ad/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=\ngolang.org/x/sys v0.0.0-20220502124256-b6088ccd6cba/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=\ngolang.org/x/sys v0.0.0-20220503163025-988cb79eb6c6/go.mod 
h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=\ngolang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=\ngolang.org/x/sys v0.0.0-20220610221304-9f5ed59c137d/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=\ngolang.org/x/sys v0.0.0-20220615213510-4f61da869c0c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=\ngolang.org/x/sys v0.0.0-20220624220833-87e55d714810/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=\ngolang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=\ngolang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=\ngolang.org/x/sys v0.0.0-20220728004956-3c1f35247d10/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=\ngolang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=\ngolang.org/x/sys v0.0.0-20220908164124-27713097b956/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=\ngolang.org/x/sys v0.0.0-20220909162455-aba9fc2a8ff2/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=\ngolang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=\ngolang.org/x/sys v0.6.0 h1:MVltZSvRTcU2ljQOhs94SXPftV6DCNnZViHeQps87pQ=\ngolang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=\ngolang.org/x/sys v0.10.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=\ngolang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw=\ngolang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=\ngolang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=\ngolang.org/x/term v0.1.0/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=\ngolang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=\ngolang.org/x/text v0.3.0/go.mod 
h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=\ngolang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=\ngolang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=\ngolang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=\ngolang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=\ngolang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=\ngolang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=\ngolang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=\ngolang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=\ngolang.org/x/text v0.8.0 h1:57P1ETyNKtuIjB4SRd15iJxuhj8Gc416Y78H3qgMh68=\ngolang.org/x/text v0.8.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8=\ngolang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=\ngolang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=\ngolang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=\ngolang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=\ngolang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=\ngolang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY=\ngolang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=\ngolang.org/x/tools v0.0.0-20190312151545-0bb0c0a6e846/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=\ngolang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=\ngolang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=\ngolang.org/x/tools 
v0.0.0-20190425163242-31fd60d6bfdc/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=\ngolang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=\ngolang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=\ngolang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=\ngolang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=\ngolang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=\ngolang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=\ngolang.org/x/tools v0.0.0-20190823170909-c4a336ef6a2f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=\ngolang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=\ngolang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=\ngolang.org/x/tools v0.0.0-20191029041327-9cc4af7d6b2c/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=\ngolang.org/x/tools v0.0.0-20191029190741-b9c20aec41a5/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=\ngolang.org/x/tools v0.0.0-20191113191852-77e3bb0ad9e7/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=\ngolang.org/x/tools v0.0.0-20191115202509-3a792d9c32b2/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=\ngolang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=\ngolang.org/x/tools v0.0.0-20191125144606-a911d9008d1f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=\ngolang.org/x/tools v0.0.0-20191130070609-6e064ea0cf2d/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=\ngolang.org/x/tools v0.0.0-20191216173652-a0e659d51361/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=\ngolang.org/x/tools 
v0.0.0-20191227053925-7b8e75db28f4/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=\ngolang.org/x/tools v0.0.0-20200103221440-774c71fcf114/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=\ngolang.org/x/tools v0.0.0-20200117161641-43d50277825c/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=\ngolang.org/x/tools v0.0.0-20200122220014-bf1340f18c4a/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=\ngolang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=\ngolang.org/x/tools v0.0.0-20200204074204-1cc6d1ef6c74/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=\ngolang.org/x/tools v0.0.0-20200207183749-b753a1ba74fa/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=\ngolang.org/x/tools v0.0.0-20200212150539-ea181f53ac56/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=\ngolang.org/x/tools v0.0.0-20200224181240-023911ca70b2/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=\ngolang.org/x/tools v0.0.0-20200227222343-706bc42d1f0d/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=\ngolang.org/x/tools v0.0.0-20200304193943-95d2e580d8eb/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw=\ngolang.org/x/tools v0.0.0-20200312045724-11d5b4c81c7d/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw=\ngolang.org/x/tools v0.0.0-20200331025713-a30bf2db82d4/go.mod h1:Sl4aGygMT6LrqrWclx+PTx3U+LnKx/seiNR+3G19Ar8=\ngolang.org/x/tools v0.0.0-20200501065659-ab2804fb9c9d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=\ngolang.org/x/tools v0.0.0-20200512131952-2bc93b1c0c88/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=\ngolang.org/x/tools v0.0.0-20200515010526-7d3b6ebf133d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=\ngolang.org/x/tools v0.0.0-20200618134242-20370b0cb4b2/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=\ngolang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=\ngolang.org/x/tools 
v0.0.0-20200729194436-6467de6f59a7/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA=\ngolang.org/x/tools v0.0.0-20200804011535-6c149bb5ef0d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA=\ngolang.org/x/tools v0.0.0-20200825202427-b303f430e36d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA=\ngolang.org/x/tools v0.0.0-20200904185747-39188db58858/go.mod h1:Cj7w3i3Rnn0Xh82ur9kSqwfTHTeVxaDqrfMjpcNT6bE=\ngolang.org/x/tools v0.0.0-20201110124207-079ba7bd75cd/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=\ngolang.org/x/tools v0.0.0-20201201161351-ac6f37ff4c2a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=\ngolang.org/x/tools v0.0.0-20201208233053-a543418bbed2/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=\ngolang.org/x/tools v0.0.0-20210105154028-b0ab187a4818/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=\ngolang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=\ngolang.org/x/tools v0.0.0-20210108195828-e2f9c7f1fc8e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=\ngolang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0=\ngolang.org/x/tools v0.1.1/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=\ngolang.org/x/tools v0.1.2/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=\ngolang.org/x/tools v0.1.3/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=\ngolang.org/x/tools v0.1.4/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=\ngolang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=\ngolang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc=\ngolang.org/x/tools v0.6.0 h1:BOw41kyTf3PuCW1pVQf8+Cyg8pMlkYB1oo9iJ6D/lKM=\ngolang.org/x/xerrors v0.0.0-20190410155217-1f06c39b4373/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=\ngolang.org/x/xerrors v0.0.0-20190513163551-3ee3066db522/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=\ngolang.org/x/xerrors 
v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=\ngolang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=\ngolang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=\ngolang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=\ngolang.org/x/xerrors v0.0.0-20220411194840-2f41105eb62f/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=\ngolang.org/x/xerrors v0.0.0-20220517211312-f3a8303e98df/go.mod h1:K8+ghG5WaK9qNqU5K3HdILfMLy1f3aNYFI/wnl100a8=\ngolang.org/x/xerrors v0.0.0-20220609144429-65e65417b02f/go.mod h1:K8+ghG5WaK9qNqU5K3HdILfMLy1f3aNYFI/wnl100a8=\ngolang.org/x/xerrors v0.0.0-20220907171357-04be3eba64a2 h1:H2TDz8ibqkAF6YGhCdN3jS9O0/s90v0rJh3X/OLHEUk=\ngolang.org/x/xerrors v0.0.0-20220907171357-04be3eba64a2/go.mod h1:K8+ghG5WaK9qNqU5K3HdILfMLy1f3aNYFI/wnl100a8=\ngoogle.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE=\ngoogle.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M=\ngoogle.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg=\ngoogle.golang.org/api v0.9.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg=\ngoogle.golang.org/api v0.13.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI=\ngoogle.golang.org/api v0.14.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI=\ngoogle.golang.org/api v0.15.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI=\ngoogle.golang.org/api v0.17.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=\ngoogle.golang.org/api v0.18.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=\ngoogle.golang.org/api v0.19.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=\ngoogle.golang.org/api v0.20.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=\ngoogle.golang.org/api v0.22.0/go.mod 
h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=\ngoogle.golang.org/api v0.24.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE=\ngoogle.golang.org/api v0.28.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE=\ngoogle.golang.org/api v0.29.0/go.mod h1:Lcubydp8VUV7KeIHD9z2Bys/sm/vGKnG1UHuDBSrHWM=\ngoogle.golang.org/api v0.30.0/go.mod h1:QGmEvQ87FHZNiUVJkT14jQNYJ4ZJjdRF23ZXz5138Fc=\ngoogle.golang.org/api v0.35.0/go.mod h1:/XrVsuzM0rZmrsbjJutiuftIzeuTQcEeaYcSk/mQ1dg=\ngoogle.golang.org/api v0.36.0/go.mod h1:+z5ficQTmoYpPn8LCUNVpK5I7hwkpjbcgqA7I34qYtE=\ngoogle.golang.org/api v0.40.0/go.mod h1:fYKFpnQN0DsDSKRVRcQSDQNtqWPfM9i+zNPxepjRCQ8=\ngoogle.golang.org/api v0.41.0/go.mod h1:RkxM5lITDfTzmyKFPt+wGrCJbVfniCr2ool8kTBzRTU=\ngoogle.golang.org/api v0.43.0/go.mod h1:nQsDGjRXMo4lvh5hP0TKqF244gqhGcr/YSIykhUk/94=\ngoogle.golang.org/api v0.47.0/go.mod h1:Wbvgpq1HddcWVtzsVLyfLp8lDg6AA241LmgIL59tHXo=\ngoogle.golang.org/api v0.48.0/go.mod h1:71Pr1vy+TAZRPkPs/xlCf5SsU8WjuAWv1Pfjbtukyy4=\ngoogle.golang.org/api v0.50.0/go.mod h1:4bNT5pAuq5ji4SRZm+5QIkjny9JAyVD/3gaSihNefaw=\ngoogle.golang.org/api v0.51.0/go.mod h1:t4HdrdoNgyN5cbEfm7Lum0lcLDLiise1F8qDKX00sOU=\ngoogle.golang.org/api v0.54.0/go.mod h1:7C4bFFOvVDGXjfDTAsgGwDgAxRDeQ4X8NvUedIt6z3k=\ngoogle.golang.org/api v0.55.0/go.mod h1:38yMfeP1kfjsl8isn0tliTjIb1rJXcQi4UXlbqivdVE=\ngoogle.golang.org/api v0.56.0/go.mod h1:38yMfeP1kfjsl8isn0tliTjIb1rJXcQi4UXlbqivdVE=\ngoogle.golang.org/api v0.57.0/go.mod h1:dVPlbZyBo2/OjBpmvNdpn2GRm6rPy75jyU7bmhdrMgI=\ngoogle.golang.org/api v0.61.0/go.mod h1:xQRti5UdCmoCEqFxcz93fTl338AVqDgyaDRuOZ3hg9I=\ngoogle.golang.org/api v0.63.0/go.mod h1:gs4ij2ffTRXwuzzgJl/56BdwJaA194ijkfn++9tDuPo=\ngoogle.golang.org/api v0.67.0/go.mod h1:ShHKP8E60yPsKNw/w8w+VYaj9H6buA5UqDp8dhbQZ6g=\ngoogle.golang.org/api v0.70.0/go.mod h1:Bs4ZM2HGifEvXwd50TtW70ovgJffJYw2oRCOFU/SkfA=\ngoogle.golang.org/api v0.71.0/go.mod h1:4PyU6e6JogV1f9eA4voyrTY2batOLdgZ5qZ5HOCc4j8=\ngoogle.golang.org/api v0.74.0/go.mod 
h1:ZpfMZOVRMywNyvJFeqL9HRWBgAuRfSjJFpe9QtRRyDs=\ngoogle.golang.org/api v0.75.0/go.mod h1:pU9QmyHLnzlpar1Mjt4IbapUCy8J+6HD6GeELN69ljA=\ngoogle.golang.org/api v0.77.0/go.mod h1:pU9QmyHLnzlpar1Mjt4IbapUCy8J+6HD6GeELN69ljA=\ngoogle.golang.org/api v0.78.0/go.mod h1:1Sg78yoMLOhlQTeF+ARBoytAcH1NNyyl390YMy6rKmw=\ngoogle.golang.org/api v0.80.0/go.mod h1:xY3nI94gbvBrE0J6NHXhxOmW97HG7Khjkku6AFB3Hyg=\ngoogle.golang.org/api v0.84.0/go.mod h1:NTsGnUFJMYROtiquksZHBWtHfeMC7iYthki7Eq3pa8o=\ngoogle.golang.org/api v0.85.0/go.mod h1:AqZf8Ep9uZ2pyTvgL+x0D3Zt0eoT9b5E8fmzfu6FO2g=\ngoogle.golang.org/api v0.90.0/go.mod h1:+Sem1dnrKlrXMR/X0bPnMWyluQe4RsNoYfmNLhOIkzw=\ngoogle.golang.org/api v0.93.0/go.mod h1:+Sem1dnrKlrXMR/X0bPnMWyluQe4RsNoYfmNLhOIkzw=\ngoogle.golang.org/api v0.95.0/go.mod h1:eADj+UBuxkh5zlrSntJghuNeg8HwQ1w5lTKkuqaETEI=\ngoogle.golang.org/api v0.96.0/go.mod h1:w7wJQLTM+wvQpNf5JyEcBoxK0RH7EDrh/L4qfsuJ13s=\ngoogle.golang.org/api v0.97.0/go.mod h1:w7wJQLTM+wvQpNf5JyEcBoxK0RH7EDrh/L4qfsuJ13s=\ngoogle.golang.org/api v0.98.0/go.mod h1:w7wJQLTM+wvQpNf5JyEcBoxK0RH7EDrh/L4qfsuJ13s=\ngoogle.golang.org/api v0.100.0/go.mod h1:ZE3Z2+ZOr87Rx7dqFsdRQkRBk36kDtp/h+QpHbB7a70=\ngoogle.golang.org/api v0.103.0 h1:9yuVqlu2JCvcLg9p8S3fcFLZij8EPSyvODIY1rkMizQ=\ngoogle.golang.org/api v0.103.0/go.mod h1:hGtW6nK1AC+d9si/UBhw8Xli+QMOf6xyNAyJw4qU9w0=\ngoogle.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=\ngoogle.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=\ngoogle.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=\ngoogle.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0=\ngoogle.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc=\ngoogle.golang.org/appengine v1.6.6/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc=\ngoogle.golang.org/appengine v1.6.7 h1:FZR1q0exgwxzPzp/aF+VccGrSfxfPpkBqjIIEq3ru6c=\ngoogle.golang.org/appengine 
v1.6.7/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc=\ngoogle.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=\ngoogle.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=\ngoogle.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=\ngoogle.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=\ngoogle.golang.org/genproto v0.0.0-20190502173448-54afdca5d873/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=\ngoogle.golang.org/genproto v0.0.0-20190801165951-fa694d86fc64/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=\ngoogle.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=\ngoogle.golang.org/genproto v0.0.0-20190911173649-1774047e7e51/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8=\ngoogle.golang.org/genproto v0.0.0-20191108220845-16a3f7862a1a/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=\ngoogle.golang.org/genproto v0.0.0-20191115194625-c23dd37a84c9/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=\ngoogle.golang.org/genproto v0.0.0-20191216164720-4f79533eabd1/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=\ngoogle.golang.org/genproto v0.0.0-20191230161307-f3c370f40bfb/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=\ngoogle.golang.org/genproto v0.0.0-20200115191322-ca5a22157cba/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=\ngoogle.golang.org/genproto v0.0.0-20200122232147-0452cf42e150/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=\ngoogle.golang.org/genproto v0.0.0-20200204135345-fa8e72b47b90/go.mod h1:GmwEX6Z4W5gMy59cAlVYjN9JhxgbQH6Gn+gFDQe2lzA=\ngoogle.golang.org/genproto v0.0.0-20200212174721-66ed5ce911ce/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=\ngoogle.golang.org/genproto 
v0.0.0-20200224152610-e50cd9704f63/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=\ngoogle.golang.org/genproto v0.0.0-20200228133532-8c2c7df3a383/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=\ngoogle.golang.org/genproto v0.0.0-20200305110556-506484158171/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=\ngoogle.golang.org/genproto v0.0.0-20200312145019-da6875a35672/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=\ngoogle.golang.org/genproto v0.0.0-20200331122359-1ee6d9798940/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=\ngoogle.golang.org/genproto v0.0.0-20200430143042-b979b6f78d84/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=\ngoogle.golang.org/genproto v0.0.0-20200511104702-f5ebc3bea380/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=\ngoogle.golang.org/genproto v0.0.0-20200513103714-09dca8ec2884/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=\ngoogle.golang.org/genproto v0.0.0-20200515170657-fc4c6c6a6587/go.mod h1:YsZOwe1myG/8QRHRsmBRE1LrgQY60beZKjly0O1fX9U=\ngoogle.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo=\ngoogle.golang.org/genproto v0.0.0-20200618031413-b414f8b61790/go.mod h1:jDfRM7FcilCzHH/e9qn6dsT145K34l5v+OpcnNgKAAA=\ngoogle.golang.org/genproto v0.0.0-20200729003335-053ba62fc06f/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=\ngoogle.golang.org/genproto v0.0.0-20200804131852-c06518451d9c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=\ngoogle.golang.org/genproto v0.0.0-20200825200019-8632dd797987/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=\ngoogle.golang.org/genproto v0.0.0-20200904004341-0bd0a958aa1d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=\ngoogle.golang.org/genproto v0.0.0-20201109203340-2640f1f9cdfb/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=\ngoogle.golang.org/genproto v0.0.0-20201201144952-b05cb90ed32e/go.mod 
h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=\ngoogle.golang.org/genproto v0.0.0-20201210142538-e3217bee35cc/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=\ngoogle.golang.org/genproto v0.0.0-20201214200347-8c77b98c765d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=\ngoogle.golang.org/genproto v0.0.0-20210108203827-ffc7fda8c3d7/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=\ngoogle.golang.org/genproto v0.0.0-20210222152913-aa3ee6e6a81c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=\ngoogle.golang.org/genproto v0.0.0-20210226172003-ab064af71705/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=\ngoogle.golang.org/genproto v0.0.0-20210303154014-9728d6b83eeb/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=\ngoogle.golang.org/genproto v0.0.0-20210310155132-4ce2db91004e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=\ngoogle.golang.org/genproto v0.0.0-20210319143718-93e7006c17a6/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=\ngoogle.golang.org/genproto v0.0.0-20210329143202-679c6ae281ee/go.mod h1:9lPAdzaEmUacj36I+k7YKbEc5CXzPIeORRgDAUOu28A=\ngoogle.golang.org/genproto v0.0.0-20210402141018-6c239bbf2bb1/go.mod h1:9lPAdzaEmUacj36I+k7YKbEc5CXzPIeORRgDAUOu28A=\ngoogle.golang.org/genproto v0.0.0-20210513213006-bf773b8c8384/go.mod h1:P3QM42oQyzQSnHPnZ/vqoCdDmzH28fzWByN9asMeM8A=\ngoogle.golang.org/genproto v0.0.0-20210602131652-f16073e35f0c/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0=\ngoogle.golang.org/genproto v0.0.0-20210604141403-392c879c8b08/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0=\ngoogle.golang.org/genproto v0.0.0-20210608205507-b6d2f5bf0d7d/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0=\ngoogle.golang.org/genproto v0.0.0-20210624195500-8bfb893ecb84/go.mod h1:SzzZ/N+nwJDaO1kznhnlzqS8ocJICar6hYhVyhi++24=\ngoogle.golang.org/genproto v0.0.0-20210713002101-d411969a0d9a/go.mod h1:AxrInvYm1dci+enl5hChSFPOmmUF1+uAa/UsgNRWd7k=\ngoogle.golang.org/genproto 
v0.0.0-20210716133855-ce7ef5c701ea/go.mod h1:AxrInvYm1dci+enl5hChSFPOmmUF1+uAa/UsgNRWd7k=\ngoogle.golang.org/genproto v0.0.0-20210728212813-7823e685a01f/go.mod h1:ob2IJxKrgPT52GcgX759i1sleT07tiKowYBGbczaW48=\ngoogle.golang.org/genproto v0.0.0-20210805201207-89edb61ffb67/go.mod h1:ob2IJxKrgPT52GcgX759i1sleT07tiKowYBGbczaW48=\ngoogle.golang.org/genproto v0.0.0-20210813162853-db860fec028c/go.mod h1:cFeNkxwySK631ADgubI+/XFU/xp8FD5KIVV4rj8UC5w=\ngoogle.golang.org/genproto v0.0.0-20210821163610-241b8fcbd6c8/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY=\ngoogle.golang.org/genproto v0.0.0-20210828152312-66f60bf46e71/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY=\ngoogle.golang.org/genproto v0.0.0-20210831024726-fe130286e0e2/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY=\ngoogle.golang.org/genproto v0.0.0-20210903162649-d08c68adba83/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY=\ngoogle.golang.org/genproto v0.0.0-20210909211513-a8c4777a87af/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY=\ngoogle.golang.org/genproto v0.0.0-20210924002016-3dee208752a0/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc=\ngoogle.golang.org/genproto v0.0.0-20211118181313-81c1377c94b1/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc=\ngoogle.golang.org/genproto v0.0.0-20211206160659-862468c7d6e0/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc=\ngoogle.golang.org/genproto v0.0.0-20211208223120-3a66f561d7aa/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc=\ngoogle.golang.org/genproto v0.0.0-20211221195035-429b39de9b1c/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc=\ngoogle.golang.org/genproto v0.0.0-20220126215142-9970aeb2e350/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc=\ngoogle.golang.org/genproto v0.0.0-20220207164111-0872dc986b00/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc=\ngoogle.golang.org/genproto v0.0.0-20220218161850-94dd64e39d7c/go.mod 
h1:kGP+zUP2Ddo0ayMi4YuN7C3WZyJvGLZRh8Z5wnAqvEI=\ngoogle.golang.org/genproto v0.0.0-20220222213610-43724f9ea8cf/go.mod h1:kGP+zUP2Ddo0ayMi4YuN7C3WZyJvGLZRh8Z5wnAqvEI=\ngoogle.golang.org/genproto v0.0.0-20220304144024-325a89244dc8/go.mod h1:kGP+zUP2Ddo0ayMi4YuN7C3WZyJvGLZRh8Z5wnAqvEI=\ngoogle.golang.org/genproto v0.0.0-20220310185008-1973136f34c6/go.mod h1:kGP+zUP2Ddo0ayMi4YuN7C3WZyJvGLZRh8Z5wnAqvEI=\ngoogle.golang.org/genproto v0.0.0-20220324131243-acbaeb5b85eb/go.mod h1:hAL49I2IFola2sVEjAn7MEwsja0xp51I0tlGAf9hz4E=\ngoogle.golang.org/genproto v0.0.0-20220407144326-9054f6ed7bac/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo=\ngoogle.golang.org/genproto v0.0.0-20220413183235-5e96e2839df9/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo=\ngoogle.golang.org/genproto v0.0.0-20220414192740-2d67ff6cf2b4/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo=\ngoogle.golang.org/genproto v0.0.0-20220421151946-72621c1f0bd3/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo=\ngoogle.golang.org/genproto v0.0.0-20220429170224-98d788798c3e/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo=\ngoogle.golang.org/genproto v0.0.0-20220502173005-c8bf987b8c21/go.mod h1:RAyBrSAP7Fh3Nc84ghnVLDPuV51xc9agzmm4Ph6i0Q4=\ngoogle.golang.org/genproto v0.0.0-20220505152158-f39f71e6c8f3/go.mod h1:RAyBrSAP7Fh3Nc84ghnVLDPuV51xc9agzmm4Ph6i0Q4=\ngoogle.golang.org/genproto v0.0.0-20220518221133-4f43b3371335/go.mod h1:RAyBrSAP7Fh3Nc84ghnVLDPuV51xc9agzmm4Ph6i0Q4=\ngoogle.golang.org/genproto v0.0.0-20220523171625-347a074981d8/go.mod h1:RAyBrSAP7Fh3Nc84ghnVLDPuV51xc9agzmm4Ph6i0Q4=\ngoogle.golang.org/genproto v0.0.0-20220608133413-ed9918b62aac/go.mod h1:KEWEmljWE5zPzLBa/oHl6DaEt9LmfH6WtH1OHIvleBA=\ngoogle.golang.org/genproto v0.0.0-20220616135557-88e70c0c3a90/go.mod h1:KEWEmljWE5zPzLBa/oHl6DaEt9LmfH6WtH1OHIvleBA=\ngoogle.golang.org/genproto v0.0.0-20220617124728-180714bec0ad/go.mod h1:KEWEmljWE5zPzLBa/oHl6DaEt9LmfH6WtH1OHIvleBA=\ngoogle.golang.org/genproto 
v0.0.0-20220624142145-8cd45d7dbd1f/go.mod h1:KEWEmljWE5zPzLBa/oHl6DaEt9LmfH6WtH1OHIvleBA=\ngoogle.golang.org/genproto v0.0.0-20220628213854-d9e0b6570c03/go.mod h1:KEWEmljWE5zPzLBa/oHl6DaEt9LmfH6WtH1OHIvleBA=\ngoogle.golang.org/genproto v0.0.0-20220722212130-b98a9ff5e252/go.mod h1:GkXuJDJ6aQ7lnJcRF+SJVgFdQhypqgl3LB1C9vabdRE=\ngoogle.golang.org/genproto v0.0.0-20220801145646-83ce21fca29f/go.mod h1:iHe1svFLAZg9VWz891+QbRMwUv9O/1Ww+/mngYeThbc=\ngoogle.golang.org/genproto v0.0.0-20220815135757-37a418bb8959/go.mod h1:dbqgFATTzChvnt+ujMdZwITVAJHFtfyN1qUhDqEiIlk=\ngoogle.golang.org/genproto v0.0.0-20220817144833-d7fd3f11b9b1/go.mod h1:dbqgFATTzChvnt+ujMdZwITVAJHFtfyN1qUhDqEiIlk=\ngoogle.golang.org/genproto v0.0.0-20220822174746-9e6da59bd2fc/go.mod h1:dbqgFATTzChvnt+ujMdZwITVAJHFtfyN1qUhDqEiIlk=\ngoogle.golang.org/genproto v0.0.0-20220829144015-23454907ede3/go.mod h1:dbqgFATTzChvnt+ujMdZwITVAJHFtfyN1qUhDqEiIlk=\ngoogle.golang.org/genproto v0.0.0-20220829175752-36a9c930ecbf/go.mod h1:dbqgFATTzChvnt+ujMdZwITVAJHFtfyN1qUhDqEiIlk=\ngoogle.golang.org/genproto v0.0.0-20220913154956-18f8339a66a5/go.mod h1:0Nb8Qy+Sk5eDzHnzlStwW3itdNaWoZA5XeSG+R3JHSo=\ngoogle.golang.org/genproto v0.0.0-20220914142337-ca0e39ece12f/go.mod h1:0Nb8Qy+Sk5eDzHnzlStwW3itdNaWoZA5XeSG+R3JHSo=\ngoogle.golang.org/genproto v0.0.0-20220915135415-7fd63a7952de/go.mod h1:0Nb8Qy+Sk5eDzHnzlStwW3itdNaWoZA5XeSG+R3JHSo=\ngoogle.golang.org/genproto v0.0.0-20220916172020-2692e8806bfa/go.mod h1:0Nb8Qy+Sk5eDzHnzlStwW3itdNaWoZA5XeSG+R3JHSo=\ngoogle.golang.org/genproto v0.0.0-20220919141832-68c03719ef51/go.mod h1:0Nb8Qy+Sk5eDzHnzlStwW3itdNaWoZA5XeSG+R3JHSo=\ngoogle.golang.org/genproto v0.0.0-20220920201722-2b89144ce006/go.mod h1:ht8XFiar2npT/g4vkk7O0WYS1sHOHbdujxbEp7CJWbw=\ngoogle.golang.org/genproto v0.0.0-20220926165614-551eb538f295/go.mod h1:woMGP53BroOrRY3xTxlbr8Y3eB/nzAvvFM83q7kG2OI=\ngoogle.golang.org/genproto v0.0.0-20220926220553-6981cbe3cfce/go.mod 
h1:woMGP53BroOrRY3xTxlbr8Y3eB/nzAvvFM83q7kG2OI=\ngoogle.golang.org/genproto v0.0.0-20221010155953-15ba04fc1c0e/go.mod h1:3526vdqwhZAwq4wsRUaVG555sVgsNmIjRtO7t/JH29U=\ngoogle.golang.org/genproto v0.0.0-20221014173430-6e2ab493f96b/go.mod h1:1vXfmgAz9N9Jx0QA82PqRVauvCz1SGSz739p0f183jM=\ngoogle.golang.org/genproto v0.0.0-20221014213838-99cd37c6964a/go.mod h1:1vXfmgAz9N9Jx0QA82PqRVauvCz1SGSz739p0f183jM=\ngoogle.golang.org/genproto v0.0.0-20221025140454-527a21cfbd71/go.mod h1:9qHF0xnpdSfF6knlcsnpzUu5y+rpwgbvsyGAZPBMg4s=\ngoogle.golang.org/genproto v0.0.0-20221118155620-16455021b5e6 h1:a2S6M0+660BgMNl++4JPlcAO/CjkqYItDEZwkoDQK7c=\ngoogle.golang.org/genproto v0.0.0-20221118155620-16455021b5e6/go.mod h1:rZS5c/ZVYMaOGBfO68GWtjOw/eLaZM1X6iVtgjZ+EWg=\ngoogle.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=\ngoogle.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38=\ngoogle.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM=\ngoogle.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg=\ngoogle.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY=\ngoogle.golang.org/grpc v1.26.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=\ngoogle.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=\ngoogle.golang.org/grpc v1.27.1/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=\ngoogle.golang.org/grpc v1.28.0/go.mod h1:rpkK4SK4GF4Ach/+MFLZUBavHOvF2JJB5uozKKal+60=\ngoogle.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3IjizoKk=\ngoogle.golang.org/grpc v1.30.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak=\ngoogle.golang.org/grpc v1.31.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak=\ngoogle.golang.org/grpc v1.31.1/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak=\ngoogle.golang.org/grpc v1.33.1/go.mod h1:fr5YgcSWrqhRRxogOsw7RzIpsmvOZ6IcH4kBYTpR3n0=\ngoogle.golang.org/grpc 
v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc=\ngoogle.golang.org/grpc v1.34.0/go.mod h1:WotjhfgOW/POjDeRt8vscBtXq+2VjORFy659qA51WJ8=\ngoogle.golang.org/grpc v1.35.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU=\ngoogle.golang.org/grpc v1.36.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU=\ngoogle.golang.org/grpc v1.36.1/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU=\ngoogle.golang.org/grpc v1.37.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM=\ngoogle.golang.org/grpc v1.37.1/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM=\ngoogle.golang.org/grpc v1.38.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM=\ngoogle.golang.org/grpc v1.39.0/go.mod h1:PImNr+rS9TWYb2O4/emRugxiyHZ5JyHW5F+RPnDzfrE=\ngoogle.golang.org/grpc v1.39.1/go.mod h1:PImNr+rS9TWYb2O4/emRugxiyHZ5JyHW5F+RPnDzfrE=\ngoogle.golang.org/grpc v1.40.0/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34=\ngoogle.golang.org/grpc v1.40.1/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34=\ngoogle.golang.org/grpc v1.44.0/go.mod h1:k+4IHHFw41K8+bbowsex27ge2rCb65oeWqe4jJ590SU=\ngoogle.golang.org/grpc v1.45.0/go.mod h1:lN7owxKUQEqMfSyQikvvk5tf/6zMPsrK+ONuO11+0rQ=\ngoogle.golang.org/grpc v1.46.0/go.mod h1:vN9eftEi1UMyUsIF80+uQXhHjbXYbm0uXoFCACuMGWk=\ngoogle.golang.org/grpc v1.46.2/go.mod h1:vN9eftEi1UMyUsIF80+uQXhHjbXYbm0uXoFCACuMGWk=\ngoogle.golang.org/grpc v1.47.0/go.mod h1:vN9eftEi1UMyUsIF80+uQXhHjbXYbm0uXoFCACuMGWk=\ngoogle.golang.org/grpc v1.48.0/go.mod h1:vN9eftEi1UMyUsIF80+uQXhHjbXYbm0uXoFCACuMGWk=\ngoogle.golang.org/grpc v1.49.0/go.mod h1:ZgQEeidpAuNRZ8iRrlBKXZQP1ghovWIVhdJRyCDK+GI=\ngoogle.golang.org/grpc v1.50.0/go.mod h1:ZgQEeidpAuNRZ8iRrlBKXZQP1ghovWIVhdJRyCDK+GI=\ngoogle.golang.org/grpc v1.50.1/go.mod h1:ZgQEeidpAuNRZ8iRrlBKXZQP1ghovWIVhdJRyCDK+GI=\ngoogle.golang.org/grpc v1.51.0 h1:E1eGv1FTqoLIdnBCZufiSHgKjlqG6fKFf6pPWtMTh8U=\ngoogle.golang.org/grpc v1.51.0/go.mod 
h1:wgNDFcnuBGmxLKI/qn4T+m5BtEBYXJPvibbUPsAIPww=\ngoogle.golang.org/grpc/cmd/protoc-gen-go-grpc v1.1.0/go.mod h1:6Kw0yEErY5E/yWrBtf03jp27GLLJujG4z/JK95pnjjw=\ngoogle.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=\ngoogle.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=\ngoogle.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM=\ngoogle.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE=\ngoogle.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo=\ngoogle.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=\ngoogle.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=\ngoogle.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=\ngoogle.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGjtUeSXeh4=\ngoogle.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c=\ngoogle.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw=\ngoogle.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=\ngoogle.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=\ngoogle.golang.org/protobuf v1.28.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I=\ngoogle.golang.org/protobuf v1.28.1 h1:d0NfwRgPtno5B1Wa6L2DAG+KivqkdutMf1UhdNx175w=\ngoogle.golang.org/protobuf v1.28.1/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I=\ngopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw=\ngopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=\ngopkg.in/check.v1 
v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=\ngopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=\ngopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=\ngopkg.in/cheggaaa/pb.v1 v1.0.27/go.mod h1:V/YB90LKu/1FcN3WVnfiiE5oMCibMjukxqG/qStrOgw=\ngopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI=\ngopkg.in/inconshreveable/log15.v2 v2.0.0-20180818164646-67afb5ed74ec/go.mod h1:aPpfJ7XW+gOuirDoZ8gHhLh3kZ1B08FtV2bbmy7Jv3s=\ngopkg.in/ini.v1 v1.67.0 h1:Dgnx+6+nfE+IfzjUEISNeydPJh9AXNNsWbGP9KzCsOA=\ngopkg.in/ini.v1 v1.67.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k=\ngopkg.in/natefinch/lumberjack.v2 v2.0.0 h1:1Lc07Kr7qY4U2YPouBjpCLxpiyxIVoxqXgkXLknAOE8=\ngopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=\ngopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=\ngopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=\ngopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=\ngopkg.in/yaml.v2 v2.2.5/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=\ngopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=\ngopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=\ngopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY=\ngopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ=\ngopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=\ngopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=\ngopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=\ngopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=\ngotest.tools/v3 v3.0.3 
h1:4AuOwCGf4lLR9u3YOe2awrHygurzhO/HeQ6laiA6Sx0=\nhonnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=\nhonnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=\nhonnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=\nhonnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=\nhonnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg=\nhonnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k=\nhonnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k=\noras.land/oras-go v1.2.1 h1:/VcGS8FUy3eEXLl/1vC4QypLHwrfSmgW7ygsoklqKK8=\noras.land/oras-go v1.2.1/go.mod h1:3N11Z5E3c4ZzOjroCl1RtAdB4yNAYl7A27j2SVf913A=\nrsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8=\nrsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0=\nrsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA=\n"
  },
  {
    "path": "install.sh",
    "content": "#!/bin/bash\n#######################################################################################################################\n#                                                                                                                     #\n#            This is Selefra's one-click installation script that supports both MacOS and Linux                       #\n#                                                                                                                     #\n#                                                                                                                     #\n#                                                                                                   Version: 0.0.1    #\n#                                                                                                                     #\n#######################################################################################################################\n\n# ---------------------------------------------------------- init ------------------------------------------------------\n\nRESET=\"\\\\033[0m\"\nRED=\"\\\\033[31;1m\"\nGREEN=\"\\\\033[32;1m\"\nYELLOW=\"\\\\033[33;1m\"\nBLUE=\"\\\\033[34;1m\"\nWHITE=\"\\\\033[37;1m\"\n\nsay_green()\n{\n    [ -z \"${SILENT}\" ] && printf \"%b%s%b\\\\n\" \"${GREEN}\" \"$1\" \"${RESET}\"\n    return 0\n}\n\nsay_red()\n{\n    printf \"%b%s%b\\\\n\" \"${RED}\" \"$1\" \"${RESET}\"\n}\n\nsay_yellow()\n{\n    [ -z \"${SILENT}\" ] && printf \"%b%s%b\\\\n\" \"${YELLOW}\" \"$1\" \"${RESET}\"\n    return 0\n}\n\nsay_blue()\n{\n    [ -z \"${SILENT}\" ] && printf \"%b%s%b\\\\n\" \"${BLUE}\" \"$1\" \"${RESET}\"\n    return 0\n}\n\nsay_white()\n{\n    [ -z \"${SILENT}\" ] && printf \"%b%s%b\\\\n\" \"${WHITE}\" \"$1\" \"${RESET}\"\n    return 0\n}\n\nat_exit()\n{\n    if [ \"$?\" -ne 0 ]; then\n        >&2 say_red\n        >&2 say_red \"We're sorry, but it looks like something might have gone wrong during 
installation.\"\n        >&2 say_red \"If you need help, please join us on https://www.selefra.io/community/join\"\n    fi\n}\n\n# TODO Add boot path for source compilation\nprint_unsupported_platform() {\n    >&2 say_red \"error: We're sorry, but it looks like selefra is not supported on your platform\"\n    >&2 say_red \"       We support 64-bit versions of Linux and macOS and are interested in supporting\"\n    >&2 say_red \"       more platforms.  Please open an issue at https://github.com/selefra/selefra/issues\"\n    >&2 say_red \"       and let us know what platform you're using!\"\n}\n\n# get os\nOS=\"\"\ncase $(uname) in\n    \"Linux\") OS=\"linux\";;\n    \"Darwin\") OS=\"darwin\";;\n    *)\n        print_unsupported_platform\n        exit 1\n        ;;\nesac\nsay_blue \"OS: ${OS}\"\n\n# get arch\nARCH=\"\"\ncase $(uname -m) in\n    \"x86_64\") ARCH=\"amd64\";;\n    \"arm64\") ARCH=\"arm64\";;\n    \"aarch64\") ARCH=\"arm64\";;\n    *)\n        print_unsupported_platform\n        exit 1\n        ;;\nesac\nsay_blue \"ARCH: ${ARCH}\"\n\n# ---------------------------------------------------------- check env -------------------------------------------------\n\n# check wget\nwhich wget\nif [ $? -ne 0 ]; then\n\tsay_red \"Sorry, you must have wget installed to use this script\"\n\tsay_red \"wget GNU repo: https://ftp.gnu.org/gnu/wget/\"\n\texit\nfi\n\n# check unzip tools by os\nunzip_command=\"\"\nfile_suffix=\"\"\ncase $OS in\n    \"linux\")\n\t# check tar\n\twhich tar\n\tif [ $? -ne 0 ]; then\n\t        say_red \"Sorry, you must have tar installed to use this script\"\n\t       \tsay_red \"tar homepage: https://www.gnu.org/software/tar/\"\n\t        exit\n\tfi\n\tunzip_command=\"tar zxvf\"\n\tfile_suffix=\".tar.gz\"\n\t;;\n    \"darwin\")\n\t# check tar\n\twhich unzip\n\tif [ $? 
-ne 0 ]; then\n\t\t# TODO Added boot link to install unzip\n\t\tsay_red \"Sorry, you must have unzip installed to use this script\"\n\t\texit\n\tfi\n\tunzip_command=\"unzip\"\n\tfile_suffix=\".zip\"\n\t;;\nesac\n\n# ---------------------------------------------------------- download --------------------------------------------------\n\ntrap at_exit EXIT\n\nsay_blue \"begin download selefra...\"\n# download\ndownload_url=\"https://github.com/selefra/selefra/releases/latest/download/selefra_${OS}_${ARCH}${file_suffix}\"\nsay_blue \"download selefra installation file from $download_url ...\"\ndownload_save_path=\"./selefra_${OS}_${ARCH}${file_suffix}\"\nwget -t 30 -T 60 $download_url -O $download_save_path\nif [ $? -ne 0 ]; then\n  say_red \"selefra installation file download failed, please check your network and try again!\"\n  exit\nfi\nsay_green \"selefra installation file download success!\"\n\n# ---------------------------------------------------------- install ---------------------------------------------------\n\n# unzip\nsay_blue \"begin unzip selefra installation file...\"\n$unzip_command $download_save_path\nsay_green \"unzip selefra installation file success!\"\n\n# Consider it fixed for now, update the policy here if it changes in the future\nselefra_executable_file_path=\"./selefra\"\n\n# add $PATH , need to add more path judgment? I don't know...\nif [[ $PATH =~ \"/usr/local/bin\" ]]; then\n\n\tcopy_to_path=\"/usr/local/bin/selefra\"\n\n\t# If it already exists, try to remove it\n\tif  [ -f \"${copy_to_path}\" ]; then\n\n\t\tcase $OS in\n\t\t\"linux\")\n\t\tsudo rm -rf $copy_to_path\n\t\t;;\n\t    \t\"darwin\")\n\t\trm -rf $copy_to_path\n\t\t;;\n\t\tesac\n\n\t\tif [ $? -ne 0 ]; then\n      say_red \"The ${copy_to_path} file already exists and cannot be deleted. 
please manually update the ${copy_to_path}\"\n\t\t  exit\n\t\tfi\n\tfi\n\n\t# Then make a copy of selefra to the target path\n\n\tcase $OS in\n\t\"linux\")\n\tsudo cp ./selefra /usr/local/bin/selefra\n\t;;\n    \t\"darwin\")\n\tcp ./selefra /usr/local/bin/selefra\n\t;;\n\tesac\n\n\tif [ $? -ne 0 ]; then\n        \tsay_red 'copy selefra to $PATH failed. please add it manually'\n\t\texit\n\tfi\n\n\t# Then delete all the files generated during your installation\n  say_green \"Delete temporary files during installation...\"\n\trm $download_save_path\n  rm $selefra_executable_file_path\n\nfi\n\n# TODO Adds a boot link to the Quick Start document\nsay_green \"selefra download and install success!\"\n\n\n# ----------------------------------------------------------------------------------------------------------------------\n"
  },
  {
    "path": "main.go",
    "content": "/*\nCopyright © 2022 NAME HERE <EMAIL ADDRESS>\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n\thttp://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/\npackage main\n\nimport (\n\t\"fmt\"\n\t\"github.com/selefra/selefra/cmd\"\n\t\"github.com/selefra/selefra/pkg/debug\"\n\t\"time\"\n)\n\nfunc main() {\n\tservice := debug.NewSamplingService(\"./\", time.Second*30)\n\terr := service.Start()\n\tif err != nil {\n\t\tfmt.Println(err)\n\t}\n\tcmd.Execute()\n\tservice.Stop()\n}\n"
  },
  {
    "path": "pkg/cli_env/env.go",
    "content": "package cli_env\n\nimport (\n\t\"os\"\n\t\"strings\"\n)\n\nconst SelefraCloudFlag = \"SELEFRA_CLOUD_FLAG\"\n\n// IsCloudEnv Check whether the system is running in the cloud environment\nfunc IsCloudEnv() bool {\n\tflag := strings.ToLower(os.Getenv(SelefraCloudFlag))\n\treturn flag == \"true\" || flag == \"enable\"\n}\n\n//func GetTaskID() {\n//\n//}\n\n// ------------------------------------------------- --------------------------------------------------------------------\n\nconst SelefraServerHost = \"SELEFRA_CLOUD_HOST\"\n\nconst DefaultCloudHost = \"main-grpc.selefra.io:1234\"\n\n// GetServerHost Gets the address of the server\nfunc GetServerHost() string {\n\n\t// read from env\n\tif os.Getenv(SelefraServerHost) != \"\" {\n\t\treturn os.Getenv(SelefraServerHost)\n\t}\n\n\treturn DefaultCloudHost\n}\n\n// ------------------------------------------------- --------------------------------------------------------------------\n\nconst SelefraCloudToken = \"SELEFRA_CLOUD_TOKEN\"\n\nfunc GetCloudToken() string {\n\treturn os.Getenv(SelefraCloudToken)\n}\n\n// ------------------------------------------------- --------------------------------------------------------------------\n\nconst SelefraCloudHttpHost = \"SELEFRA_CLOUD_HTTP_HOST\"\n\nconst DefaultSelefraCloudHttpHost = \"https://www.selefra.io\"\n\nfunc GetSelefraCloudHttpHost() string {\n\n\tif os.Getenv(SelefraCloudHttpHost) != \"\" {\n\t\treturn os.Getenv(SelefraCloudHttpHost)\n\t}\n\n\treturn DefaultSelefraCloudHttpHost\n}\n\n// ------------------------------------------------ ---------------------------------------------------------------------\n\nconst SelefraTelemetryEnable = \"SELEFRA_TELEMETRY_ENABLE\"\n\nfunc GetSelefraTelemetryEnable() string {\n\treturn strings.ToLower(os.Getenv(SelefraTelemetryEnable))\n}\n\n// ------------------------------------------------ ---------------------------------------------------------------------\n\n// SelefraTelemetryToken Compile-time injection\nvar 
SelefraTelemetryToken = \"\"\n\nconst SelefraTelemetryTokenEnvName = \"SELEFRA_TELEMETRY_TOKEN\"\n\nfunc GetSelefraTelemetryToken() string {\n\tif SelefraTelemetryToken != \"\" {\n\t\treturn SelefraTelemetryToken\n\t}\n\treturn os.Getenv(SelefraTelemetryTokenEnvName)\n}\n\n// ------------------------------------------------ ---------------------------------------------------------------------\n"
  },
  {
    "path": "pkg/cli_runtime/runtime.go",
    "content": "package cli_runtime\n//\n//import (\n//\t\"context\"\n//\t\"github.com/selefra/selefra-provider-sdk/provider/schema\"\n//\t\"github.com/selefra/selefra/cli_ui\"\n//\t\"github.com/selefra/selefra/pkg/cli_env\"\n//\t\"github.com/selefra/selefra/pkg/cloud_sdk\"\n//\t\"github.com/selefra/selefra/pkg/message\"\n//\t\"github.com/selefra/selefra/pkg/modules/module\"\n//\t\"github.com/selefra/selefra/pkg/modules/module_loader\"\n//\t\"github.com/selefra/selefra/pkg/utils\"\n//)\n//\n//// Runtime Command line runtime\n//var Runtime *CLIRuntime\n//\n//type CLIRuntime struct {\n//\n//\t// Which is the working directory\n//\tWorkspace string\n//\n//\t// Which directory to download it to\n//\tDownloadWorkspace string\n//\n//\t// Errors that may occur during operation\n//\tDiagnostics *schema.Diagnostics\n//\n//\t// The root module in the working directory\n//\tRootModule *module.Module\n//\n//\tCloudClient *cloud_sdk.CloudClient\n//}\n//\n//func Init(workspace string) {\n//\tRuntime = NewCLIRuntime(workspace)\n//\tRuntime.LoadWorkspaceModule()\n//}\n//\n//func NewCLIRuntime(workspace string) *CLIRuntime {\n//\tx := &CLIRuntime{\n//\t\tWorkspace: workspace,\n//\t}\n//\treturn x\n//}\n//\n//func (x *CLIRuntime) InitCloudClient() {\n//\thost, diagnostics := FindServerHost()\n//\tx.Diagnostics.AddDiagnostics(diagnostics)\n//\tif utils.HasError(diagnostics) {\n//\t\treturn\n//\t}\n//\tclient, d := cloud_sdk.NewCloudClient(host)\n//\tx.Diagnostics.AddDiagnostics(d)\n//\tif utils.HasError(d) {\n//\t\treturn\n//\t}\n//\tx.CloudClient = client\n//\n//\t// Log in automatically if you have local credentials\n//\tcredentials, _ := client.GetCredentials()\n//\tif credentials != nil {\n//\t\tlogin, d := client.Login(credentials.Token)\n//\t\tif utils.HasError(d) {\n//\t\t\tcli_ui.ShowLoginFailed(credentials.Token)\n//\t\t\treturn\n//\t\t}\n//\t\tcli_ui.ShowLoginSuccess(host, login)\n//\t}\n//\n//}\n//\n//func (x *CLIRuntime) LoadWorkspaceModule() *CLIRuntime {\n//\n//\tif 
utils.HasError(x.Diagnostics) {\n//\t\treturn x\n//\t}\n//\n//\tmessageChannel := message.NewChannel[*schema.Diagnostics](func(index int, message *schema.Diagnostics) {\n//\t\t// TODO log\n//\t})\n//\tloader, err := module_loader.NewLocalDirectoryModuleLoader(&module_loader.LocalDirectoryModuleLoaderOptions{\n//\t\tModuleDirectory: x.Workspace,\n//\t\tModuleLoaderOptions: &module_loader.ModuleLoaderOptions{\n//\t\t\tMessageChannel: messageChannel,\n//\t\t},\n//\t})\n//\tif err != nil {\n//\t\tmessageChannel.SenderWaitAndClose()\n//\t\tx.Diagnostics.AddErrorMsg(\"create module load from directory %s error: %s\", x.Workspace, err.Error())\n//\t\treturn x\n//\t}\n//\tworkspaceModule, _ := loader.Load(context.Background())\n//\tmessageChannel.ReceiverWait()\n//\tif workspaceModule != nil {\n//\t\tx.RootModule = workspaceModule\n//\t}\n//\n//\treturn x\n//}\n//\n//// ------------------------------------------------- --------------------------------------------------------------------\n//\n//func FindServerHost() (string, *schema.Diagnostics) {\n//\n//\t// Try to get it from the configuration file\n//\tif Runtime != nil &&\n//\t\tRuntime.RootModule != nil &&\n//\t\tRuntime.RootModule.SelefraBlock != nil &&\n//\t\tRuntime.RootModule.SelefraBlock.CloudBlock != nil &&\n//\t\tRuntime.RootModule.SelefraBlock.CloudBlock.HostName != \"\" {\n//\t\treturn Runtime.RootModule.SelefraBlock.CloudBlock.HostName, nil\n//\t}\n//\n//\t// Try to get it from an environment variable\n//\tif cli_env.GetServerHost() != \"\" {\n//\t\treturn cli_env.GetServerHost(), nil\n//\t}\n//\n//\t// You can't get either, so use the default\n//\treturn DefaultCloudHost, nil\n//}\n//\n//// ------------------------------------------------- --------------------------------------------------------------------\n//\n//func GetDSN() (string, *schema.Diagnostics) {\n//\n//\t// Use the configuration of the current module first if it is configured in the current module\n//\tif Runtime != nil && Runtime.RootModule != 
nil && Runtime.RootModule.SelefraBlock != nil && Runtime.RootModule.SelefraBlock.ConnectionBlock != nil {\n//\t\treturn Runtime.RootModule.SelefraBlock.ConnectionBlock.BuildDSN(), nil\n//\t}\n//\n//\t// Otherwise, check whether to log in\n//\tif Runtime.CloudClient != nil && Runtime.CloudClient.IsLoggedIn() {\n//\t\treturn Runtime.CloudClient.FetchOrgDSN()\n//\t}\n//\n//\t//// Environment variable\n//\t//if env.GetDatabaseDsn() != \"\" {\n//\t//\treturn env.GetDatabaseDsn(), nil\n//\t//}\n//\n//\t// TODO Built-in PG database\n//\treturn \"\", nil\n//}\n//\n//// ------------------------------------------------- --------------------------------------------------------------------\n"
  },
  {
    "path": "pkg/cloud_sdk/client.go",
    "content": "package cloud_sdk\n\n//import (\n//\t\"context\"\n//\t\"fmt\"\n//\t\"github.com/selefra/selefra/global\"\n//\t\"github.com/selefra/selefra/pkg/grpc_client/proto/issue\"\n//\t\"github.com/selefra/selefra/pkg/utils\"\n//\t\"google.golang.org/grpc\"\n//\t\"google.golang.org/grpc/credentials/insecure\"\n//\t\"google.golang.org/protobuf/types/known/timestamppb\"\n//\t\"strings\"\n//\t\"sync\"\n//)\n//\n//type client struct {\n//\tctx context.Context\n//\n//\t// conn is a grpc connection\n//\tconn *grpc.ClientConn\n//\n//\t// openedIssueStreamClient is an opened issue upload stream client\n//\topenedIssueStreamClient issue.Issue_UploadIssueStreamClient\n//\n//\t// openedLogStreamClient is an opened log upload stream client\n//\topenedLogStreamClient logPb.Log_UploadLogStreamClient\n//\n//\t// logClient is a client for upload Log\n//\tlogClient logPb.LogClient\n//\n//\ttaskID    string\n//\ttoken     string\n//\tstatusMap map[string]string\n//}\n//\n//var o = sync.Once{}\n//var c *client\n//\n//func shouldClient() {\n//\to.Do(func() {\n//\t\t// when user not login, do nothing\n//\t\tif global.Token() == \"\" {\n//\t\t\treturn\n//\t\t}\n//\n//\t\tctx := context.Background()\n//\t\tvar conn *grpc.ClientConn\n//\t\tvar err error\n//\t\tconn, err = grpc.Dial(getDial(), grpc.WithTransportCredentials(insecure.NewCredentials()))\n//\t\tif err != nil {\n//\t\t}\n//\n//\t\tinnerClient := client{\n//\t\t\tctx:       ctx,\n//\t\t\tconn:      conn,\n//\t\t\tstatusMap: make(map[string]string),\n//\t\t}\n//\n//\t\tvar openedLogStreamClient logPb.Log_UploadLogStreamClient\n//\t\tlogClient := logPb.NewLogClient(conn)\n//\t\tinnerClient.logClient = logClient\n//\n//\t\topenedLogStreamClient, err = logClient.UploadLogStream(ctx)\n//\t\tif err != nil {\n//\t\t\treturn\n//\t\t}\n//\t\tinnerClient.openedLogStreamClient = openedLogStreamClient\n//\n//\t\tvar openedIssueStreamClient issue.Issue_UploadIssueStreamClient\n//\t\tissueStreamClient := 
issue.NewIssueClient(conn)\n//\t\topenedIssueStreamClient, err = issueStreamClient.UploadIssueStream(ctx)\n//\t\tif err != nil {\n//\t\t\treturn\n//\t\t}\n//\t\tinnerClient.openedIssueStreamClient = openedIssueStreamClient\n//\n//\t\tutils.MultiRegisterClose(map[string]func(){\n//\t\t\t\"grpc conn\": func() {\n//\t\t\t\t_ = conn.Close()\n//\t\t\t},\n//\t\t\t\"log stream\": func() {\n//\t\t\t\t_ = openedLogStreamClient.CloseSend()\n//\t\t\t},\n//\t\t\t\"issue stream\": func() {\n//\t\t\t\t_ = openedIssueStreamClient.CloseSend()\n//\t\t\t},\n//\t\t})\n//\n//\t\tc = &innerClient\n//\t})\n//}\n//\n//func IssueStreamSend(req *issue.Req) error {\n//\tif c == nil {\n//\t\tshouldClient()\n//\t\tif c == nil {\n//\t\t\treturn nil\n//\t\t}\n//\t}\n//\n//\treturn c.openedIssueStreamClient.Send(req)\n//}\n//\n//func IssueStreamClose() error {\n//\tif c == nil {\n//\t\tshouldClient()\n//\t\tif c == nil {\n//\t\t\treturn nil\n//\t\t}\n//\t}\n//\n//\treturn c.openedIssueStreamClient.CloseSend()\n//}\n//\n//func LogStreamSend(req *logPb.ConnectMsg) error {\n//\tif c == nil {\n//\t\tshouldClient()\n//\t\tif c == nil {\n//\t\t\treturn nil\n//\t\t}\n//\t}\n//\n//\treturn c.openedLogStreamClient.Send(req)\n//}\n//\n//func LogStreamClose() error {\n//\tif c == nil {\n//\t\tshouldClient()\n//\t\tif c == nil {\n//\t\t\treturn nil\n//\t\t}\n//\t}\n//\n//\treturn c.openedLogStreamClient.CloseSend()\n//}\n//\n//func SetStatus(status string) {\n//\tif c == nil {\n//\t\tshouldClient()\n//\t\tif c == nil {\n//\t\t\treturn\n//\t\t}\n//\t}\n//\n//\tc.statusMap[global.Stage()] = status\n//}\n//\n//func GetStatus() string {\n//\tif c == nil {\n//\t\tshouldClient()\n//\t\tif c == nil {\n//\t\t\treturn \"success\"\n//\t\t}\n//\t}\n//\n//\treturn c.statusMap[global.Stage()]\n//}\n//\n//func SetTaskID(taskId string) {\n//\tif c == nil {\n//\t\tshouldClient()\n//\t\tif c == nil {\n//\t\t\treturn\n//\t\t}\n//\t}\n//\n//\tc.taskID = taskId\n//}\n//\n//func TaskID() string {\n//\tif c == nil 
{\n//\t\tshouldClient()\n//\t\tif c == nil {\n//\t\t\treturn \"\"\n//\t\t}\n//\t}\n//\n//\treturn c.taskID\n//}\n//\n//func Token() string {\n//\tif c == nil {\n//\t\tshouldClient()\n//\t\tif c == nil {\n//\t\t\treturn \"\"\n//\t\t}\n//\t}\n//\n//\treturn c.token\n//}\n//\n//func UploadLogStatus() (*logPb.Res, error) {\n//\tif c == nil {\n//\t\tshouldClient()\n//\t\tif c == nil {\n//\t\t\treturn nil, nil\n//\t\t}\n//\t}\n//\n//\tstatusInfo := &logPb.StatusInfo{\n//\t\tBaseInfo: &logPb.BaseConnectionInfo{\n//\t\t\tToken:  c.token,\n//\t\t\tTaskId: c.taskID,\n//\t\t},\n//\t\tStag:   global.Stage(),\n//\t\tStatus: c.statusMap[global.Stage()],\n//\t\tTime:   timestamppb.Now(),\n//\t}\n//\tres, err := c.logClient.UploadLogStatus(c.ctx, statusInfo)\n//\tif err != nil {\n//\t\treturn nil, fmt.Errorf(\"Fail to upload log status:%s\", err.Error())\n//\t}\n//\n//\treturn res, nil\n//}\n//\n//func getDial() string {\n//\tvar dialMap = make(map[string]string)\n//\tdialMap[\"dev-api.selefra.io\"] = \"dev-tcp.selefra.io:1234\"\n//\tdialMap[\"main-api.selefra.io\"] = \"main-tcp.selefra.io:1234\"\n//\tdialMap[\"pre-api.selefra.io\"] = \"pre-tcp.selefra.io:1234\"\n//\tif dialMap[global.SERVER] != \"\" {\n//\t\treturn dialMap[global.SERVER]\n//\t}\n//\tarr := strings.Split(global.SERVER, \":\")\n//\treturn arr[0] + \":1234\"\n//}\n"
  },
  {
    "path": "pkg/cloud_sdk/credentials.go",
    "content": "package cloud_sdk\n\nimport (\n\t\"github.com/selefra/selefra-provider-sdk/provider/schema\"\n\t\"github.com/selefra/selefra/config\"\n\t\"github.com/selefra/selefra/pkg/utils\"\n\t\"path/filepath\"\n\t\"time\"\n)\n\nconst CredentialsFileName = \"credentials.json\"\n\n// ------------------------------------------------- --------------------------------------------------------------------\n\n// CloudCredentials Credentials to connect to the selefra cloud\ntype CloudCredentials struct {\n\n\t// The name of the token above is easy to remember\n\t// This name is set when the token is created on the cloud side\n\tTokenName string `json:\"token_name\"`\n\n\t// token used for authentication\n\tToken string `json:\"token\"`\n\n\tUserName string `json:\"user_name\"`\n\n\tOrgName string `json:\"org_name\"`\n\n\tServerHost string `json:\"server_host\"`\n\n\t// Time of the last login\n\tLoginTime time.Time `json:\"login_time\"`\n\n\t// The last time the token was used\n\tLastUseTime time.Time `json:\"last_login_time\"`\n}\n\n// ------------------------------------------------- --------------------------------------------------------------------\n\n// GetCredentialsWorkspacePath get the Credentials save directory\nfunc (x *CloudClient) GetCredentialsWorkspacePath() (string, *schema.Diagnostics) {\n\tpath, err := config.GetSelefraHomeWorkspacePath()\n\tif err != nil {\n\t\treturn \"\", schema.NewDiagnostics().AddError(err)\n\t}\n\treturn filepath.Join(path, CredentialsFileName), nil\n}\n\n// SaveCredentials Save the login credentials to the local directory\nfunc (x *CloudClient) SaveCredentials(credentials *CloudCredentials) *schema.Diagnostics {\n\n\tselefraHomeDirectory, err := config.GetSelefraHomeWorkspacePath()\n\tif err != nil {\n\t\treturn schema.NewDiagnostics().AddErrorMsg(\"get selefra home directory error: %s\", err.Error())\n\t}\n\terr = utils.EnsureDirectoryExists(selefraHomeDirectory)\n\tif err != nil {\n\t\treturn 
schema.NewDiagnostics().AddErrorMsg(\"create directory %s error: %s\", selefraHomeDirectory, err.Error())\n\t}\n\n\tpath, d := x.GetCredentialsWorkspacePath()\n\tif utils.HasError(d) {\n\t\treturn d\n\t}\n\n\terr = utils.WriteJsonFile[*CloudCredentials](path, credentials)\n\tif err != nil {\n\t\treturn schema.NewDiagnostics().AddError(err)\n\t}\n\treturn nil\n}\n\n// GetCredentials Read the credentials stored in the local directory\nfunc (x *CloudClient) GetCredentials() (*CloudCredentials, *schema.Diagnostics) {\n\tpath, d := x.GetCredentialsWorkspacePath()\n\tif utils.HasError(d) {\n\t\treturn nil, d\n\t}\n\tcredentials, err := utils.ReadJsonFile[*CloudCredentials](path)\n\tif err != nil {\n\t\treturn nil, schema.NewDiagnostics().AddError(err)\n\t}\n\tif credentials.Token == \"\" {\n\t\treturn nil, nil\n\t}\n\treturn credentials, nil\n}\n\n// ------------------------------------------------- --------------------------------------------------------------------\n"
  },
  {
    "path": "pkg/cloud_sdk/dsn.go",
    "content": "package cloud_sdk\n\nimport (\n\t\"github.com/selefra/selefra-provider-sdk/provider/schema\"\n\t\"github.com/selefra/selefra/pkg/grpc/pb/cloud\"\n)\n\n// ------------------------------------------------- --------------------------------------------------------------------\n\n// FetchOrgDSN Getting a user-configured database connection from the selefra cloud may not be configured\nfunc (x *CloudClient) FetchOrgDSN() (string, *schema.Diagnostics) {\n\n\tdiagnostics := schema.NewDiagnostics()\n\n\tif !x.IsLoggedIn() {\n\t\treturn \"\", diagnostics.AddErrorMsg(\"You need login first!\")\n\t}\n\n\tresponse, err := x.cloudClient.FetchOrgDsn(x.BuildMetaContext(), &cloud.RequestEmpty{})\n\tif err != nil {\n\t\treturn \"\", schema.NewDiagnostics().AddErrorMsg(\"Request DSN from selefra cloud failed: %s\", err.Error())\n\t}\n\tif response.Diagnosis != nil && response.Diagnosis.Code != 0 {\n\t\treturn \"\", schema.NewDiagnostics().AddErrorMsg(\"Request DSN from selefra cloud response error, code = %d, msg = %s\", response.Diagnosis.Code, response.Diagnosis.Msg)\n\t}\n\treturn response.Dsn, nil\n}\n"
  },
  {
    "path": "pkg/cloud_sdk/dsn_test.go",
    "content": "package cloud_sdk\n\nimport (\n\t\"github.com/selefra/selefra/pkg/utils\"\n\t\"github.com/stretchr/testify/assert\"\n\t\"testing\"\n)\n\nfunc TestCloudClient_FetchOrgDSN(t *testing.T) {\n\tclient := getAuthedSDKClientForTest()\n\tdsn, diagnostics := client.FetchOrgDSN()\n\tassert.False(t, utils.HasError(diagnostics))\n\tassert.NotEmpty(t, dsn)\n}\n"
  },
  {
    "path": "pkg/cloud_sdk/errors.go",
    "content": "package cloud_sdk\n\nimport \"errors\"\n\nvar (\n\tErrYouAreNotLogin = errors.New(\"you are not logged in\")\n)\n"
  },
  {
    "path": "pkg/cloud_sdk/project.go",
    "content": "package cloud_sdk\n\nimport (\n\t\"github.com/selefra/selefra-provider-sdk/provider/schema\"\n\t\"github.com/selefra/selefra/pkg/grpc/pb/cloud\"\n\t\"github.com/selefra/selefra/pkg/grpc/pb/common\"\n)\n\n// CreateProject Returns the name of the project if the given project name already exists,\n// otherwise creates the project and returns information about the project\nfunc (x *CloudClient) CreateProject(projectName string) (*cloud.CreateProject_Response, *schema.Diagnostics) {\n\n\tdiagnostics := schema.NewDiagnostics()\n\n\tif !x.IsLoggedIn() {\n\t\treturn nil, diagnostics.AddErrorMsg(\"You need login first!\")\n\t}\n\n\tresponse, err := x.cloudClient.CreateProject(x.BuildMetaContext(), &cloud.CreateProject_Request{\n\t\tName: projectName,\n\t})\n\tif err != nil {\n\t\treturn nil, diagnostics.AddErrorMsg(\"create cloud project error: %s\", err.Error())\n\t}\n\tif response.Diagnosis != nil && response.Diagnosis.Code != 0 {\n\t\tswitch response.Diagnosis.Code {\n\t\tcase common.Diagnosis_NoAuthority:\n\t\t\terrorMsg := `Free users can only create a project, you can pay in this upgrade at https://app.selefra.io/Settings/planBilling\nAlternatively, you can logout the currently logged user using the command selefra logout, which will not be synchronized to the cloud.`\n\t\t\treturn nil, diagnostics.AddErrorMsg(errorMsg)\n\t\tdefault:\n\t\t\treturn nil, diagnostics.AddErrorMsg(\"create cloud project response error, code = %d, message = %s\", response.Diagnosis.Code, response.Diagnosis.Msg)\n\t\t}\n\t}\n\n\treturn response, nil\n}\n\n//// ------------------------------------------------- --------------------------------------------------------------------\n//\n//type CreateProjectRequest struct {\n//\tToken       string `json:\"token\"`\n//\tProjectName string `json:\"name\"`\n//}\n//\n//type CreateProjectData struct {\n//\tName    string `json:\"name\"`\n//\tOrgName string `json:\"org_name\"`\n//}\n//\n//// CreateProject create a project in selefra 
cloud when use is login, else do nothing\n//func (x *CloudClient) CreateProject(ctx context.Context, projectName string) (orgName string, err error) {\n//\n//\tif !x.IsLoggedIn() {\n//\t\treturn\n//\t}\n//\n//\tresponse, err := http_client.PostJson[*CreateProjectRequest, *Response[CreateProjectData]](ctx, x.buildAPIURL(\"/cli/create_project\"), &CreateProjectRequest{})\n//\tif err != nil {\n//\t\treturn \"\", err\n//\t}\n//\tif err := response.Check(); err != nil {\n//\t\treturn \"\", err\n//\t}\n//\treturn response.Data.OrgName, nil\n//}\n//\n//// ------------------------------------------------- --------------------------------------------------------------------\n//\n//type Stage string\n//\n//const (\n//\tCreating = \"creating\"\n//\tTesting  = \"testing\"\n//\tFailed   = \"failed\"\n//\tSuccess  = \"success\"\n//)\n//\n//type SetupProjectStageDataRequest struct {\n//\tToken       string `json:\"token\"`\n//\tProjectName string `json:\"project_name\"`\n//\tStag        string `json:\"stag\"`\n//}\n//\n//type SetupProjectStageData struct{}\n//\n//// UploadSetupStage sync project stage to selefra cloud when use is login, else do nothing\n//func (x *CloudClient) UploadSetupStage(ctx context.Context, projectName string, stage Stage) error {\n//\n//\tif !x.IsLoggedIn() {\n//\t\treturn ErrYouAreNotLogin\n//\t}\n//\n//\tresponse, err := http_client.PostJson[*SetupProjectStageDataRequest, *Response[SetupProjectStageData]](ctx, x.buildAPIURL(\"/cli/update_setup_stag\"), &SetupProjectStageDataRequest{\n//\t\tToken:       x.token,\n//\t\tProjectName: projectName,\n//\t\tStag:        string(stage),\n//\t})\n//\tif err != nil {\n//\t\treturn err\n//\t}\n//\tif err := response.Check(); err != nil {\n//\t\treturn err\n//\t}\n//\treturn nil\n//}\n//\n//// ------------------------------------------------- --------------------------------------------------------------------\n"
  },
  {
    "path": "pkg/cloud_sdk/project_test.go",
    "content": "package cloud_sdk\n\nimport (\n\t\"github.com/selefra/selefra-utils/pkg/id_util\"\n\t\"github.com/selefra/selefra/pkg/utils\"\n\t\"github.com/stretchr/testify/assert\"\n\t\"testing\"\n)\n\nfunc TestCloudClient_CreateProject(t *testing.T) {\n\tclient := getAuthedSDKClientForTest()\n\tproject, diagnostics := client.CreateProject(\"cli-test-project-\" + id_util.RandomId())\n\tassert.False(t, utils.HasError(diagnostics))\n\tassert.NotNil(t, project)\n}\n"
  },
  {
    "path": "pkg/cloud_sdk/sdk.go",
    "content": "package cloud_sdk\n\nimport (\n\t\"context\"\n\t\"github.com/selefra/selefra-provider-sdk/provider/schema\"\n\tselefraGrpc \"github.com/selefra/selefra/pkg/grpc\"\n\t\"github.com/selefra/selefra/pkg/grpc/pb/cloud\"\n\t\"github.com/selefra/selefra/pkg/grpc/pb/issue\"\n\t\"github.com/selefra/selefra/pkg/grpc/pb/log\"\n\t\"github.com/selefra/selefra/pkg/message\"\n\t\"google.golang.org/grpc\"\n\t\"google.golang.org/grpc/credentials/insecure\"\n\t\"google.golang.org/grpc/keepalive\"\n\t\"google.golang.org/grpc/metadata\"\n\t\"time\"\n)\n\nconst (\n\tIssueUploaderName = \"issue-uploader\"\n\tLogUploaderName   = \"log-uploader\"\n)\n\ntype CloudClient struct {\n\tserverUrl string\n\n\tcloudNoAuthClient cloud.CloudNoAuthClient\n\tcloudClient       cloud.CloudClient\n\n\ttaskId string\n\ttoken  string\n\n\t// This parameter is used to upload Issues\n\t//IssueStreamUploader *selefraGrpc.StreamUploader[issue.Issue_UploadIssueStreamClient, int, *issue.UploadIssueStream_Request, *issue.UploadIssueStream_Response]\n\n\t// This parameter is used to upload logs\n\t//LogClient         log.LogClient\n\t//LogStreamUploader *selefraGrpc.StreamUploader[log.Log_UploadLogStreamClient, int, *log.UploadLogStream_Request, *log.UploadLogStream_Response]\n\n\t//MessageChannel *message.Channel[*schema.Diagnostics]\n}\n\nfunc NewCloudClient(serverUrl string) (*CloudClient, *schema.Diagnostics) {\n\n\tdiagnostics := schema.NewDiagnostics()\n\n\tx := &CloudClient{\n\t\tserverUrl: serverUrl,\n\t\t//cloudNoAuthClient: cloudNoAuthClient,\n\t\t//cloudClient:       cloud.NewCloudClient(conn),\n\t\t//IssueStreamUploader: nil,\n\t\t//LogStreamUploader:   nil,\n\t}\n\n\tconn, err := x.DialCloudHost()\n\tif err != nil {\n\t\treturn nil, diagnostics.AddErrorMsg(\"connect to cloud server %s failed: %s\", serverUrl, err.Error())\n\t}\n\tcloudNoAuthClient := cloud.NewCloudNoAuthClient(conn)\n\tx.cloudNoAuthClient = cloudNoAuthClient\n\n\tconn, err = x.DialCloudHost()\n\tif err != nil 
{\n\t\treturn nil, diagnostics.AddErrorMsg(\"connect to cloud server %s failed: %s\", serverUrl, err.Error())\n\t}\n\tx.cloudClient = cloud.NewCloudClient(conn)\n\n\treturn x, nil\n}\n\n//// InitTaskClientContext Initialize the task client context, for after report data to selefra cloud\n//func (x *CloudClient) InitTaskClientContext(taskId string, messageChannel *message.Channel[*schema.Diagnostics]) *schema.Diagnostics {\n//\n//\tdiagnostics := schema.NewDiagnostics()\n//\n//\tx.taskId = taskId\n//\tx.MessageChannel = messageChannel\n//\n//\tissueStreamUploader, d := x.NewIssueStreamUploader()\n//\tif diagnostics.AddDiagnostics(d).HasError() {\n//\t\treturn diagnostics\n//\t}\n//\tx.IssueStreamUploader = issueStreamUploader\n//\n//\tlogClient, logStreamUploader, d := x.NewLogStreamUploader()\n//\tif diagnostics.AddDiagnostics(d).HasError() {\n//\t\treturn diagnostics\n//\t}\n//\tx.LogClient = logClient\n//\tx.LogStreamUploader = logStreamUploader\n//\n//\treturn diagnostics\n//}\n\n// NewIssueStreamUploader Create a component that uploads Issues\nfunc (x *CloudClient) NewIssueStreamUploader(messageChannel *message.Channel[*schema.Diagnostics]) (*selefraGrpc.StreamUploader[issue.Issue_UploadIssueStreamClient, int, *issue.UploadIssueStream_Request, *issue.UploadIssueStream_Response], *schema.Diagnostics) {\n\n\t// new connection\n\tconn, err := x.DialCloudHost()\n\tif err != nil {\n\t\treturn nil, schema.NewDiagnostics().AddErrorMsg(\"connect to cloud server %s failed: %s\", x.serverUrl, err.Error())\n\t}\n\n\t// create upload issue stream client\n\tstream, err := issue.NewIssueClient(conn).UploadIssueStream(x.BuildMetaContext())\n\tif err != nil {\n\t\treturn nil, schema.NewDiagnostics().AddErrorMsg(\"\")\n\t}\n\tuploaderOptions := &selefraGrpc.StreamUploaderOptions[issue.Issue_UploadIssueStreamClient, int, *issue.UploadIssueStream_Request, *issue.UploadIssueStream_Response]{\n\t\tName:                      IssueUploaderName,\n\t\tClient:                    
stream,\n\t\tWaitSendTaskQueueBuffSize: 1000,\n\t\tMessageChannel:            messageChannel,\n\t}\n\tuploader := selefraGrpc.NewStreamUploader[issue.Issue_UploadIssueStreamClient, int, *issue.UploadIssueStream_Request, *issue.UploadIssueStream_Response](uploaderOptions)\n\treturn uploader, nil\n}\n\n// NewLogStreamUploader Create a component that uploads logs\nfunc (x *CloudClient) NewLogStreamUploader(messageChannel *message.Channel[*schema.Diagnostics]) (log.LogClient, *selefraGrpc.StreamUploader[log.Log_UploadLogStreamClient, int, *log.UploadLogStream_Request, *log.UploadLogStream_Response], *schema.Diagnostics) {\n\n\t// new connection\n\tconn, err := x.DialCloudHost()\n\tif err != nil {\n\t\treturn nil, nil, schema.NewDiagnostics().AddErrorMsg(\"connect to cloud server %s failed: %s\", x.serverUrl, err.Error())\n\t}\n\n\t// create upload\n\tdiagnostics := schema.NewDiagnostics()\n\tclient := log.NewLogClient(conn)\n\tstream, err := client.UploadLogStream(x.BuildMetaContext())\n\tif err != nil {\n\t\treturn nil, nil, diagnostics.AddErrorMsg(\"create cloud log stream error: %s\", err.Error())\n\t}\n\tuploaderOptions := &selefraGrpc.StreamUploaderOptions[log.Log_UploadLogStreamClient, int, *log.UploadLogStream_Request, *log.UploadLogStream_Response]{\n\t\tName:                      LogUploaderName,\n\t\tClient:                    stream,\n\t\tWaitSendTaskQueueBuffSize: 1000,\n\t\tMessageChannel:            messageChannel,\n\t}\n\tuploader := selefraGrpc.NewStreamUploader[log.Log_UploadLogStreamClient, int, *log.UploadLogStream_Request, *log.UploadLogStream_Response](uploaderOptions)\n\treturn client, uploader, nil\n}\n\nfunc (x *CloudClient) DialCloudHost() (*grpc.ClientConn, error) {\n\treturn grpc.Dial(x.serverUrl,\n\t\tgrpc.WithTransportCredentials(insecure.NewCredentials()),\n\t\tgrpc.WithBlock(),\n\t\tgrpc.WithKeepaliveParams(keepalive.ClientParameters{\n\t\t\tTime:                3 * time.Second,\n\t\t\tTimeout:             3 * 
time.Minute,\n\t\t\tPermitWithoutStream: true}))\n\t//return grpc.Dial(x.serverUrl, grpc.WithTransportCredentials(insecure.NewCredentials()))\n}\n\n// ------------------------------------------------- --------------------------------------------------------------------\n\nfunc (x *CloudClient) BuildMetaContext() context.Context {\n\treturn metadata.AppendToOutgoingContext(context.Background(), \"taskUUID\", x.taskId, \"token\", x.token)\n}\n\n// ------------------------------------------------- --------------------------------------------------------------------\n//\n//type OutputReq struct {\n//\tName     string              `json:\"name\"`\n//\tQuery    string              `json:\"query\"`\n//\tLabels   map[string][]string `json:\"labels\"`\n//\tMetadata Metadata            `json:\"metadata\"`\n//}\n//\n//type Metadata struct {\n//\tId           string   `json:\"id\"`\n//\tSeverity     string   `json:\"severity\"`\n//\tProvider     string   `json:\"provider\"`\n//\tTags         []string `json:\"tags\"`\n//\tSrcTableName []string `json:\"src_table_name\"`\n//\tRemediation  string   `yaml:\"remediation\" json:\"remediation\"`\n//\tAuthor       string   `json:\"author\"`\n//\tTitle        string   `json:\"title\"`\n//\tDescription  string   `json:\"description\"`\n//\tOutput       string   `json:\"output\"`\n//}\n//\n//type OutputRes struct {\n//}\n//\n//type UploadWorkplaceRes struct {\n//}\n//\n//// ------------------------------------------------- --------------------------------------------------------------------\n//\n//type Response[T any] struct {\n//\tCode int    `json:\"code\"`\n//\tData T      `json:\"data\"`\n//\tMsg  string `json:\"msg\"`\n//}\n//\n//func (x *Response[T]) IsResponseCodeOk() bool {\n//\treturn x.Code == 200\n//}\n//\n//func (x *Response[T]) Check() error {\n//\tif !x.IsResponseCodeOk() {\n//\t\t// TODO error\n//\t\treturn errors.New(\"\")\n//\t}\n//\treturn nil\n//}\n//\n//// ------------------------------------------------- 
--------------------------------------------------------------------\n\n//func (x *CloudClient) CliHttpClient[T any](method, url string, info interface{}) (*Response[T], error) {\n//\tvar client http.Client\n//\thttpLogger.Info(\"request info: %s , %s\", url, info)\n//\tbytesData, err := json.Marshal(info)\n//\tif err != nil {\n//\t\treturn nil, err\n//\t}\n//\treq, err := http.NewRequest(method, \"https://\"+global.SERVER+url, bytes.NewReader(bytesData))\n//\tif err != nil {\n//\t\treturn nil, err\n//\t}\n//\treq.Header.Set(\"Content-Type\", \"application/json\")\n//\tresp, err := client.Do(req)\n//\tif err != nil {\n//\t\tfmt.Println(err.Error())\n//\t\treturn nil, err\n//\t}\n//\tdefer resp.Body.Close()\n//\tif resp.StatusCode == http.StatusNotFound {\n//\t\treturn nil, errors.New(\"404 not found\")\n//\t}\n//\trespBytes, err := io.ReadAll(resp.Body)\n//\thttpLogger.Info(\"resp info: %s , %s\", url, string(respBytes))\n//\tif err != nil {\n//\t\treturn nil, err\n//\t}\n//\tvar res Response[T]\n//\terr = json.Unmarshal(respBytes, &res)\n//\tif err != nil {\n//\t\treturn nil, err\n//\t}\n//\treturn &res, err\n//}\n\n// ------------------------------------------------- --------------------------------------------------------------------\n\n//func (x *CloudClient) buildAPIURL(apiRequestPath string) string {\n//\treturn path.Join(x.serverUrl, apiRequestPath)\n//}\n//\n//// ------------------------------------------------- --------------------------------------------------------------------\n//\n//func f() {\n//\tctx := context.Background()\n//\tvar conn *grpc.ClientConn\n//\tvar err error\n//\tgrpc.DialContext()\n//\tconn, err = grpc.Dial(getDial(), grpc.WithTransportCredentials(insecure.NewCredentials()))\n//\tif err != nil {\n//\t}\n//\n//\tinnerClient := client{\n//\t\tctx:       ctx,\n//\t\tconn:      conn,\n//\t\tstatusMap: make(map[string]string),\n//\t}\n//\n//\tvar openedLogStreamClient logPb.Log_UploadLogStreamClient\n//\tlogClient := 
logPb.NewLogClient(conn)\n//\tinnerClient.logClient = logClient\n//\n//\topenedLogStreamClient, err = logClient.UploadLogStream(ctx)\n//\tif err != nil {\n//\t\treturn\n//\t}\n//\tinnerClient.openedLogStreamClient = openedLogStreamClient\n//\n//\tvar openedIssueStreamClient issue.Issue_UploadIssueStreamClient\n//\tissueStreamClient := issue.NewIssueClient(conn)\n//\topenedIssueStreamClient, err = issueStreamClient.UploadIssueStream(ctx)\n//\tif err != nil {\n//\t\treturn\n//\t}\n//\tinnerClient.openedIssueStreamClient = openedIssueStreamClient\n//\n//\tutils.MultiRegisterClose(map[string]func(){\n//\t\t\"grpc conn\": func() {\n//\t\t\t_ = conn.Close()\n//\t\t},\n//\t\t\"log stream\": func() {\n//\t\t\t_ = openedLogStreamClient.CloseSend()\n//\t\t},\n//\t\t\"issue stream\": func() {\n//\t\t\t_ = openedIssueStreamClient.CloseSend()\n//\t\t},\n//\t})\n//\n//\tc = &innerClient\n//})\n//}\n\n// ------------------------------------------------- --------------------------------------------------------------------\n"
  },
  {
    "path": "pkg/cloud_sdk/sdk_test.go",
    "content": "package cloud_sdk\n\nimport (\n\t\"context\"\n\t\"fmt\"\n\t\"github.com/selefra/selefra-provider-sdk/provider/schema\"\n\t\"github.com/selefra/selefra-utils/pkg/id_util\"\n\t\"github.com/selefra/selefra/pkg/cli_env\"\n\t\"github.com/selefra/selefra/pkg/grpc/pb/issue\"\n\t\"github.com/selefra/selefra/pkg/grpc/pb/log\"\n\t\"github.com/selefra/selefra/pkg/message\"\n\t\"github.com/selefra/selefra/pkg/utils\"\n\t\"github.com/stretchr/testify/assert\"\n\t\"google.golang.org/protobuf/types/known/timestamppb\"\n\t\"testing\"\n)\n\nfunc getUnAuthSDKClientForTest() *CloudClient {\n\tclient, diagnostics := NewCloudClient(cli_env.GetServerHost())\n\tif utils.HasError(diagnostics) {\n\t\tpanic(diagnostics.ToString())\n\t}\n\treturn client\n}\n\nfunc getAuthedSDKClientForTest() *CloudClient {\n\tclient, diagnostics := NewCloudClient(cli_env.GetServerHost())\n\tif utils.HasError(diagnostics) {\n\t\tpanic(diagnostics.ToString())\n\t}\n\ttoken := cli_env.GetCloudToken()\n\tcloudCredentials, d := client.Login(token)\n\tif utils.HasError(d) {\n\t\tpanic(d.ToString())\n\t}\n\tif cloudCredentials == nil {\n\t\tpanic(\"cloud credentials is nil\")\n\t}\n\treturn client\n}\n\nfunc TestNewCloudClient(t *testing.T) {\n\tclient, diagnostics := NewCloudClient(cli_env.GetServerHost())\n\tassert.False(t, utils.HasError(diagnostics))\n\tassert.NotNil(t, client)\n}\n\nfunc TestCloudClient_NewIssueStreamUploader(t *testing.T) {\n\tclient := getAuthedSDKClientForTest()\n\n\tproject, diagnostics := client.CreateProject(\"cli-test-project-\" + id_util.RandomId())\n\tassert.False(t, utils.HasError(diagnostics))\n\tassert.NotNil(t, project)\n\n\t_, d := client.CreateTask(project.Name)\n\tassert.False(t, utils.HasError(d))\n\tif utils.IsNotEmpty(d) {\n\t\tt.Log(d.ToString())\n\t}\n\n\tmessageChannel := message.NewChannel[*schema.Diagnostics](func(index int, message *schema.Diagnostics) {\n\t\tif utils.IsNotEmpty(message) 
{\n\t\t\tt.Log(message.ToString())\n\t\t}\n\t})\n\t//client.MessageChannel = messageChannel\n\tissueStreamUploader, d := client.NewIssueStreamUploader(messageChannel)\n\tassert.False(t, utils.HasError(d))\n\tif utils.IsNotEmpty(d) {\n\t\tt.Log(d.ToString())\n\t}\n\tissueStreamUploader.RunUploaderWorker()\n\n\tfor i := 0; i < 10000; i++ {\n\t\tok, d := issueStreamUploader.Submit(context.Background(), i, &issue.UploadIssueStream_Request{\n\t\t\tIndex: int32(i),\n\t\t\tRule: &issue.UploadIssueStream_Rule{\n\t\t\t\tName:     \"test-rule\",\n\t\t\t\tQuery:    \"selefra * from 1\",\n\t\t\t\tMetadata: &issue.UploadIssueStream_Metadata{},\n\t\t\t\tOutput:   \"output\",\n\t\t\t},\n\t\t\tProvider: nil,\n\t\t\tModule:   nil,\n\t\t\tContext: &issue.UploadIssueStream_Context{\n\t\t\t\tSrcTableNames: []string{\n\t\t\t\t\t\"foo\", \"bar\", \"test\",\n\t\t\t\t},\n\t\t\t\tSchema: \"test\",\n\t\t\t},\n\t\t})\n\t\tassert.Nil(t, d)\n\t\tassert.True(t, ok)\n\t}\n\n\tissueStreamUploader.ShutdownAndWait(context.Background())\n\tmessageChannel.ReceiverWait()\n\n}\n\nfunc TestCloudClient_NewLogStreamUploader(t *testing.T) {\n\tclient := getAuthedSDKClientForTest()\n\n\tproject, diagnostics := client.CreateProject(\"cli-test-project-\" + id_util.RandomId())\n\tassert.False(t, utils.HasError(diagnostics))\n\tassert.NotNil(t, project)\n\n\t_, d := client.CreateTask(project.Name)\n\tassert.False(t, utils.HasError(d))\n\tif utils.IsNotEmpty(d) {\n\t\tt.Log(d.ToString())\n\t}\n\n\tmessageChannel := message.NewChannel[*schema.Diagnostics](func(index int, message *schema.Diagnostics) {\n\t\tif utils.IsNotEmpty(message) {\n\t\t\tt.Log(message.ToString())\n\t\t}\n\t})\n\t//client.MessageChannel = messageChannel\n\tlogClient, logStreamUploader, d := client.NewLogStreamUploader(messageChannel)\n\tassert.False(t, utils.HasError(d))\n\tif utils.IsNotEmpty(d) {\n\t\tt.Log(d.ToString())\n\t}\n\tlogStreamUploader.RunUploaderWorker()\n\n\tstatus, err := logClient.UploadLogStatus(client.BuildMetaContext(), 
&log.UploadLogStatus_Request{\n\t\tStage:  log.StageType_STAGE_TYPE_INITIALIZING,\n\t\tStatus: log.Status_STATUS_SUCCESS,\n\t\tTime:   timestamppb.Now(),\n\t})\n\tassert.Nil(t, err)\n\tassert.NotNil(t, status)\n\n\tfor i := 0; i < 10000; i++ {\n\t\tok, d := logStreamUploader.Submit(context.Background(), i, &log.UploadLogStream_Request{\n\t\t\tStage: log.StageType_STAGE_TYPE_INITIALIZING,\n\t\t\tIndex: uint64(i),\n\t\t\tMsg:   fmt.Sprintf(\"test %d\", i),\n\t\t\tLevel: log.Level_LEVEL_DEBUG,\n\t\t\tTime:  timestamppb.Now(),\n\t\t})\n\t\tassert.Nil(t, d)\n\t\tassert.True(t, ok)\n\t}\n\n\tlogStreamUploader.ShutdownAndWait(context.Background())\n\tmessageChannel.ReceiverWait()\n\n}\n"
  },
  {
    "path": "pkg/cloud_sdk/task.go",
    "content": "package cloud_sdk\n\nimport (\n\t\"github.com/selefra/selefra-provider-sdk/provider/schema\"\n\t\"github.com/selefra/selefra/pkg/grpc/pb/cloud\"\n\t\"os\"\n)\n\nfunc (x *CloudClient) CreateTask(projectName string) (*cloud.CreateTask_Response, *schema.Diagnostics) {\n\n\tdiagnostics := schema.NewDiagnostics()\n\n\tif !x.IsLoggedIn() {\n\t\treturn nil, diagnostics.AddErrorMsg(\"You need login first!\")\n\t}\n\n\tresponse, err := x.cloudClient.CreateTask(x.BuildMetaContext(), &cloud.CreateTask_Request{\n\t\tProjectName: projectName,\n\t\tTaskId:      os.Getenv(\"SELEFRA_TASK_ID\"),\n\t\tTaskSource:  os.Getenv(\"SELEFRA_TASK_SOURCE\"),\n\t\tName:        os.Getenv(\"SELEFRA_TASK_NAME\"),\n\t})\n\tif err != nil {\n\t\treturn nil, diagnostics.AddErrorMsg(\"create cloud task failed: %s\", err.Error())\n\t}\n\n\tif response.Diagnosis != nil && response.Diagnosis.Code != 0 {\n\t\treturn nil, diagnostics.AddErrorMsg(\"create cloud task response error, code = %d, message = %s\", response.Diagnosis.Code, response.Diagnosis.Msg)\n\t}\n\n\t//d := x.InitTaskClientContext(response.TaskId, )\n\t//if diagnostics.AddDiagnostics(d).HasError() {\n\t//\treturn diagnostics\n\t//}\n\n\tx.taskId = response.TaskId\n\n\treturn response, diagnostics\n}\n\n//// ------------------------------------------------- --------------------------------------------------------------------\n//\n//type TaskData struct {\n//\tTaskUUID string `json:\"task_uuid\"`\n//}\n//\n//type CreateTaskRequest struct {\n//\tToken       string `json:\"token\"`\n//\tProjectName string `json:\"project_name\"`\n//\tTaskID      string `json:\"task_id\"`\n//\tTaskSource  string `json:\"task_source\"`\n//}\n//\n//// TryCreateTask create a task in selefra cloud when use is login, else do nothing\n//func (x *CloudClient) TryCreateTask(ctx context.Context, projectName string) (*Response[TaskData], error) {\n//\n//\tif !x.IsLoggedIn() {\n//\t\treturn nil, ErrYouAreNotLogin\n//\t}\n//\n//\trequestBody := 
&CreateTaskRequest{\n//\t\tToken:       x.token,\n//\t\tProjectName: projectName,\n//\t\tTaskID:      os.Getenv(\"SELEFRA_TASK_ID\"),\n//\t\tTaskSource:  os.Getenv(\"SELEFRA_TASK_SOURCE\"),\n//\t}\n//\treturn http_client.PostJson[*CreateTaskRequest, *Response[TaskData]](ctx, x.buildAPIURL(\"/cli/create_task\"), requestBody)\n//}\n//\n//// ------------------------------------------------- --------------------------------------------------------------------\n"
  },
  {
    "path": "pkg/cloud_sdk/task_test.go",
    "content": "package cloud_sdk\n\nimport (\n\t\"github.com/selefra/selefra-utils/pkg/id_util\"\n\t\"github.com/selefra/selefra/pkg/utils\"\n\t\"github.com/stretchr/testify/assert\"\n\t\"testing\"\n)\n\nfunc TestCloudClient_CreateTask(t *testing.T) {\n\tclient := getAuthedSDKClientForTest()\n\n\tproject, diagnostics := client.CreateProject(\"cli-test-project-\" + id_util.RandomId())\n\tassert.False(t, utils.HasError(diagnostics))\n\tassert.NotNil(t, project)\n\n\tresponse, d := client.CreateTask(project.Name)\n\tassert.False(t, utils.HasError(d))\n\tassert.NotNil(t, response)\n}\n"
  },
  {
    "path": "pkg/cloud_sdk/test_data/sync_workspace/a.yml",
    "content": "a: b\nc: d"
  },
  {
    "path": "pkg/cloud_sdk/test_data/sync_workspace/b.yml",
    "content": "ff: fff\nooo: asdasd"
  },
  {
    "path": "pkg/cloud_sdk/user.go",
    "content": "package cloud_sdk\n\nimport (\n\t\"context\"\n\t\"github.com/selefra/selefra-provider-sdk/provider/schema\"\n\t\"github.com/selefra/selefra/pkg/grpc/pb/cloud\"\n\t\"github.com/selefra/selefra/pkg/grpc/pb/common\"\n\t\"time\"\n)\n\n// ------------------------------------------------- --------------------------------------------------------------------\n\n// IsLoggedIn Check whether the login status is current\nfunc (x *CloudClient) IsLoggedIn() bool {\n\treturn x.token != \"\"\n}\n\nfunc (x *CloudClient) SetToken(token string) {\n\tx.token = token\n}\n\n// ------------------------------------------------- --------------------------------------------------------------------\n\n// Login Try to log in with the given token\nfunc (x *CloudClient) Login(token string) (*CloudCredentials, *schema.Diagnostics) {\n\tdiagnostics := schema.NewDiagnostics()\n\tif token == \"\" {\n\t\treturn nil, diagnostics.AddErrorMsg(\"Token can not be empty for login\")\n\t}\n\tctx, cancelFunc := context.WithTimeout(context.Background(), time.Second*30)\n\tdefer cancelFunc()\n\tresponse, err := x.cloudNoAuthClient.Login(ctx, &cloud.Login_Request{\n\t\tToken: token,\n\t})\n\tif err != nil {\n\t\treturn nil, diagnostics.AddErrorMsg(\"Login failed: %s\", err.Error())\n\t}\n\tif response.Diagnosis != nil && response.Diagnosis.Code != 0 {\n\t\tswitch response.Diagnosis.Code {\n\t\tcase common.Diagnosis_IllegalToken:\n\t\t\treturn nil, diagnostics.AddErrorMsg(\"Login failed, The Selefra Cloud recognizes that the token you entered is not a valid Token\")\n\t\tdefault:\n\t\t\treturn nil, diagnostics.AddErrorMsg(\"Login response error, code = %d, message = %s\", response.Diagnosis.Code, response.Diagnosis.Msg)\n\t\t}\n\t}\n\tx.token = token\n\tcredentials := &CloudCredentials{\n\t\tTokenName:   response.TokenName,\n\t\tToken:       token,\n\t\tUserName:    response.UserName,\n\t\tOrgName:     response.OrgName,\n\t\tServerHost:  response.ServerHost,\n\t\tLoginTime:   
time.Now(),\n\t\tLastUseTime: time.Now(),\n\t}\n\td := x.SaveCredentials(credentials)\n\n\treturn credentials, diagnostics.AddDiagnostics(d)\n}\n\n// Logout Log out the current token\nfunc (x *CloudClient) Logout() *schema.Diagnostics {\n\n\tdiagnostics := schema.NewDiagnostics()\n\n\tif x.token == \"\" {\n\t\treturn diagnostics.AddErrorMsg(\"You need login first!\")\n\t}\n\n\tresponse, err := x.cloudClient.Logout(x.BuildMetaContext(), &cloud.RequestEmpty{})\n\tif err != nil {\n\t\treturn diagnostics.AddErrorMsg(\"Logout failed: %s\", err.Error())\n\t}\n\tif response.Diagnosis != nil && response.Diagnosis.Code != 0 {\n\t\treturn diagnostics.AddErrorMsg(\"Logout failed, cloud response code error, code = %d, message = %s\", response.Diagnosis.Code, response.Diagnosis.Msg)\n\t}\n\n\t// clear current client\n\tx.token = \"\"\n\t//x.LogClient = nil\n\t//x.LogStreamUploader = nil\n\t//x.IssueStreamUploader = nil\n\n\t// remove local save credentials\n\td := x.SaveCredentials(&CloudCredentials{})\n\tif diagnostics.AddDiagnostics(d).HasError() {\n\t\treturn diagnostics\n\t}\n\n\treturn diagnostics\n}\n\n//type LoginRequest struct {\n//\tToken string `json:\"token\"`\n//}\n//\n//type LoginData struct {\n//\tUserName  string `json:\"user_name\"`\n//\tTokenName string `json:\"token_name\"`\n//\tOrgName   string `json:\"org_name\"`\n//}\n//\n//func (x *CloudClient) Login(ctx context.Context, token string) (*Response[LoginData], error) {\n//\tresponse, err := http_client.PostJson[*LoginRequest, *Response[LoginData]](ctx, x.buildAPIURL(\"/cli/login\"), &LoginRequest{\n//\t\tToken: token,\n//\t})\n//\tif err != nil {\n//\t\treturn nil, err\n//\t}\n//\tif err := response.Check(); err != nil {\n//\t\treturn nil, err\n//\t}\n//\tx.token = token\n//\treturn response, nil\n//}\n//\n//// ------------------------------------------------- --------------------------------------------------------------------\n//\n//type logoutData struct {\n//}\n//\n//type LogoutRequest struct {\n//\tToken 
string `json:\"token\"`\n//}\n//\n//type LogoutResponse struct {\n//}\n//\n//func (x *CloudClient) Logout(ctx context.Context) error {\n//\n//\tvar info = make(map[string]string)\n//\tinfo[\"token\"] = token\n//\tres, err := CliHttpClient[logoutData](\"POST\", \"/cli/logout\", info)\n//\tif err != nil {\n//\t\treturn err\n//\t}\n//\tif res.Code != 0 {\n//\t\treturn fmt.Errorf(res.Msg)\n//\t}\n//\treturn nil\n//}\n\n//func (x *CloudClient) Logout() error {\n//\n//\tif x.token == \"\" {\n//\t\treturn fmt.Errorf(\"not login status\")\n//\t}\n//\n//\terr := http_client.Logout(token)\n//\tif err != nil {\n//\t\tui.Errorln(\"Logout error:\" + err.Error())\n//\t\treturn nil\n//\t}\n//\n//\terr = utils.SaveCredentials(\"\")\n//\tif err != nil {\n//\t\tui.Errorln(err.Error())\n//\t}\n//\n//\treturn nil\n//}\n//\n//// ------------------------------------------------- --------------------------------------------------------------------\n\n//// ShouldLogin should login to selefra cloud\n//// if login successfully, global token will be set, else return an error\n//func (x *CloudClient) ShouldLogin(tokens ...string) error {\n//\tvar err error\n//\tvar token string\n//\tif len(tokens) > 0 {\n//\t\ttoken = tokens[0]\n//\t}\n//\n//\tif token == \"\" {\n//\t\ttoken, err = utils.GetCredentialsToken()\n//\t\tif err != nil {\n//\t\t\tui.Errorln(err.Error())\n//\t\t\treturn err\n//\t\t}\n//\t}\n//\n//\tres, err := http_client.Login(token)\n//\tif err != nil {\n//\t\treturn ErrLoginFailed\n//\t}\n//\tdisplayLoginSuccess(res.Data.OrgName, res.Data.TokenName, token)\n//\n//\treturn nil\n//}\n\n//// MustLogin unless the user enters wrong token, login is guaranteed\n//func MustLogin(token string) error {\n//\tvar err error\n//\n//\tif err := ShouldLogin(token); err == nil {\n//\t\treturn nil\n//\t}\n//\n//\ttoken, err = getInputToken()\n//\tif err != nil {\n//\t\treturn errors.New(\"input token failed\")\n//\t}\n//\tif err = ShouldLogin(token); err == nil {\n//\t\treturn 
nil\n//\t}\n//\n//\treturn ErrLoginFailed\n//}\n"
  },
  {
    "path": "pkg/cloud_sdk/user_test.go",
    "content": "package cloud_sdk\n\nimport (\n\t\"github.com/selefra/selefra/pkg/utils\"\n\t\"github.com/stretchr/testify/assert\"\n\t\"testing\"\n)\n\nfunc TestCloudClient_IsLoggedIn(t *testing.T) {\n\n\t// unauth\n\tunAuthCloudClient := getUnAuthSDKClientForTest()\n\tassert.False(t, unAuthCloudClient.IsLoggedIn())\n\n\t// auth\n\tauthCloudClient := getAuthedSDKClientForTest()\n\tassert.True(t, authCloudClient.IsLoggedIn())\n\n}\n\nfunc TestCloudClient_Login(t *testing.T) {\n\tclient := getAuthedSDKClientForTest()\n\tassert.NotNil(t, client)\n}\n\nfunc TestCloudClient_Logout(t *testing.T) {\n\tclient := getAuthedSDKClientForTest()\n\tassert.True(t, client.IsLoggedIn())\n\td := client.Logout()\n\tif utils.IsNotEmpty(d) {\n\t\tt.Log(d.ToString())\n\t}\n\tassert.False(t, utils.HasError(d))\n\tassert.False(t, client.IsLoggedIn())\n}\n"
  },
  {
    "path": "pkg/cloud_sdk/workspace.go",
    "content": "package cloud_sdk\n\nimport (\n\t\"context\"\n\t\"github.com/selefra/selefra-provider-sdk/provider/schema\"\n\t\"github.com/selefra/selefra/pkg/grpc/pb/cloud\"\n\t\"github.com/selefra/selefra/pkg/modules/module_loader\"\n\t\"os\"\n\t\"path/filepath\"\n)\n\nfunc (x *CloudClient) UploadWorkspace(ctx context.Context, projectName, workspace string) *schema.Diagnostics {\n\tdiagnostics := schema.NewDiagnostics()\n\tfileSlice, err := workspaceYamlFileSlice(workspace)\n\tif err != nil {\n\t\treturn diagnostics.AddErrorMsg(\"make workspace file map error: %s\", err.Error())\n\t}\n\tresponse, err := x.cloudClient.SyncWorkplace(x.BuildMetaContext(), &cloud.SyncWorkplace_Request{\n\t\tProjectName:      projectName,\n\t\tProjectWorkplace: fileSlice,\n\t})\n\tif err != nil {\n\t\treturn schema.NewDiagnostics().AddErrorMsg(\"upload workspace file error: %s\", err.Error())\n\t}\n\tif response.Diagnosis != nil && response.Diagnosis.Code != 0 {\n\t\treturn schema.NewDiagnostics().AddErrorMsg(\"upload workspace file response error, code = %d, message = %s\", response.Diagnosis.Code, response.Diagnosis.Msg)\n\t}\n\treturn nil\n}\n\nfunc workspaceYamlFileSlice(dirname string) ([]*cloud.SyncWorkplace_ProjectWorkplace, error) {\n\n\tfileSlice := make([]*cloud.SyncWorkplace_ProjectWorkplace, 0)\n\tvar fn func(dirname string) error\n\tfn = func(dirname string) error {\n\t\tfiles, e := os.ReadDir(dirname)\n\t\tif e != nil {\n\t\t\treturn e\n\t\t}\n\t\tfor _, file := range files {\n\t\t\tif file.IsDir() {\n\t\t\t\tif err := fn(filepath.Join(dirname, file.Name())); err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\tif module_loader.IsYamlFile(file) {\n\t\t\t\t\tb, e := os.ReadFile(filepath.Join(dirname, file.Name()))\n\t\t\t\t\tif e != nil {\n\t\t\t\t\t\treturn e\n\t\t\t\t\t}\n\t\t\t\t\tfileSlice = append(fileSlice, &cloud.SyncWorkplace_ProjectWorkplace{\n\t\t\t\t\t\tPath:        filepath.Join(dirname, file.Name()),\n\t\t\t\t\t\tYamlContent: 
string(b),\n\t\t\t\t\t})\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\treturn nil\n\t}\n\n\tif err := fn(dirname); err != nil {\n\t\treturn nil, err\n\t}\n\treturn fileSlice, nil\n}\n\n//\n//type ModuleLocalDirectory struct {\n//}\n//\n//type WorkPlaceReq struct {\n//\tData        []Data `json:\"data\"`\n//\tProjectName string `json:\"project_name\"`\n//\tToken       string `json:\"token\"`\n//}\n//\n//type Data struct {\n//\tPath        string `json:\"path\"`\n//\tYAMLContent string `json:\"yaml_content\"`\n//}\n//\n//func (x *CloudClient) UploadWorkspace(project string) error {\n//\tfileMap, err := config.FileMap(global.WorkSpace())\n//\tif err != nil {\n//\t\treturn err\n//\t}\n//\terr = http_client.TryUploadWorkspace(project, fileMap)\n//\tif err != nil {\n//\t\treturn err\n//\t}\n//\treturn nil\n//}\n//\n//// TODO Do not upload sensitive information\n//// TryUploadWorkspace upload downloadWorkspace to selefra cloud when use is login, else do nothing\n//func (x *CloudClient) TryUploadWorkspace(project string, fileMap map[string]string) error {\n//\tif global.Token() == \"\" || project == \"\" {\n//\t\treturn nil\n//\t}\n//\n//\tvar workplace WorkPlaceReq\n//\n//\tworkplace.Token = global.Token()\n//\tworkplace.ProjectName = project\n//\tworkplace.Data = make([]Data, 0)\n//\tfor k, v := range fileMap {\n//\t\tworkplace.Data = append(workplace.Data, Data{\n//\t\t\tPath:        k,\n//\t\t\tYAMLContent: v,\n//\t\t})\n//\t}\n//\tres, err := CliHttpClient[UploadWorkplaceRes](\"POST\", \"/cli/upload_workplace\", workplace)\n//\tif err != nil {\n//\t\treturn err\n//\t}\n//\tif res.Code != 0 {\n//\t\treturn errors.New(res.Msg)\n//\t}\n//\treturn nil\n//}\n"
  },
  {
    "path": "pkg/cloud_sdk/workspace_test.go",
    "content": "package cloud_sdk\n\nimport (\n\t\"context\"\n\t\"github.com/selefra/selefra-utils/pkg/id_util\"\n\t\"github.com/selefra/selefra/pkg/utils\"\n\t\"github.com/stretchr/testify/assert\"\n\t\"testing\"\n)\n\nfunc TestCloudClient_UploadWorkspace(t *testing.T) {\n\tclient := getAuthedSDKClientForTest()\n\n\tproject, diagnostics := client.CreateProject(\"cli-test-project-\" + id_util.RandomId())\n\tassert.False(t, utils.HasError(diagnostics))\n\tassert.NotNil(t, project)\n\n\td := client.UploadWorkspace(context.Background(), project.Name, \"./test_data/sync_workspace\")\n\tassert.False(t, utils.HasError(d))\n}\n"
  },
  {
    "path": "pkg/debug/debug.go",
    "content": "package debug\n\nimport (\n\t\"errors\"\n\t\"github.com/natefinch/lumberjack\"\n\t\"github.com/selefra/selefra/pkg/utils\"\n\t\"go.uber.org/zap\"\n\t\"go.uber.org/zap/zapcore\"\n\t\"os\"\n\t\"path/filepath\"\n\t\"runtime/pprof\"\n\t\"sync\"\n\t\"time\"\n)\n\n// SamplingService A service that samples the currently running program\ntype SamplingService struct {\n\n\t// The directory to which the sampled file is output\n\toutputDirectory string\n\n\t// Whether the system is running\n\tlock      sync.Mutex\n\tisRunning bool\n\n\tsamplingInterval time.Duration\n\n\tlogger *zap.Logger\n}\n\nfunc NewSamplingService(outputDirectory string, samplingInterval time.Duration) *SamplingService {\n\t_ = utils.EnsureDirectoryExists(outputDirectory)\n\n\t// init logger\n\tcore := zapcore.NewCore(getEncoder(), getLogWriter(outputDirectory), zapcore.DebugLevel)\n\tlogger := zap.New(core, zap.AddCaller())\n\n\treturn &SamplingService{\n\t\toutputDirectory:  outputDirectory,\n\t\tlock:             sync.Mutex{},\n\t\tisRunning:        false,\n\t\tsamplingInterval: samplingInterval,\n\t\tlogger:           logger,\n\t}\n}\n\nfunc getEncoder() zapcore.Encoder {\n\tencoderConfig := zap.NewProductionEncoderConfig()\n\tencoderConfig.EncodeTime = zapcore.ISO8601TimeEncoder\n\tencoderConfig.EncodeLevel = zapcore.CapitalLevelEncoder\n\treturn zapcore.NewConsoleEncoder(encoderConfig)\n}\n\nfunc getLogWriter(outputDirectory string) zapcore.WriteSyncer {\n\tlumberJackLogger := &lumberjack.Logger{\n\t\tFilename: filepath.Join(outputDirectory, \"pprof.log\"),\n\t\tMaxAge:   30,\n\t\tCompress: false,\n\t}\n\treturn zapcore.AddSync(lumberJackLogger)\n}\n\nfunc (x *SamplingService) Start() error {\n\tx.lock.Lock()\n\tdefer x.lock.Unlock()\n\n\tif x.isRunning {\n\t\treturn errors.New(\"service already running\")\n\t}\n\tx.StartWorker()\n\tx.isRunning = true\n\treturn nil\n}\n\nfunc (x *SamplingService) Stop() {\n\tx.lock.Lock()\n\tdefer x.lock.Unlock()\n\n\tx.isRunning = false\n}\n\nfunc 
(x *SamplingService) IsRunning() bool {\n\tx.lock.Lock()\n\tdefer x.lock.Unlock()\n\n\treturn x.isRunning\n}\n\nfunc (x *SamplingService) StartWorker() {\n\tgo func() {\n\t\tdefer func() {\n\t\t\tx.logger.Debug(\"pprof sampling worker exit\")\n\t\t}()\n\t\tfor x.IsRunning() {\n\t\t\tx.SamplingOnce()\n\t\t\ttime.Sleep(x.samplingInterval)\n\t\t}\n\t}()\n\n}\n\nfunc (x *SamplingService) SamplingOnce() {\n\tx.logger.Debug(\"begin pprof sampling...\")\n\tbegin := time.Now()\n\tfor _, profile := range pprof.Profiles() {\n\t\toutputFilePath := filepath.Join(x.outputDirectory, profile.Name()+\".pprof\")\n\t\tfile, err := os.OpenFile(outputFilePath, os.O_WRONLY|os.O_CREATE|os.O_APPEND, os.ModeAppend|os.ModePerm)\n\t\tif err != nil {\n\t\t\tx.logger.Error(\"sampling error:\", zap.String(\"type\", profile.Name()), zap.Error(err))\n\t\t\tcontinue\n\t\t}\n\t\terr = profile.WriteTo(file, 1)\n\t\t_ = file.Close()\n\t\tif err != nil {\n\t\t\tx.logger.Error(\"save sampling error\", zap.String(\"type\", profile.Name()), zap.Error(err))\n\t\t\tcontinue\n\t\t}\n\t}\n\tcost := time.Now().Sub(begin)\n\tx.logger.Debug(\"pprof sampling done\", zap.String(\"cost\", cost.String()))\n}\n
  },
  {
    "path": "pkg/debug/debug_test.go",
    "content": "package debug\n\nimport (\n\t\"github.com/stretchr/testify/assert\"\n\t\"testing\"\n\t\"time\"\n)\n\nfunc TestNewSamplingService(t *testing.T) {\n\tservice := NewSamplingService(\"./test_download\", time.Second*30)\n\terr := service.Start()\n\tassert.Nil(t, err)\n\n\ttime.Sleep(time.Minute * 30)\n\tservice.Stop()\n}\n"
  },
  {
    "path": "pkg/grpc/pb/cloud/cloud.pb.go",
    "content": "// Code generated by protoc-gen-go. DO NOT EDIT.\n// versions:\n// \tprotoc-gen-go v1.28.1\n// \tprotoc        v3.21.8\n// source: cloud/cloud.proto\n\npackage cloud\n\nimport (\n\t\"github.com/selefra/selefra/pkg/grpc/pb/common\"\n\tprotoreflect \"google.golang.org/protobuf/reflect/protoreflect\"\n\tprotoimpl \"google.golang.org/protobuf/runtime/protoimpl\"\n\treflect \"reflect\"\n\tsync \"sync\"\n)\n\nconst (\n\t// Verify that this generated code is sufficiently up-to-date.\n\t_ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)\n\t// Verify that runtime/protoimpl is sufficiently up-to-date.\n\t_ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)\n)\n\ntype RequestEmpty struct {\n\tstate         protoimpl.MessageState\n\tsizeCache     protoimpl.SizeCache\n\tunknownFields protoimpl.UnknownFields\n}\n\nfunc (x *RequestEmpty) Reset() {\n\t*x = RequestEmpty{}\n\tif protoimpl.UnsafeEnabled {\n\t\tmi := &file_cloud_cloud_proto_msgTypes[0]\n\t\tms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))\n\t\tms.StoreMessageInfo(mi)\n\t}\n}\n\nfunc (x *RequestEmpty) String() string {\n\treturn protoimpl.X.MessageStringOf(x)\n}\n\nfunc (*RequestEmpty) ProtoMessage() {}\n\nfunc (x *RequestEmpty) ProtoReflect() protoreflect.Message {\n\tmi := &file_cloud_cloud_proto_msgTypes[0]\n\tif protoimpl.UnsafeEnabled && x != nil {\n\t\tms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))\n\t\tif ms.LoadMessageInfo() == nil {\n\t\t\tms.StoreMessageInfo(mi)\n\t\t}\n\t\treturn ms\n\t}\n\treturn mi.MessageOf(x)\n}\n\n// Deprecated: Use RequestEmpty.ProtoReflect.Descriptor instead.\nfunc (*RequestEmpty) Descriptor() ([]byte, []int) {\n\treturn file_cloud_cloud_proto_rawDescGZIP(), []int{0}\n}\n\ntype ResponseEmpty struct {\n\tstate         protoimpl.MessageState\n\tsizeCache     protoimpl.SizeCache\n\tunknownFields protoimpl.UnknownFields\n}\n\nfunc (x *ResponseEmpty) Reset() {\n\t*x = ResponseEmpty{}\n\tif protoimpl.UnsafeEnabled {\n\t\tmi := 
&file_cloud_cloud_proto_msgTypes[1]\n\t\tms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))\n\t\tms.StoreMessageInfo(mi)\n\t}\n}\n\nfunc (x *ResponseEmpty) String() string {\n\treturn protoimpl.X.MessageStringOf(x)\n}\n\nfunc (*ResponseEmpty) ProtoMessage() {}\n\nfunc (x *ResponseEmpty) ProtoReflect() protoreflect.Message {\n\tmi := &file_cloud_cloud_proto_msgTypes[1]\n\tif protoimpl.UnsafeEnabled && x != nil {\n\t\tms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))\n\t\tif ms.LoadMessageInfo() == nil {\n\t\t\tms.StoreMessageInfo(mi)\n\t\t}\n\t\treturn ms\n\t}\n\treturn mi.MessageOf(x)\n}\n\n// Deprecated: Use ResponseEmpty.ProtoReflect.Descriptor instead.\nfunc (*ResponseEmpty) Descriptor() ([]byte, []int) {\n\treturn file_cloud_cloud_proto_rawDescGZIP(), []int{1}\n}\n\ntype Login struct {\n\tstate         protoimpl.MessageState\n\tsizeCache     protoimpl.SizeCache\n\tunknownFields protoimpl.UnknownFields\n}\n\nfunc (x *Login) Reset() {\n\t*x = Login{}\n\tif protoimpl.UnsafeEnabled {\n\t\tmi := &file_cloud_cloud_proto_msgTypes[2]\n\t\tms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))\n\t\tms.StoreMessageInfo(mi)\n\t}\n}\n\nfunc (x *Login) String() string {\n\treturn protoimpl.X.MessageStringOf(x)\n}\n\nfunc (*Login) ProtoMessage() {}\n\nfunc (x *Login) ProtoReflect() protoreflect.Message {\n\tmi := &file_cloud_cloud_proto_msgTypes[2]\n\tif protoimpl.UnsafeEnabled && x != nil {\n\t\tms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))\n\t\tif ms.LoadMessageInfo() == nil {\n\t\t\tms.StoreMessageInfo(mi)\n\t\t}\n\t\treturn ms\n\t}\n\treturn mi.MessageOf(x)\n}\n\n// Deprecated: Use Login.ProtoReflect.Descriptor instead.\nfunc (*Login) Descriptor() ([]byte, []int) {\n\treturn file_cloud_cloud_proto_rawDescGZIP(), []int{2}\n}\n\ntype FetchOrgDsn struct {\n\tstate         protoimpl.MessageState\n\tsizeCache     protoimpl.SizeCache\n\tunknownFields protoimpl.UnknownFields\n}\n\nfunc (x *FetchOrgDsn) Reset() {\n\t*x = FetchOrgDsn{}\n\tif 
protoimpl.UnsafeEnabled {\n\t\tmi := &file_cloud_cloud_proto_msgTypes[3]\n\t\tms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))\n\t\tms.StoreMessageInfo(mi)\n\t}\n}\n\nfunc (x *FetchOrgDsn) String() string {\n\treturn protoimpl.X.MessageStringOf(x)\n}\n\nfunc (*FetchOrgDsn) ProtoMessage() {}\n\nfunc (x *FetchOrgDsn) ProtoReflect() protoreflect.Message {\n\tmi := &file_cloud_cloud_proto_msgTypes[3]\n\tif protoimpl.UnsafeEnabled && x != nil {\n\t\tms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))\n\t\tif ms.LoadMessageInfo() == nil {\n\t\t\tms.StoreMessageInfo(mi)\n\t\t}\n\t\treturn ms\n\t}\n\treturn mi.MessageOf(x)\n}\n\n// Deprecated: Use FetchOrgDsn.ProtoReflect.Descriptor instead.\nfunc (*FetchOrgDsn) Descriptor() ([]byte, []int) {\n\treturn file_cloud_cloud_proto_rawDescGZIP(), []int{3}\n}\n\ntype Logout struct {\n\tstate         protoimpl.MessageState\n\tsizeCache     protoimpl.SizeCache\n\tunknownFields protoimpl.UnknownFields\n}\n\nfunc (x *Logout) Reset() {\n\t*x = Logout{}\n\tif protoimpl.UnsafeEnabled {\n\t\tmi := &file_cloud_cloud_proto_msgTypes[4]\n\t\tms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))\n\t\tms.StoreMessageInfo(mi)\n\t}\n}\n\nfunc (x *Logout) String() string {\n\treturn protoimpl.X.MessageStringOf(x)\n}\n\nfunc (*Logout) ProtoMessage() {}\n\nfunc (x *Logout) ProtoReflect() protoreflect.Message {\n\tmi := &file_cloud_cloud_proto_msgTypes[4]\n\tif protoimpl.UnsafeEnabled && x != nil {\n\t\tms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))\n\t\tif ms.LoadMessageInfo() == nil {\n\t\t\tms.StoreMessageInfo(mi)\n\t\t}\n\t\treturn ms\n\t}\n\treturn mi.MessageOf(x)\n}\n\n// Deprecated: Use Logout.ProtoReflect.Descriptor instead.\nfunc (*Logout) Descriptor() ([]byte, []int) {\n\treturn file_cloud_cloud_proto_rawDescGZIP(), []int{4}\n}\n\ntype CreateProject struct {\n\tstate         protoimpl.MessageState\n\tsizeCache     protoimpl.SizeCache\n\tunknownFields protoimpl.UnknownFields\n}\n\nfunc (x *CreateProject) Reset() {\n\t*x = 
CreateProject{}\n\tif protoimpl.UnsafeEnabled {\n\t\tmi := &file_cloud_cloud_proto_msgTypes[5]\n\t\tms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))\n\t\tms.StoreMessageInfo(mi)\n\t}\n}\n\nfunc (x *CreateProject) String() string {\n\treturn protoimpl.X.MessageStringOf(x)\n}\n\nfunc (*CreateProject) ProtoMessage() {}\n\nfunc (x *CreateProject) ProtoReflect() protoreflect.Message {\n\tmi := &file_cloud_cloud_proto_msgTypes[5]\n\tif protoimpl.UnsafeEnabled && x != nil {\n\t\tms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))\n\t\tif ms.LoadMessageInfo() == nil {\n\t\t\tms.StoreMessageInfo(mi)\n\t\t}\n\t\treturn ms\n\t}\n\treturn mi.MessageOf(x)\n}\n\n// Deprecated: Use CreateProject.ProtoReflect.Descriptor instead.\nfunc (*CreateProject) Descriptor() ([]byte, []int) {\n\treturn file_cloud_cloud_proto_rawDescGZIP(), []int{5}\n}\n\ntype SyncWorkplace struct {\n\tstate         protoimpl.MessageState\n\tsizeCache     protoimpl.SizeCache\n\tunknownFields protoimpl.UnknownFields\n}\n\nfunc (x *SyncWorkplace) Reset() {\n\t*x = SyncWorkplace{}\n\tif protoimpl.UnsafeEnabled {\n\t\tmi := &file_cloud_cloud_proto_msgTypes[6]\n\t\tms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))\n\t\tms.StoreMessageInfo(mi)\n\t}\n}\n\nfunc (x *SyncWorkplace) String() string {\n\treturn protoimpl.X.MessageStringOf(x)\n}\n\nfunc (*SyncWorkplace) ProtoMessage() {}\n\nfunc (x *SyncWorkplace) ProtoReflect() protoreflect.Message {\n\tmi := &file_cloud_cloud_proto_msgTypes[6]\n\tif protoimpl.UnsafeEnabled && x != nil {\n\t\tms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))\n\t\tif ms.LoadMessageInfo() == nil {\n\t\t\tms.StoreMessageInfo(mi)\n\t\t}\n\t\treturn ms\n\t}\n\treturn mi.MessageOf(x)\n}\n\n// Deprecated: Use SyncWorkplace.ProtoReflect.Descriptor instead.\nfunc (*SyncWorkplace) Descriptor() ([]byte, []int) {\n\treturn file_cloud_cloud_proto_rawDescGZIP(), []int{6}\n}\n\ntype CreateTask struct {\n\tstate         protoimpl.MessageState\n\tsizeCache     
protoimpl.SizeCache\n\tunknownFields protoimpl.UnknownFields\n}\n\nfunc (x *CreateTask) Reset() {\n\t*x = CreateTask{}\n\tif protoimpl.UnsafeEnabled {\n\t\tmi := &file_cloud_cloud_proto_msgTypes[7]\n\t\tms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))\n\t\tms.StoreMessageInfo(mi)\n\t}\n}\n\nfunc (x *CreateTask) String() string {\n\treturn protoimpl.X.MessageStringOf(x)\n}\n\nfunc (*CreateTask) ProtoMessage() {}\n\nfunc (x *CreateTask) ProtoReflect() protoreflect.Message {\n\tmi := &file_cloud_cloud_proto_msgTypes[7]\n\tif protoimpl.UnsafeEnabled && x != nil {\n\t\tms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))\n\t\tif ms.LoadMessageInfo() == nil {\n\t\t\tms.StoreMessageInfo(mi)\n\t\t}\n\t\treturn ms\n\t}\n\treturn mi.MessageOf(x)\n}\n\n// Deprecated: Use CreateTask.ProtoReflect.Descriptor instead.\nfunc (*CreateTask) Descriptor() ([]byte, []int) {\n\treturn file_cloud_cloud_proto_rawDescGZIP(), []int{7}\n}\n\ntype Login_Request struct {\n\tstate         protoimpl.MessageState\n\tsizeCache     protoimpl.SizeCache\n\tunknownFields protoimpl.UnknownFields\n\n\tToken string `protobuf:\"bytes,1,opt,name=token,proto3\" json:\"token,omitempty\"`\n}\n\nfunc (x *Login_Request) Reset() {\n\t*x = Login_Request{}\n\tif protoimpl.UnsafeEnabled {\n\t\tmi := &file_cloud_cloud_proto_msgTypes[8]\n\t\tms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))\n\t\tms.StoreMessageInfo(mi)\n\t}\n}\n\nfunc (x *Login_Request) String() string {\n\treturn protoimpl.X.MessageStringOf(x)\n}\n\nfunc (*Login_Request) ProtoMessage() {}\n\nfunc (x *Login_Request) ProtoReflect() protoreflect.Message {\n\tmi := &file_cloud_cloud_proto_msgTypes[8]\n\tif protoimpl.UnsafeEnabled && x != nil {\n\t\tms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))\n\t\tif ms.LoadMessageInfo() == nil {\n\t\t\tms.StoreMessageInfo(mi)\n\t\t}\n\t\treturn ms\n\t}\n\treturn mi.MessageOf(x)\n}\n\n// Deprecated: Use Login_Request.ProtoReflect.Descriptor instead.\nfunc (*Login_Request) Descriptor() ([]byte, 
[]int) {\n\treturn file_cloud_cloud_proto_rawDescGZIP(), []int{2, 0}\n}\n\nfunc (x *Login_Request) GetToken() string {\n\tif x != nil {\n\t\treturn x.Token\n\t}\n\treturn \"\"\n}\n\ntype Login_Response struct {\n\tstate         protoimpl.MessageState\n\tsizeCache     protoimpl.SizeCache\n\tunknownFields protoimpl.UnknownFields\n\n\tDiagnosis  *common.Diagnosis `protobuf:\"bytes,1,opt,name=diagnosis,proto3\" json:\"diagnosis,omitempty\"`\n\tTokenName  string            `protobuf:\"bytes,2,opt,name=token_name,json=tokenName,proto3\" json:\"token_name,omitempty\"`\n\tUserName   string            `protobuf:\"bytes,3,opt,name=user_name,json=userName,proto3\" json:\"user_name,omitempty\"`\n\tOrgName    string            `protobuf:\"bytes,4,opt,name=org_name,json=orgName,proto3\" json:\"org_name,omitempty\"`\n\tServerHost string            `protobuf:\"bytes,5,opt,name=server_host,json=serverHost,proto3\" json:\"server_host,omitempty\"`\n}\n\nfunc (x *Login_Response) Reset() {\n\t*x = Login_Response{}\n\tif protoimpl.UnsafeEnabled {\n\t\tmi := &file_cloud_cloud_proto_msgTypes[9]\n\t\tms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))\n\t\tms.StoreMessageInfo(mi)\n\t}\n}\n\nfunc (x *Login_Response) String() string {\n\treturn protoimpl.X.MessageStringOf(x)\n}\n\nfunc (*Login_Response) ProtoMessage() {}\n\nfunc (x *Login_Response) ProtoReflect() protoreflect.Message {\n\tmi := &file_cloud_cloud_proto_msgTypes[9]\n\tif protoimpl.UnsafeEnabled && x != nil {\n\t\tms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))\n\t\tif ms.LoadMessageInfo() == nil {\n\t\t\tms.StoreMessageInfo(mi)\n\t\t}\n\t\treturn ms\n\t}\n\treturn mi.MessageOf(x)\n}\n\n// Deprecated: Use Login_Response.ProtoReflect.Descriptor instead.\nfunc (*Login_Response) Descriptor() ([]byte, []int) {\n\treturn file_cloud_cloud_proto_rawDescGZIP(), []int{2, 1}\n}\n\nfunc (x *Login_Response) GetDiagnosis() *common.Diagnosis {\n\tif x != nil {\n\t\treturn x.Diagnosis\n\t}\n\treturn nil\n}\n\nfunc (x 
*Login_Response) GetTokenName() string {\n\tif x != nil {\n\t\treturn x.TokenName\n\t}\n\treturn \"\"\n}\n\nfunc (x *Login_Response) GetUserName() string {\n\tif x != nil {\n\t\treturn x.UserName\n\t}\n\treturn \"\"\n}\n\nfunc (x *Login_Response) GetOrgName() string {\n\tif x != nil {\n\t\treturn x.OrgName\n\t}\n\treturn \"\"\n}\n\nfunc (x *Login_Response) GetServerHost() string {\n\tif x != nil {\n\t\treturn x.ServerHost\n\t}\n\treturn \"\"\n}\n\ntype FetchOrgDsn_Response struct {\n\tstate         protoimpl.MessageState\n\tsizeCache     protoimpl.SizeCache\n\tunknownFields protoimpl.UnknownFields\n\n\tDiagnosis *common.Diagnosis `protobuf:\"bytes,1,opt,name=diagnosis,proto3\" json:\"diagnosis,omitempty\"`\n\tDsn       string            `protobuf:\"bytes,2,opt,name=dsn,proto3\" json:\"dsn,omitempty\"`\n}\n\nfunc (x *FetchOrgDsn_Response) Reset() {\n\t*x = FetchOrgDsn_Response{}\n\tif protoimpl.UnsafeEnabled {\n\t\tmi := &file_cloud_cloud_proto_msgTypes[10]\n\t\tms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))\n\t\tms.StoreMessageInfo(mi)\n\t}\n}\n\nfunc (x *FetchOrgDsn_Response) String() string {\n\treturn protoimpl.X.MessageStringOf(x)\n}\n\nfunc (*FetchOrgDsn_Response) ProtoMessage() {}\n\nfunc (x *FetchOrgDsn_Response) ProtoReflect() protoreflect.Message {\n\tmi := &file_cloud_cloud_proto_msgTypes[10]\n\tif protoimpl.UnsafeEnabled && x != nil {\n\t\tms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))\n\t\tif ms.LoadMessageInfo() == nil {\n\t\t\tms.StoreMessageInfo(mi)\n\t\t}\n\t\treturn ms\n\t}\n\treturn mi.MessageOf(x)\n}\n\n// Deprecated: Use FetchOrgDsn_Response.ProtoReflect.Descriptor instead.\nfunc (*FetchOrgDsn_Response) Descriptor() ([]byte, []int) {\n\treturn file_cloud_cloud_proto_rawDescGZIP(), []int{3, 0}\n}\n\nfunc (x *FetchOrgDsn_Response) GetDiagnosis() *common.Diagnosis {\n\tif x != nil {\n\t\treturn x.Diagnosis\n\t}\n\treturn nil\n}\n\nfunc (x *FetchOrgDsn_Response) GetDsn() string {\n\tif x != nil {\n\t\treturn x.Dsn\n\t}\n\treturn 
\"\"\n}\n\ntype Logout_Response struct {\n\tstate         protoimpl.MessageState\n\tsizeCache     protoimpl.SizeCache\n\tunknownFields protoimpl.UnknownFields\n\n\tDiagnosis *common.Diagnosis `protobuf:\"bytes,1,opt,name=diagnosis,proto3\" json:\"diagnosis,omitempty\"`\n}\n\nfunc (x *Logout_Response) Reset() {\n\t*x = Logout_Response{}\n\tif protoimpl.UnsafeEnabled {\n\t\tmi := &file_cloud_cloud_proto_msgTypes[11]\n\t\tms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))\n\t\tms.StoreMessageInfo(mi)\n\t}\n}\n\nfunc (x *Logout_Response) String() string {\n\treturn protoimpl.X.MessageStringOf(x)\n}\n\nfunc (*Logout_Response) ProtoMessage() {}\n\nfunc (x *Logout_Response) ProtoReflect() protoreflect.Message {\n\tmi := &file_cloud_cloud_proto_msgTypes[11]\n\tif protoimpl.UnsafeEnabled && x != nil {\n\t\tms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))\n\t\tif ms.LoadMessageInfo() == nil {\n\t\t\tms.StoreMessageInfo(mi)\n\t\t}\n\t\treturn ms\n\t}\n\treturn mi.MessageOf(x)\n}\n\n// Deprecated: Use Logout_Response.ProtoReflect.Descriptor instead.\nfunc (*Logout_Response) Descriptor() ([]byte, []int) {\n\treturn file_cloud_cloud_proto_rawDescGZIP(), []int{4, 0}\n}\n\nfunc (x *Logout_Response) GetDiagnosis() *common.Diagnosis {\n\tif x != nil {\n\t\treturn x.Diagnosis\n\t}\n\treturn nil\n}\n\ntype CreateProject_Request struct {\n\tstate         protoimpl.MessageState\n\tsizeCache     protoimpl.SizeCache\n\tunknownFields protoimpl.UnknownFields\n\n\tName string `protobuf:\"bytes,1,opt,name=name,proto3\" json:\"name,omitempty\"`\n}\n\nfunc (x *CreateProject_Request) Reset() {\n\t*x = CreateProject_Request{}\n\tif protoimpl.UnsafeEnabled {\n\t\tmi := &file_cloud_cloud_proto_msgTypes[12]\n\t\tms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))\n\t\tms.StoreMessageInfo(mi)\n\t}\n}\n\nfunc (x *CreateProject_Request) String() string {\n\treturn protoimpl.X.MessageStringOf(x)\n}\n\nfunc (*CreateProject_Request) ProtoMessage() {}\n\nfunc (x *CreateProject_Request) 
ProtoReflect() protoreflect.Message {\n\tmi := &file_cloud_cloud_proto_msgTypes[12]\n\tif protoimpl.UnsafeEnabled && x != nil {\n\t\tms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))\n\t\tif ms.LoadMessageInfo() == nil {\n\t\t\tms.StoreMessageInfo(mi)\n\t\t}\n\t\treturn ms\n\t}\n\treturn mi.MessageOf(x)\n}\n\n// Deprecated: Use CreateProject_Request.ProtoReflect.Descriptor instead.\nfunc (*CreateProject_Request) Descriptor() ([]byte, []int) {\n\treturn file_cloud_cloud_proto_rawDescGZIP(), []int{5, 0}\n}\n\nfunc (x *CreateProject_Request) GetName() string {\n\tif x != nil {\n\t\treturn x.Name\n\t}\n\treturn \"\"\n}\n\ntype CreateProject_Response struct {\n\tstate         protoimpl.MessageState\n\tsizeCache     protoimpl.SizeCache\n\tunknownFields protoimpl.UnknownFields\n\n\tDiagnosis *common.Diagnosis `protobuf:\"bytes,1,opt,name=diagnosis,proto3\" json:\"diagnosis,omitempty\"`\n\tName      string            `protobuf:\"bytes,2,opt,name=name,proto3\" json:\"name,omitempty\"`\n\tOrgName   string            `protobuf:\"bytes,3,opt,name=org_name,json=orgName,proto3\" json:\"org_name,omitempty\"`\n}\n\nfunc (x *CreateProject_Response) Reset() {\n\t*x = CreateProject_Response{}\n\tif protoimpl.UnsafeEnabled {\n\t\tmi := &file_cloud_cloud_proto_msgTypes[13]\n\t\tms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))\n\t\tms.StoreMessageInfo(mi)\n\t}\n}\n\nfunc (x *CreateProject_Response) String() string {\n\treturn protoimpl.X.MessageStringOf(x)\n}\n\nfunc (*CreateProject_Response) ProtoMessage() {}\n\nfunc (x *CreateProject_Response) ProtoReflect() protoreflect.Message {\n\tmi := &file_cloud_cloud_proto_msgTypes[13]\n\tif protoimpl.UnsafeEnabled && x != nil {\n\t\tms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))\n\t\tif ms.LoadMessageInfo() == nil {\n\t\t\tms.StoreMessageInfo(mi)\n\t\t}\n\t\treturn ms\n\t}\n\treturn mi.MessageOf(x)\n}\n\n// Deprecated: Use CreateProject_Response.ProtoReflect.Descriptor instead.\nfunc (*CreateProject_Response) Descriptor() 
([]byte, []int) {\n\treturn file_cloud_cloud_proto_rawDescGZIP(), []int{5, 1}\n}\n\nfunc (x *CreateProject_Response) GetDiagnosis() *common.Diagnosis {\n\tif x != nil {\n\t\treturn x.Diagnosis\n\t}\n\treturn nil\n}\n\nfunc (x *CreateProject_Response) GetName() string {\n\tif x != nil {\n\t\treturn x.Name\n\t}\n\treturn \"\"\n}\n\nfunc (x *CreateProject_Response) GetOrgName() string {\n\tif x != nil {\n\t\treturn x.OrgName\n\t}\n\treturn \"\"\n}\n\ntype SyncWorkplace_ProjectWorkplace struct {\n\tstate         protoimpl.MessageState\n\tsizeCache     protoimpl.SizeCache\n\tunknownFields protoimpl.UnknownFields\n\n\tPath        string `protobuf:\"bytes,1,opt,name=path,proto3\" json:\"path,omitempty\"`\n\tYamlContent string `protobuf:\"bytes,2,opt,name=yaml_content,json=yamlContent,proto3\" json:\"yaml_content,omitempty\"`\n}\n\nfunc (x *SyncWorkplace_ProjectWorkplace) Reset() {\n\t*x = SyncWorkplace_ProjectWorkplace{}\n\tif protoimpl.UnsafeEnabled {\n\t\tmi := &file_cloud_cloud_proto_msgTypes[14]\n\t\tms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))\n\t\tms.StoreMessageInfo(mi)\n\t}\n}\n\nfunc (x *SyncWorkplace_ProjectWorkplace) String() string {\n\treturn protoimpl.X.MessageStringOf(x)\n}\n\nfunc (*SyncWorkplace_ProjectWorkplace) ProtoMessage() {}\n\nfunc (x *SyncWorkplace_ProjectWorkplace) ProtoReflect() protoreflect.Message {\n\tmi := &file_cloud_cloud_proto_msgTypes[14]\n\tif protoimpl.UnsafeEnabled && x != nil {\n\t\tms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))\n\t\tif ms.LoadMessageInfo() == nil {\n\t\t\tms.StoreMessageInfo(mi)\n\t\t}\n\t\treturn ms\n\t}\n\treturn mi.MessageOf(x)\n}\n\n// Deprecated: Use SyncWorkplace_ProjectWorkplace.ProtoReflect.Descriptor instead.\nfunc (*SyncWorkplace_ProjectWorkplace) Descriptor() ([]byte, []int) {\n\treturn file_cloud_cloud_proto_rawDescGZIP(), []int{6, 0}\n}\n\nfunc (x *SyncWorkplace_ProjectWorkplace) GetPath() string {\n\tif x != nil {\n\t\treturn x.Path\n\t}\n\treturn \"\"\n}\n\nfunc (x 
*SyncWorkplace_ProjectWorkplace) GetYamlContent() string {\n\tif x != nil {\n\t\treturn x.YamlContent\n\t}\n\treturn \"\"\n}\n\ntype SyncWorkplace_Request struct {\n\tstate         protoimpl.MessageState\n\tsizeCache     protoimpl.SizeCache\n\tunknownFields protoimpl.UnknownFields\n\n\tProjectName      string                            `protobuf:\"bytes,1,opt,name=project_name,json=projectName,proto3\" json:\"project_name,omitempty\"`\n\tProjectWorkplace []*SyncWorkplace_ProjectWorkplace `protobuf:\"bytes,2,rep,name=project_workplace,json=projectWorkplace,proto3\" json:\"project_workplace,omitempty\"`\n}\n\nfunc (x *SyncWorkplace_Request) Reset() {\n\t*x = SyncWorkplace_Request{}\n\tif protoimpl.UnsafeEnabled {\n\t\tmi := &file_cloud_cloud_proto_msgTypes[15]\n\t\tms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))\n\t\tms.StoreMessageInfo(mi)\n\t}\n}\n\nfunc (x *SyncWorkplace_Request) String() string {\n\treturn protoimpl.X.MessageStringOf(x)\n}\n\nfunc (*SyncWorkplace_Request) ProtoMessage() {}\n\nfunc (x *SyncWorkplace_Request) ProtoReflect() protoreflect.Message {\n\tmi := &file_cloud_cloud_proto_msgTypes[15]\n\tif protoimpl.UnsafeEnabled && x != nil {\n\t\tms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))\n\t\tif ms.LoadMessageInfo() == nil {\n\t\t\tms.StoreMessageInfo(mi)\n\t\t}\n\t\treturn ms\n\t}\n\treturn mi.MessageOf(x)\n}\n\n// Deprecated: Use SyncWorkplace_Request.ProtoReflect.Descriptor instead.\nfunc (*SyncWorkplace_Request) Descriptor() ([]byte, []int) {\n\treturn file_cloud_cloud_proto_rawDescGZIP(), []int{6, 1}\n}\n\nfunc (x *SyncWorkplace_Request) GetProjectName() string {\n\tif x != nil {\n\t\treturn x.ProjectName\n\t}\n\treturn \"\"\n}\n\nfunc (x *SyncWorkplace_Request) GetProjectWorkplace() []*SyncWorkplace_ProjectWorkplace {\n\tif x != nil {\n\t\treturn x.ProjectWorkplace\n\t}\n\treturn nil\n}\n\ntype SyncWorkplace_Response struct {\n\tstate         protoimpl.MessageState\n\tsizeCache     protoimpl.SizeCache\n\tunknownFields 
protoimpl.UnknownFields\n\n\tDiagnosis *common.Diagnosis `protobuf:\"bytes,1,opt,name=diagnosis,proto3\" json:\"diagnosis,omitempty\"`\n}\n\nfunc (x *SyncWorkplace_Response) Reset() {\n\t*x = SyncWorkplace_Response{}\n\tif protoimpl.UnsafeEnabled {\n\t\tmi := &file_cloud_cloud_proto_msgTypes[16]\n\t\tms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))\n\t\tms.StoreMessageInfo(mi)\n\t}\n}\n\nfunc (x *SyncWorkplace_Response) String() string {\n\treturn protoimpl.X.MessageStringOf(x)\n}\n\nfunc (*SyncWorkplace_Response) ProtoMessage() {}\n\nfunc (x *SyncWorkplace_Response) ProtoReflect() protoreflect.Message {\n\tmi := &file_cloud_cloud_proto_msgTypes[16]\n\tif protoimpl.UnsafeEnabled && x != nil {\n\t\tms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))\n\t\tif ms.LoadMessageInfo() == nil {\n\t\t\tms.StoreMessageInfo(mi)\n\t\t}\n\t\treturn ms\n\t}\n\treturn mi.MessageOf(x)\n}\n\n// Deprecated: Use SyncWorkplace_Response.ProtoReflect.Descriptor instead.\nfunc (*SyncWorkplace_Response) Descriptor() ([]byte, []int) {\n\treturn file_cloud_cloud_proto_rawDescGZIP(), []int{6, 2}\n}\n\nfunc (x *SyncWorkplace_Response) GetDiagnosis() *common.Diagnosis {\n\tif x != nil {\n\t\treturn x.Diagnosis\n\t}\n\treturn nil\n}\n\ntype CreateTask_Request struct {\n\tstate         protoimpl.MessageState\n\tsizeCache     protoimpl.SizeCache\n\tunknownFields protoimpl.UnknownFields\n\n\tProjectName string `protobuf:\"bytes,1,opt,name=project_name,json=projectName,proto3\" json:\"project_name,omitempty\"`\n\tTaskId      string `protobuf:\"bytes,2,opt,name=task_id,json=taskId,proto3\" json:\"task_id,omitempty\"`\n\tTaskSource  string `protobuf:\"bytes,3,opt,name=task_source,json=taskSource,proto3\" json:\"task_source,omitempty\"`\n\tName        string `protobuf:\"bytes,4,opt,name=name,proto3\" json:\"name,omitempty\"`\n}\n\nfunc (x *CreateTask_Request) Reset() {\n\t*x = CreateTask_Request{}\n\tif protoimpl.UnsafeEnabled {\n\t\tmi := &file_cloud_cloud_proto_msgTypes[17]\n\t\tms := 
protoimpl.X.MessageStateOf(protoimpl.Pointer(x))\n\t\tms.StoreMessageInfo(mi)\n\t}\n}\n\nfunc (x *CreateTask_Request) String() string {\n\treturn protoimpl.X.MessageStringOf(x)\n}\n\nfunc (*CreateTask_Request) ProtoMessage() {}\n\nfunc (x *CreateTask_Request) ProtoReflect() protoreflect.Message {\n\tmi := &file_cloud_cloud_proto_msgTypes[17]\n\tif protoimpl.UnsafeEnabled && x != nil {\n\t\tms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))\n\t\tif ms.LoadMessageInfo() == nil {\n\t\t\tms.StoreMessageInfo(mi)\n\t\t}\n\t\treturn ms\n\t}\n\treturn mi.MessageOf(x)\n}\n\n// Deprecated: Use CreateTask_Request.ProtoReflect.Descriptor instead.\nfunc (*CreateTask_Request) Descriptor() ([]byte, []int) {\n\treturn file_cloud_cloud_proto_rawDescGZIP(), []int{7, 0}\n}\n\nfunc (x *CreateTask_Request) GetProjectName() string {\n\tif x != nil {\n\t\treturn x.ProjectName\n\t}\n\treturn \"\"\n}\n\nfunc (x *CreateTask_Request) GetTaskId() string {\n\tif x != nil {\n\t\treturn x.TaskId\n\t}\n\treturn \"\"\n}\n\nfunc (x *CreateTask_Request) GetTaskSource() string {\n\tif x != nil {\n\t\treturn x.TaskSource\n\t}\n\treturn \"\"\n}\n\nfunc (x *CreateTask_Request) GetName() string {\n\tif x != nil {\n\t\treturn x.Name\n\t}\n\treturn \"\"\n}\n\ntype CreateTask_Response struct {\n\tstate         protoimpl.MessageState\n\tsizeCache     protoimpl.SizeCache\n\tunknownFields protoimpl.UnknownFields\n\n\tDiagnosis     *common.Diagnosis `protobuf:\"bytes,1,opt,name=diagnosis,proto3\" json:\"diagnosis,omitempty\"`\n\tTaskId        string            `protobuf:\"bytes,2,opt,name=task_id,json=taskId,proto3\" json:\"task_id,omitempty\"`\n\tProjectTaskId uint64            `protobuf:\"varint,3,opt,name=project_task_id,json=projectTaskId,proto3\" json:\"project_task_id,omitempty\"`\n}\n\nfunc (x *CreateTask_Response) Reset() {\n\t*x = CreateTask_Response{}\n\tif protoimpl.UnsafeEnabled {\n\t\tmi := &file_cloud_cloud_proto_msgTypes[18]\n\t\tms := 
protoimpl.X.MessageStateOf(protoimpl.Pointer(x))\n\t\tms.StoreMessageInfo(mi)\n\t}\n}\n\nfunc (x *CreateTask_Response) String() string {\n\treturn protoimpl.X.MessageStringOf(x)\n}\n\nfunc (*CreateTask_Response) ProtoMessage() {}\n\nfunc (x *CreateTask_Response) ProtoReflect() protoreflect.Message {\n\tmi := &file_cloud_cloud_proto_msgTypes[18]\n\tif protoimpl.UnsafeEnabled && x != nil {\n\t\tms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))\n\t\tif ms.LoadMessageInfo() == nil {\n\t\t\tms.StoreMessageInfo(mi)\n\t\t}\n\t\treturn ms\n\t}\n\treturn mi.MessageOf(x)\n}\n\n// Deprecated: Use CreateTask_Response.ProtoReflect.Descriptor instead.\nfunc (*CreateTask_Response) Descriptor() ([]byte, []int) {\n\treturn file_cloud_cloud_proto_rawDescGZIP(), []int{7, 1}\n}\n\nfunc (x *CreateTask_Response) GetDiagnosis() *common.Diagnosis {\n\tif x != nil {\n\t\treturn x.Diagnosis\n\t}\n\treturn nil\n}\n\nfunc (x *CreateTask_Response) GetTaskId() string {\n\tif x != nil {\n\t\treturn x.TaskId\n\t}\n\treturn \"\"\n}\n\nfunc (x *CreateTask_Response) GetProjectTaskId() uint64 {\n\tif x != nil {\n\t\treturn x.ProjectTaskId\n\t}\n\treturn 0\n}\n\nvar File_cloud_cloud_proto protoreflect.FileDescriptor\n\nvar file_cloud_cloud_proto_rawDesc = []byte{\n\t0x0a, 0x11, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2f, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x70, 0x72,\n\t0x6f, 0x74, 0x6f, 0x12, 0x05, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x1a, 0x13, 0x63, 0x6f, 0x6d, 0x6d,\n\t0x6f, 0x6e, 0x2f, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22,\n\t0x0e, 0x0a, 0x0c, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x22,\n\t0x0f, 0x0a, 0x0d, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x45, 0x6d, 0x70, 0x74, 0x79,\n\t0x22, 0xde, 0x01, 0x0a, 0x05, 0x4c, 0x6f, 0x67, 0x69, 0x6e, 0x1a, 0x1f, 0x0a, 0x07, 0x52, 0x65,\n\t0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x14, 0x0a, 0x05, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x01,\n\t0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x74, 0x6f, 
0x6b, 0x65, 0x6e, 0x1a, 0xb3, 0x01, 0x0a, 0x08,\n\t0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x2f, 0x0a, 0x09, 0x64, 0x69, 0x61, 0x67,\n\t0x6e, 0x6f, 0x73, 0x69, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x11, 0x2e, 0x63, 0x6f,\n\t0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x44, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x69, 0x73, 0x52, 0x09,\n\t0x64, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x69, 0x73, 0x12, 0x1d, 0x0a, 0x0a, 0x74, 0x6f, 0x6b,\n\t0x65, 0x6e, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x74,\n\t0x6f, 0x6b, 0x65, 0x6e, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x1b, 0x0a, 0x09, 0x75, 0x73, 0x65, 0x72,\n\t0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x75, 0x73, 0x65,\n\t0x72, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x19, 0x0a, 0x08, 0x6f, 0x72, 0x67, 0x5f, 0x6e, 0x61, 0x6d,\n\t0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x6f, 0x72, 0x67, 0x4e, 0x61, 0x6d, 0x65,\n\t0x12, 0x1f, 0x0a, 0x0b, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x68, 0x6f, 0x73, 0x74, 0x18,\n\t0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x48, 0x6f, 0x73,\n\t0x74, 0x22, 0x5c, 0x0a, 0x0b, 0x46, 0x65, 0x74, 0x63, 0x68, 0x4f, 0x72, 0x67, 0x44, 0x73, 0x6e,\n\t0x1a, 0x4d, 0x0a, 0x08, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x2f, 0x0a, 0x09,\n\t0x64, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x69, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32,\n\t0x11, 0x2e, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x44, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73,\n\t0x69, 0x73, 0x52, 0x09, 0x64, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x69, 0x73, 0x12, 0x10, 0x0a,\n\t0x03, 0x64, 0x73, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x64, 0x73, 0x6e, 0x22,\n\t0x45, 0x0a, 0x06, 0x4c, 0x6f, 0x67, 0x6f, 0x75, 0x74, 0x1a, 0x3b, 0x0a, 0x08, 0x52, 0x65, 0x73,\n\t0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x2f, 0x0a, 0x09, 0x64, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73,\n\t0x69, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x11, 0x2e, 
0x63, 0x6f, 0x6d, 0x6d, 0x6f,\n\t0x6e, 0x2e, 0x44, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x69, 0x73, 0x52, 0x09, 0x64, 0x69, 0x61,\n\t0x67, 0x6e, 0x6f, 0x73, 0x69, 0x73, 0x22, 0x9a, 0x01, 0x0a, 0x0d, 0x43, 0x72, 0x65, 0x61, 0x74,\n\t0x65, 0x50, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x1a, 0x1d, 0x0a, 0x07, 0x52, 0x65, 0x71, 0x75,\n\t0x65, 0x73, 0x74, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28,\n\t0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x1a, 0x6a, 0x0a, 0x08, 0x52, 0x65, 0x73, 0x70, 0x6f,\n\t0x6e, 0x73, 0x65, 0x12, 0x2f, 0x0a, 0x09, 0x64, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x69, 0x73,\n\t0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x11, 0x2e, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e,\n\t0x44, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x69, 0x73, 0x52, 0x09, 0x64, 0x69, 0x61, 0x67, 0x6e,\n\t0x6f, 0x73, 0x69, 0x73, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01,\n\t0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x19, 0x0a, 0x08, 0x6f, 0x72, 0x67, 0x5f,\n\t0x6e, 0x61, 0x6d, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x6f, 0x72, 0x67, 0x4e,\n\t0x61, 0x6d, 0x65, 0x22, 0x9a, 0x02, 0x0a, 0x0d, 0x53, 0x79, 0x6e, 0x63, 0x57, 0x6f, 0x72, 0x6b,\n\t0x70, 0x6c, 0x61, 0x63, 0x65, 0x1a, 0x49, 0x0a, 0x10, 0x50, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74,\n\t0x57, 0x6f, 0x72, 0x6b, 0x70, 0x6c, 0x61, 0x63, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x70, 0x61, 0x74,\n\t0x68, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x70, 0x61, 0x74, 0x68, 0x12, 0x21, 0x0a,\n\t0x0c, 0x79, 0x61, 0x6d, 0x6c, 0x5f, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x18, 0x02, 0x20,\n\t0x01, 0x28, 0x09, 0x52, 0x0b, 0x79, 0x61, 0x6d, 0x6c, 0x43, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74,\n\t0x1a, 0x80, 0x01, 0x0a, 0x07, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x21, 0x0a, 0x0c,\n\t0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01,\n\t0x28, 0x09, 0x52, 0x0b, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x4e, 0x61, 0x6d, 
0x65, 0x12,\n\t0x52, 0x0a, 0x11, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x77, 0x6f, 0x72, 0x6b, 0x70,\n\t0x6c, 0x61, 0x63, 0x65, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x63, 0x6c, 0x6f,\n\t0x75, 0x64, 0x2e, 0x53, 0x79, 0x6e, 0x63, 0x57, 0x6f, 0x72, 0x6b, 0x70, 0x6c, 0x61, 0x63, 0x65,\n\t0x2e, 0x50, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x57, 0x6f, 0x72, 0x6b, 0x70, 0x6c, 0x61, 0x63,\n\t0x65, 0x52, 0x10, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x57, 0x6f, 0x72, 0x6b, 0x70, 0x6c,\n\t0x61, 0x63, 0x65, 0x1a, 0x3b, 0x0a, 0x08, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12,\n\t0x2f, 0x0a, 0x09, 0x64, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x69, 0x73, 0x18, 0x01, 0x20, 0x01,\n\t0x28, 0x0b, 0x32, 0x11, 0x2e, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x44, 0x69, 0x61, 0x67,\n\t0x6e, 0x6f, 0x73, 0x69, 0x73, 0x52, 0x09, 0x64, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x69, 0x73,\n\t0x22, 0x86, 0x02, 0x0a, 0x0a, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x54, 0x61, 0x73, 0x6b, 0x1a,\n\t0x7a, 0x0a, 0x07, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x21, 0x0a, 0x0c, 0x70, 0x72,\n\t0x6f, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09,\n\t0x52, 0x0b, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x17, 0x0a,\n\t0x07, 0x74, 0x61, 0x73, 0x6b, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06,\n\t0x74, 0x61, 0x73, 0x6b, 0x49, 0x64, 0x12, 0x1f, 0x0a, 0x0b, 0x74, 0x61, 0x73, 0x6b, 0x5f, 0x73,\n\t0x6f, 0x75, 0x72, 0x63, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x74, 0x61, 0x73,\n\t0x6b, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18,\n\t0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x1a, 0x7c, 0x0a, 0x08, 0x52,\n\t0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x2f, 0x0a, 0x09, 0x64, 0x69, 0x61, 0x67, 0x6e,\n\t0x6f, 0x73, 0x69, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x11, 0x2e, 0x63, 0x6f, 
0x6d,\n\t0x6d, 0x6f, 0x6e, 0x2e, 0x44, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x69, 0x73, 0x52, 0x09, 0x64,\n\t0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x69, 0x73, 0x12, 0x17, 0x0a, 0x07, 0x74, 0x61, 0x73, 0x6b,\n\t0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x74, 0x61, 0x73, 0x6b, 0x49,\n\t0x64, 0x12, 0x26, 0x0a, 0x0f, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x74, 0x61, 0x73,\n\t0x6b, 0x5f, 0x69, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0d, 0x70, 0x72, 0x6f, 0x6a,\n\t0x65, 0x63, 0x74, 0x54, 0x61, 0x73, 0x6b, 0x49, 0x64, 0x32, 0x45, 0x0a, 0x0b, 0x43, 0x6c, 0x6f,\n\t0x75, 0x64, 0x4e, 0x6f, 0x41, 0x75, 0x74, 0x68, 0x12, 0x36, 0x0a, 0x05, 0x4c, 0x6f, 0x67, 0x69,\n\t0x6e, 0x12, 0x14, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x4c, 0x6f, 0x67, 0x69, 0x6e, 0x2e,\n\t0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x15, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e,\n\t0x4c, 0x6f, 0x67, 0x69, 0x6e, 0x2e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00,\n\t0x32, 0xea, 0x02, 0x0a, 0x05, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x12, 0x41, 0x0a, 0x0b, 0x46, 0x65,\n\t0x74, 0x63, 0x68, 0x4f, 0x72, 0x67, 0x44, 0x73, 0x6e, 0x12, 0x13, 0x2e, 0x63, 0x6c, 0x6f, 0x75,\n\t0x64, 0x2e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x1a, 0x1b,\n\t0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x46, 0x65, 0x74, 0x63, 0x68, 0x4f, 0x72, 0x67, 0x44,\n\t0x73, 0x6e, 0x2e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x37, 0x0a,\n\t0x06, 0x4c, 0x6f, 0x67, 0x6f, 0x75, 0x74, 0x12, 0x13, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e,\n\t0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x1a, 0x16, 0x2e, 0x63,\n\t0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x4c, 0x6f, 0x67, 0x6f, 0x75, 0x74, 0x2e, 0x52, 0x65, 0x73, 0x70,\n\t0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x4e, 0x0a, 0x0d, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65,\n\t0x50, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x12, 0x1c, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e,\n\t0x43, 
0x72, 0x65, 0x61, 0x74, 0x65, 0x50, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x2e, 0x52, 0x65,\n\t0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1d, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x43, 0x72,\n\t0x65, 0x61, 0x74, 0x65, 0x50, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x2e, 0x52, 0x65, 0x73, 0x70,\n\t0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x4e, 0x0a, 0x0d, 0x53, 0x79, 0x6e, 0x63, 0x57, 0x6f,\n\t0x72, 0x6b, 0x70, 0x6c, 0x61, 0x63, 0x65, 0x12, 0x1c, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e,\n\t0x53, 0x79, 0x6e, 0x63, 0x57, 0x6f, 0x72, 0x6b, 0x70, 0x6c, 0x61, 0x63, 0x65, 0x2e, 0x52, 0x65,\n\t0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1d, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x53, 0x79,\n\t0x6e, 0x63, 0x57, 0x6f, 0x72, 0x6b, 0x70, 0x6c, 0x61, 0x63, 0x65, 0x2e, 0x52, 0x65, 0x73, 0x70,\n\t0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x45, 0x0a, 0x0a, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65,\n\t0x54, 0x61, 0x73, 0x6b, 0x12, 0x19, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x43, 0x72, 0x65,\n\t0x61, 0x74, 0x65, 0x54, 0x61, 0x73, 0x6b, 0x2e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a,\n\t0x1a, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x54, 0x61,\n\t0x73, 0x6b, 0x2e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x42, 0x0d, 0x5a,\n\t0x0b, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x62, 0x06, 0x70, 0x72,\n\t0x6f, 0x74, 0x6f, 0x33,\n}\n\nvar (\n\tfile_cloud_cloud_proto_rawDescOnce sync.Once\n\tfile_cloud_cloud_proto_rawDescData = file_cloud_cloud_proto_rawDesc\n)\n\nfunc file_cloud_cloud_proto_rawDescGZIP() []byte {\n\tfile_cloud_cloud_proto_rawDescOnce.Do(func() {\n\t\tfile_cloud_cloud_proto_rawDescData = protoimpl.X.CompressGZIP(file_cloud_cloud_proto_rawDescData)\n\t})\n\treturn file_cloud_cloud_proto_rawDescData\n}\n\nvar file_cloud_cloud_proto_msgTypes = make([]protoimpl.MessageInfo, 19)\nvar file_cloud_cloud_proto_goTypes = []interface{}{\n\t(*RequestEmpty)(nil),                   // 0: 
cloud.RequestEmpty\n\t(*ResponseEmpty)(nil),                  // 1: cloud.ResponseEmpty\n\t(*Login)(nil),                          // 2: cloud.Login\n\t(*FetchOrgDsn)(nil),                    // 3: cloud.FetchOrgDsn\n\t(*Logout)(nil),                         // 4: cloud.Logout\n\t(*CreateProject)(nil),                  // 5: cloud.CreateProject\n\t(*SyncWorkplace)(nil),                  // 6: cloud.SyncWorkplace\n\t(*CreateTask)(nil),                     // 7: cloud.CreateTask\n\t(*Login_Request)(nil),                  // 8: cloud.Login.Request\n\t(*Login_Response)(nil),                 // 9: cloud.Login.Response\n\t(*FetchOrgDsn_Response)(nil),           // 10: cloud.FetchOrgDsn.Response\n\t(*Logout_Response)(nil),                // 11: cloud.Logout.Response\n\t(*CreateProject_Request)(nil),          // 12: cloud.CreateProject.Request\n\t(*CreateProject_Response)(nil),         // 13: cloud.CreateProject.Response\n\t(*SyncWorkplace_ProjectWorkplace)(nil), // 14: cloud.SyncWorkplace.ProjectWorkplace\n\t(*SyncWorkplace_Request)(nil),          // 15: cloud.SyncWorkplace.Request\n\t(*SyncWorkplace_Response)(nil),         // 16: cloud.SyncWorkplace.Response\n\t(*CreateTask_Request)(nil),             // 17: cloud.CreateTask.Request\n\t(*CreateTask_Response)(nil),            // 18: cloud.CreateTask.Response\n\t(*common.Diagnosis)(nil),               // 19: common.Diagnosis\n}\nvar file_cloud_cloud_proto_depIdxs = []int32{\n\t19, // 0: cloud.Login.Response.diagnosis:type_name -> common.Diagnosis\n\t19, // 1: cloud.FetchOrgDsn.Response.diagnosis:type_name -> common.Diagnosis\n\t19, // 2: cloud.Logout.Response.diagnosis:type_name -> common.Diagnosis\n\t19, // 3: cloud.CreateProject.Response.diagnosis:type_name -> common.Diagnosis\n\t14, // 4: cloud.SyncWorkplace.Request.project_workplace:type_name -> cloud.SyncWorkplace.ProjectWorkplace\n\t19, // 5: cloud.SyncWorkplace.Response.diagnosis:type_name -> common.Diagnosis\n\t19, // 6: cloud.CreateTask.Response.diagnosis:type_name 
-> common.Diagnosis\n\t8,  // 7: cloud.CloudNoAuth.Login:input_type -> cloud.Login.Request\n\t0,  // 8: cloud.Cloud.FetchOrgDsn:input_type -> cloud.RequestEmpty\n\t0,  // 9: cloud.Cloud.Logout:input_type -> cloud.RequestEmpty\n\t12, // 10: cloud.Cloud.CreateProject:input_type -> cloud.CreateProject.Request\n\t15, // 11: cloud.Cloud.SyncWorkplace:input_type -> cloud.SyncWorkplace.Request\n\t17, // 12: cloud.Cloud.CreateTask:input_type -> cloud.CreateTask.Request\n\t9,  // 13: cloud.CloudNoAuth.Login:output_type -> cloud.Login.Response\n\t10, // 14: cloud.Cloud.FetchOrgDsn:output_type -> cloud.FetchOrgDsn.Response\n\t11, // 15: cloud.Cloud.Logout:output_type -> cloud.Logout.Response\n\t13, // 16: cloud.Cloud.CreateProject:output_type -> cloud.CreateProject.Response\n\t16, // 17: cloud.Cloud.SyncWorkplace:output_type -> cloud.SyncWorkplace.Response\n\t18, // 18: cloud.Cloud.CreateTask:output_type -> cloud.CreateTask.Response\n\t13, // [13:19] is the sub-list for method output_type\n\t7,  // [7:13] is the sub-list for method input_type\n\t7,  // [7:7] is the sub-list for extension type_name\n\t7,  // [7:7] is the sub-list for extension extendee\n\t0,  // [0:7] is the sub-list for field type_name\n}\n\nfunc init() { file_cloud_cloud_proto_init() }\nfunc file_cloud_cloud_proto_init() {\n\tif File_cloud_cloud_proto != nil {\n\t\treturn\n\t}\n\tif !protoimpl.UnsafeEnabled {\n\t\tfile_cloud_cloud_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {\n\t\t\tswitch v := v.(*RequestEmpty); i {\n\t\t\tcase 0:\n\t\t\t\treturn &v.state\n\t\t\tcase 1:\n\t\t\t\treturn &v.sizeCache\n\t\t\tcase 2:\n\t\t\t\treturn &v.unknownFields\n\t\t\tdefault:\n\t\t\t\treturn nil\n\t\t\t}\n\t\t}\n\t\tfile_cloud_cloud_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} {\n\t\t\tswitch v := v.(*ResponseEmpty); i {\n\t\t\tcase 0:\n\t\t\t\treturn &v.state\n\t\t\tcase 1:\n\t\t\t\treturn &v.sizeCache\n\t\t\tcase 2:\n\t\t\t\treturn 
&v.unknownFields\n\t\t\tdefault:\n\t\t\t\treturn nil\n\t\t\t}\n\t\t}\n\t\tfile_cloud_cloud_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} {\n\t\t\tswitch v := v.(*Login); i {\n\t\t\tcase 0:\n\t\t\t\treturn &v.state\n\t\t\tcase 1:\n\t\t\t\treturn &v.sizeCache\n\t\t\tcase 2:\n\t\t\t\treturn &v.unknownFields\n\t\t\tdefault:\n\t\t\t\treturn nil\n\t\t\t}\n\t\t}\n\t\tfile_cloud_cloud_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} {\n\t\t\tswitch v := v.(*FetchOrgDsn); i {\n\t\t\tcase 0:\n\t\t\t\treturn &v.state\n\t\t\tcase 1:\n\t\t\t\treturn &v.sizeCache\n\t\t\tcase 2:\n\t\t\t\treturn &v.unknownFields\n\t\t\tdefault:\n\t\t\t\treturn nil\n\t\t\t}\n\t\t}\n\t\tfile_cloud_cloud_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} {\n\t\t\tswitch v := v.(*Logout); i {\n\t\t\tcase 0:\n\t\t\t\treturn &v.state\n\t\t\tcase 1:\n\t\t\t\treturn &v.sizeCache\n\t\t\tcase 2:\n\t\t\t\treturn &v.unknownFields\n\t\t\tdefault:\n\t\t\t\treturn nil\n\t\t\t}\n\t\t}\n\t\tfile_cloud_cloud_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} {\n\t\t\tswitch v := v.(*CreateProject); i {\n\t\t\tcase 0:\n\t\t\t\treturn &v.state\n\t\t\tcase 1:\n\t\t\t\treturn &v.sizeCache\n\t\t\tcase 2:\n\t\t\t\treturn &v.unknownFields\n\t\t\tdefault:\n\t\t\t\treturn nil\n\t\t\t}\n\t\t}\n\t\tfile_cloud_cloud_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} {\n\t\t\tswitch v := v.(*SyncWorkplace); i {\n\t\t\tcase 0:\n\t\t\t\treturn &v.state\n\t\t\tcase 1:\n\t\t\t\treturn &v.sizeCache\n\t\t\tcase 2:\n\t\t\t\treturn &v.unknownFields\n\t\t\tdefault:\n\t\t\t\treturn nil\n\t\t\t}\n\t\t}\n\t\tfile_cloud_cloud_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} {\n\t\t\tswitch v := v.(*CreateTask); i {\n\t\t\tcase 0:\n\t\t\t\treturn &v.state\n\t\t\tcase 1:\n\t\t\t\treturn &v.sizeCache\n\t\t\tcase 2:\n\t\t\t\treturn &v.unknownFields\n\t\t\tdefault:\n\t\t\t\treturn 
nil\n\t\t\t}\n\t\t}\n\t\tfile_cloud_cloud_proto_msgTypes[8].Exporter = func(v interface{}, i int) interface{} {\n\t\t\tswitch v := v.(*Login_Request); i {\n\t\t\tcase 0:\n\t\t\t\treturn &v.state\n\t\t\tcase 1:\n\t\t\t\treturn &v.sizeCache\n\t\t\tcase 2:\n\t\t\t\treturn &v.unknownFields\n\t\t\tdefault:\n\t\t\t\treturn nil\n\t\t\t}\n\t\t}\n\t\tfile_cloud_cloud_proto_msgTypes[9].Exporter = func(v interface{}, i int) interface{} {\n\t\t\tswitch v := v.(*Login_Response); i {\n\t\t\tcase 0:\n\t\t\t\treturn &v.state\n\t\t\tcase 1:\n\t\t\t\treturn &v.sizeCache\n\t\t\tcase 2:\n\t\t\t\treturn &v.unknownFields\n\t\t\tdefault:\n\t\t\t\treturn nil\n\t\t\t}\n\t\t}\n\t\tfile_cloud_cloud_proto_msgTypes[10].Exporter = func(v interface{}, i int) interface{} {\n\t\t\tswitch v := v.(*FetchOrgDsn_Response); i {\n\t\t\tcase 0:\n\t\t\t\treturn &v.state\n\t\t\tcase 1:\n\t\t\t\treturn &v.sizeCache\n\t\t\tcase 2:\n\t\t\t\treturn &v.unknownFields\n\t\t\tdefault:\n\t\t\t\treturn nil\n\t\t\t}\n\t\t}\n\t\tfile_cloud_cloud_proto_msgTypes[11].Exporter = func(v interface{}, i int) interface{} {\n\t\t\tswitch v := v.(*Logout_Response); i {\n\t\t\tcase 0:\n\t\t\t\treturn &v.state\n\t\t\tcase 1:\n\t\t\t\treturn &v.sizeCache\n\t\t\tcase 2:\n\t\t\t\treturn &v.unknownFields\n\t\t\tdefault:\n\t\t\t\treturn nil\n\t\t\t}\n\t\t}\n\t\tfile_cloud_cloud_proto_msgTypes[12].Exporter = func(v interface{}, i int) interface{} {\n\t\t\tswitch v := v.(*CreateProject_Request); i {\n\t\t\tcase 0:\n\t\t\t\treturn &v.state\n\t\t\tcase 1:\n\t\t\t\treturn &v.sizeCache\n\t\t\tcase 2:\n\t\t\t\treturn &v.unknownFields\n\t\t\tdefault:\n\t\t\t\treturn nil\n\t\t\t}\n\t\t}\n\t\tfile_cloud_cloud_proto_msgTypes[13].Exporter = func(v interface{}, i int) interface{} {\n\t\t\tswitch v := v.(*CreateProject_Response); i {\n\t\t\tcase 0:\n\t\t\t\treturn &v.state\n\t\t\tcase 1:\n\t\t\t\treturn &v.sizeCache\n\t\t\tcase 2:\n\t\t\t\treturn &v.unknownFields\n\t\t\tdefault:\n\t\t\t\treturn 
nil\n\t\t\t}\n\t\t}\n\t\tfile_cloud_cloud_proto_msgTypes[14].Exporter = func(v interface{}, i int) interface{} {\n\t\t\tswitch v := v.(*SyncWorkplace_ProjectWorkplace); i {\n\t\t\tcase 0:\n\t\t\t\treturn &v.state\n\t\t\tcase 1:\n\t\t\t\treturn &v.sizeCache\n\t\t\tcase 2:\n\t\t\t\treturn &v.unknownFields\n\t\t\tdefault:\n\t\t\t\treturn nil\n\t\t\t}\n\t\t}\n\t\tfile_cloud_cloud_proto_msgTypes[15].Exporter = func(v interface{}, i int) interface{} {\n\t\t\tswitch v := v.(*SyncWorkplace_Request); i {\n\t\t\tcase 0:\n\t\t\t\treturn &v.state\n\t\t\tcase 1:\n\t\t\t\treturn &v.sizeCache\n\t\t\tcase 2:\n\t\t\t\treturn &v.unknownFields\n\t\t\tdefault:\n\t\t\t\treturn nil\n\t\t\t}\n\t\t}\n\t\tfile_cloud_cloud_proto_msgTypes[16].Exporter = func(v interface{}, i int) interface{} {\n\t\t\tswitch v := v.(*SyncWorkplace_Response); i {\n\t\t\tcase 0:\n\t\t\t\treturn &v.state\n\t\t\tcase 1:\n\t\t\t\treturn &v.sizeCache\n\t\t\tcase 2:\n\t\t\t\treturn &v.unknownFields\n\t\t\tdefault:\n\t\t\t\treturn nil\n\t\t\t}\n\t\t}\n\t\tfile_cloud_cloud_proto_msgTypes[17].Exporter = func(v interface{}, i int) interface{} {\n\t\t\tswitch v := v.(*CreateTask_Request); i {\n\t\t\tcase 0:\n\t\t\t\treturn &v.state\n\t\t\tcase 1:\n\t\t\t\treturn &v.sizeCache\n\t\t\tcase 2:\n\t\t\t\treturn &v.unknownFields\n\t\t\tdefault:\n\t\t\t\treturn nil\n\t\t\t}\n\t\t}\n\t\tfile_cloud_cloud_proto_msgTypes[18].Exporter = func(v interface{}, i int) interface{} {\n\t\t\tswitch v := v.(*CreateTask_Response); i {\n\t\t\tcase 0:\n\t\t\t\treturn &v.state\n\t\t\tcase 1:\n\t\t\t\treturn &v.sizeCache\n\t\t\tcase 2:\n\t\t\t\treturn &v.unknownFields\n\t\t\tdefault:\n\t\t\t\treturn nil\n\t\t\t}\n\t\t}\n\t}\n\ttype x struct{}\n\tout := protoimpl.TypeBuilder{\n\t\tFile: protoimpl.DescBuilder{\n\t\t\tGoPackagePath: reflect.TypeOf(x{}).PkgPath(),\n\t\t\tRawDescriptor: file_cloud_cloud_proto_rawDesc,\n\t\t\tNumEnums:      0,\n\t\t\tNumMessages:   19,\n\t\t\tNumExtensions: 0,\n\t\t\tNumServices:   2,\n\t\t},\n\t\tGoTypes:           
file_cloud_cloud_proto_goTypes,\n\t\tDependencyIndexes: file_cloud_cloud_proto_depIdxs,\n\t\tMessageInfos:      file_cloud_cloud_proto_msgTypes,\n\t}.Build()\n\tFile_cloud_cloud_proto = out.File\n\tfile_cloud_cloud_proto_rawDesc = nil\n\tfile_cloud_cloud_proto_goTypes = nil\n\tfile_cloud_cloud_proto_depIdxs = nil\n}\n"
  },
  {
    "path": "pkg/grpc/pb/cloud/cloud.proto",
    "content": "syntax = \"proto3\";\noption go_package = \"proto/cloud\";\npackage cloud;\nimport \"common/common.proto\";\n// protoc --proto_path=pkg/grpc/pb/cloud --go_out=pkg/grpc/pb/cloud --go_opt=paths=source_relative --go-grpc_out=pkg/grpc/pb/cloud --go-grpc_opt=paths=source_relative  cloud.proto\n\n\n\n\nmessage RequestEmpty {}\n\nmessage ResponseEmpty {}\n\nmessage Login {\n  message Request {\n    string token = 1;\n  }\n\n  message Response {\n    common.Diagnosis diagnosis = 1;\n\n    string token_name = 2;\n    string user_name = 3;\n    string org_name = 4;\n    string server_host = 5;\n  }\n}\n\nmessage FetchOrgDsn {\n  message Response {\n    common.Diagnosis diagnosis = 1;\n    string dsn = 2;\n  }\n}\n\nmessage Logout {\n  message Response {\n    common.Diagnosis diagnosis = 1;\n  }\n}\n\nmessage CreateProject {\n  message Request {\n    string name = 1;\n  }\n\n  message Response {\n    common.Diagnosis diagnosis = 1;\n    string name = 2;\n    string org_name = 3;\n  }\n}\n\n\nmessage SyncWorkplace {\n  message ProjectWorkplace {\n    string path = 1;\n    string yaml_content = 2;\n  }\n\n  message Request {\n    string project_name = 1;\n    repeated ProjectWorkplace project_workplace = 2;\n  }\n\n  message Response {\n    common.Diagnosis diagnosis = 1;\n  }\n}\n\nmessage CreateTask {\n  message Request {\n    string project_name = 1;\n    string task_id = 2;\n    string task_source = 3;\n    string name = 4;\n  }\n\n  message Response {\n    common.Diagnosis diagnosis = 1;\n    string task_id = 2;\n    uint64 project_task_id = 3;\n  }\n}\n\n\nservice CloudNoAuth {\n  rpc Login (Login.Request) returns (Login.Response) {};\n}\n\nservice Cloud {\n  rpc FetchOrgDsn (RequestEmpty) returns (FetchOrgDsn.Response) {};\n\n  rpc Logout (RequestEmpty) returns (Logout.Response) {};\n\n  rpc CreateProject (CreateProject.Request) returns (CreateProject.Response) {}\n\n  rpc SyncWorkplace (SyncWorkplace.Request) returns (SyncWorkplace.Response) {}\n\n  rpc 
CreateTask (CreateTask.Request) returns (CreateTask.Response) {}\n}"
  },
  {
    "path": "pkg/grpc/pb/cloud/cloud_grpc.pb.go",
    "content": "// Code generated by protoc-gen-go-grpc. DO NOT EDIT.\n// versions:\n// - protoc-gen-go-grpc v1.2.0\n// - protoc             v3.21.8\n// source: cloud.proto\n\npackage cloud\n\nimport (\n\tcontext \"context\"\n\tgrpc \"google.golang.org/grpc\"\n\tcodes \"google.golang.org/grpc/codes\"\n\tstatus \"google.golang.org/grpc/status\"\n)\n\n// This is a compile-time assertion to ensure that this generated file\n// is compatible with the grpc package it is being compiled against.\n// Requires gRPC-Go v1.32.0 or later.\nconst _ = grpc.SupportPackageIsVersion7\n\n// CloudNoAuthClient is the client API for CloudNoAuth service.\n//\n// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream.\ntype CloudNoAuthClient interface {\n\tLogin(ctx context.Context, in *Login_Request, opts ...grpc.CallOption) (*Login_Response, error)\n}\n\ntype cloudNoAuthClient struct {\n\tcc grpc.ClientConnInterface\n}\n\nfunc NewCloudNoAuthClient(cc grpc.ClientConnInterface) CloudNoAuthClient {\n\treturn &cloudNoAuthClient{cc}\n}\n\nfunc (c *cloudNoAuthClient) Login(ctx context.Context, in *Login_Request, opts ...grpc.CallOption) (*Login_Response, error) {\n\tout := new(Login_Response)\n\terr := c.cc.Invoke(ctx, \"/cloud.CloudNoAuth/Login\", in, out, opts...)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn out, nil\n}\n\n// CloudNoAuthServer is the server API for CloudNoAuth service.\n// All implementations must embed UnimplementedCloudNoAuthServer\n// for forward compatibility\ntype CloudNoAuthServer interface {\n\tLogin(context.Context, *Login_Request) (*Login_Response, error)\n\tmustEmbedUnimplementedCloudNoAuthServer()\n}\n\n// UnimplementedCloudNoAuthServer must be embedded to have forward compatible implementations.\ntype UnimplementedCloudNoAuthServer struct {\n}\n\nfunc (UnimplementedCloudNoAuthServer) Login(context.Context, *Login_Request) (*Login_Response, error) 
{\n\treturn nil, status.Errorf(codes.Unimplemented, \"method Login not implemented\")\n}\nfunc (UnimplementedCloudNoAuthServer) mustEmbedUnimplementedCloudNoAuthServer() {}\n\n// UnsafeCloudNoAuthServer may be embedded to opt out of forward compatibility for this service.\n// Use of this interface is not recommended, as added methods to CloudNoAuthServer will\n// result in compilation errors.\ntype UnsafeCloudNoAuthServer interface {\n\tmustEmbedUnimplementedCloudNoAuthServer()\n}\n\nfunc RegisterCloudNoAuthServer(s grpc.ServiceRegistrar, srv CloudNoAuthServer) {\n\ts.RegisterService(&CloudNoAuth_ServiceDesc, srv)\n}\n\nfunc _CloudNoAuth_Login_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {\n\tin := new(Login_Request)\n\tif err := dec(in); err != nil {\n\t\treturn nil, err\n\t}\n\tif interceptor == nil {\n\t\treturn srv.(CloudNoAuthServer).Login(ctx, in)\n\t}\n\tinfo := &grpc.UnaryServerInfo{\n\t\tServer:     srv,\n\t\tFullMethod: \"/cloud.CloudNoAuth/Login\",\n\t}\n\thandler := func(ctx context.Context, req interface{}) (interface{}, error) {\n\t\treturn srv.(CloudNoAuthServer).Login(ctx, req.(*Login_Request))\n\t}\n\treturn interceptor(ctx, in, info, handler)\n}\n\n// CloudNoAuth_ServiceDesc is the grpc.ServiceDesc for CloudNoAuth service.\n// It's only intended for direct use with grpc.RegisterService,\n// and not to be introspected or modified (even as a copy)\nvar CloudNoAuth_ServiceDesc = grpc.ServiceDesc{\n\tServiceName: \"cloud.CloudNoAuth\",\n\tHandlerType: (*CloudNoAuthServer)(nil),\n\tMethods: []grpc.MethodDesc{\n\t\t{\n\t\t\tMethodName: \"Login\",\n\t\t\tHandler:    _CloudNoAuth_Login_Handler,\n\t\t},\n\t},\n\tStreams:  []grpc.StreamDesc{},\n\tMetadata: \"cloud.proto\",\n}\n\n// CloudClient is the client API for Cloud service.\n//\n// For semantics around ctx use and closing/ending streaming RPCs, please refer to 
https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream.\ntype CloudClient interface {\n\tFetchOrgDsn(ctx context.Context, in *RequestEmpty, opts ...grpc.CallOption) (*FetchOrgDsn_Response, error)\n\tLogout(ctx context.Context, in *RequestEmpty, opts ...grpc.CallOption) (*Logout_Response, error)\n\tCreateProject(ctx context.Context, in *CreateProject_Request, opts ...grpc.CallOption) (*CreateProject_Response, error)\n\tSyncWorkplace(ctx context.Context, in *SyncWorkplace_Request, opts ...grpc.CallOption) (*SyncWorkplace_Response, error)\n\tCreateTask(ctx context.Context, in *CreateTask_Request, opts ...grpc.CallOption) (*CreateTask_Response, error)\n}\n\ntype cloudClient struct {\n\tcc grpc.ClientConnInterface\n}\n\nfunc NewCloudClient(cc grpc.ClientConnInterface) CloudClient {\n\treturn &cloudClient{cc}\n}\n\nfunc (c *cloudClient) FetchOrgDsn(ctx context.Context, in *RequestEmpty, opts ...grpc.CallOption) (*FetchOrgDsn_Response, error) {\n\tout := new(FetchOrgDsn_Response)\n\terr := c.cc.Invoke(ctx, \"/cloud.Cloud/FetchOrgDsn\", in, out, opts...)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn out, nil\n}\n\nfunc (c *cloudClient) Logout(ctx context.Context, in *RequestEmpty, opts ...grpc.CallOption) (*Logout_Response, error) {\n\tout := new(Logout_Response)\n\terr := c.cc.Invoke(ctx, \"/cloud.Cloud/Logout\", in, out, opts...)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn out, nil\n}\n\nfunc (c *cloudClient) CreateProject(ctx context.Context, in *CreateProject_Request, opts ...grpc.CallOption) (*CreateProject_Response, error) {\n\tout := new(CreateProject_Response)\n\terr := c.cc.Invoke(ctx, \"/cloud.Cloud/CreateProject\", in, out, opts...)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn out, nil\n}\n\nfunc (c *cloudClient) SyncWorkplace(ctx context.Context, in *SyncWorkplace_Request, opts ...grpc.CallOption) (*SyncWorkplace_Response, error) {\n\tout := new(SyncWorkplace_Response)\n\terr := c.cc.Invoke(ctx, 
\"/cloud.Cloud/SyncWorkplace\", in, out, opts...)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn out, nil\n}\n\nfunc (c *cloudClient) CreateTask(ctx context.Context, in *CreateTask_Request, opts ...grpc.CallOption) (*CreateTask_Response, error) {\n\tout := new(CreateTask_Response)\n\terr := c.cc.Invoke(ctx, \"/cloud.Cloud/CreateTask\", in, out, opts...)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn out, nil\n}\n\n// CloudServer is the server API for Cloud service.\n// All implementations must embed UnimplementedCloudServer\n// for forward compatibility\ntype CloudServer interface {\n\tFetchOrgDsn(context.Context, *RequestEmpty) (*FetchOrgDsn_Response, error)\n\tLogout(context.Context, *RequestEmpty) (*Logout_Response, error)\n\tCreateProject(context.Context, *CreateProject_Request) (*CreateProject_Response, error)\n\tSyncWorkplace(context.Context, *SyncWorkplace_Request) (*SyncWorkplace_Response, error)\n\tCreateTask(context.Context, *CreateTask_Request) (*CreateTask_Response, error)\n\tmustEmbedUnimplementedCloudServer()\n}\n\n// UnimplementedCloudServer must be embedded to have forward compatible implementations.\ntype UnimplementedCloudServer struct {\n}\n\nfunc (UnimplementedCloudServer) FetchOrgDsn(context.Context, *RequestEmpty) (*FetchOrgDsn_Response, error) {\n\treturn nil, status.Errorf(codes.Unimplemented, \"method FetchOrgDsn not implemented\")\n}\nfunc (UnimplementedCloudServer) Logout(context.Context, *RequestEmpty) (*Logout_Response, error) {\n\treturn nil, status.Errorf(codes.Unimplemented, \"method Logout not implemented\")\n}\nfunc (UnimplementedCloudServer) CreateProject(context.Context, *CreateProject_Request) (*CreateProject_Response, error) {\n\treturn nil, status.Errorf(codes.Unimplemented, \"method CreateProject not implemented\")\n}\nfunc (UnimplementedCloudServer) SyncWorkplace(context.Context, *SyncWorkplace_Request) (*SyncWorkplace_Response, error) {\n\treturn nil, status.Errorf(codes.Unimplemented, \"method SyncWorkplace 
not implemented\")\n}\nfunc (UnimplementedCloudServer) CreateTask(context.Context, *CreateTask_Request) (*CreateTask_Response, error) {\n\treturn nil, status.Errorf(codes.Unimplemented, \"method CreateTask not implemented\")\n}\nfunc (UnimplementedCloudServer) mustEmbedUnimplementedCloudServer() {}\n\n// UnsafeCloudServer may be embedded to opt out of forward compatibility for this service.\n// Use of this interface is not recommended, as added methods to CloudServer will\n// result in compilation errors.\ntype UnsafeCloudServer interface {\n\tmustEmbedUnimplementedCloudServer()\n}\n\nfunc RegisterCloudServer(s grpc.ServiceRegistrar, srv CloudServer) {\n\ts.RegisterService(&Cloud_ServiceDesc, srv)\n}\n\nfunc _Cloud_FetchOrgDsn_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {\n\tin := new(RequestEmpty)\n\tif err := dec(in); err != nil {\n\t\treturn nil, err\n\t}\n\tif interceptor == nil {\n\t\treturn srv.(CloudServer).FetchOrgDsn(ctx, in)\n\t}\n\tinfo := &grpc.UnaryServerInfo{\n\t\tServer:     srv,\n\t\tFullMethod: \"/cloud.Cloud/FetchOrgDsn\",\n\t}\n\thandler := func(ctx context.Context, req interface{}) (interface{}, error) {\n\t\treturn srv.(CloudServer).FetchOrgDsn(ctx, req.(*RequestEmpty))\n\t}\n\treturn interceptor(ctx, in, info, handler)\n}\n\nfunc _Cloud_Logout_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {\n\tin := new(RequestEmpty)\n\tif err := dec(in); err != nil {\n\t\treturn nil, err\n\t}\n\tif interceptor == nil {\n\t\treturn srv.(CloudServer).Logout(ctx, in)\n\t}\n\tinfo := &grpc.UnaryServerInfo{\n\t\tServer:     srv,\n\t\tFullMethod: \"/cloud.Cloud/Logout\",\n\t}\n\thandler := func(ctx context.Context, req interface{}) (interface{}, error) {\n\t\treturn srv.(CloudServer).Logout(ctx, req.(*RequestEmpty))\n\t}\n\treturn interceptor(ctx, in, info, handler)\n}\n\nfunc 
_Cloud_CreateProject_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {\n\tin := new(CreateProject_Request)\n\tif err := dec(in); err != nil {\n\t\treturn nil, err\n\t}\n\tif interceptor == nil {\n\t\treturn srv.(CloudServer).CreateProject(ctx, in)\n\t}\n\tinfo := &grpc.UnaryServerInfo{\n\t\tServer:     srv,\n\t\tFullMethod: \"/cloud.Cloud/CreateProject\",\n\t}\n\thandler := func(ctx context.Context, req interface{}) (interface{}, error) {\n\t\treturn srv.(CloudServer).CreateProject(ctx, req.(*CreateProject_Request))\n\t}\n\treturn interceptor(ctx, in, info, handler)\n}\n\nfunc _Cloud_SyncWorkplace_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {\n\tin := new(SyncWorkplace_Request)\n\tif err := dec(in); err != nil {\n\t\treturn nil, err\n\t}\n\tif interceptor == nil {\n\t\treturn srv.(CloudServer).SyncWorkplace(ctx, in)\n\t}\n\tinfo := &grpc.UnaryServerInfo{\n\t\tServer:     srv,\n\t\tFullMethod: \"/cloud.Cloud/SyncWorkplace\",\n\t}\n\thandler := func(ctx context.Context, req interface{}) (interface{}, error) {\n\t\treturn srv.(CloudServer).SyncWorkplace(ctx, req.(*SyncWorkplace_Request))\n\t}\n\treturn interceptor(ctx, in, info, handler)\n}\n\nfunc _Cloud_CreateTask_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {\n\tin := new(CreateTask_Request)\n\tif err := dec(in); err != nil {\n\t\treturn nil, err\n\t}\n\tif interceptor == nil {\n\t\treturn srv.(CloudServer).CreateTask(ctx, in)\n\t}\n\tinfo := &grpc.UnaryServerInfo{\n\t\tServer:     srv,\n\t\tFullMethod: \"/cloud.Cloud/CreateTask\",\n\t}\n\thandler := func(ctx context.Context, req interface{}) (interface{}, error) {\n\t\treturn srv.(CloudServer).CreateTask(ctx, req.(*CreateTask_Request))\n\t}\n\treturn interceptor(ctx, in, info, handler)\n}\n\n// 
Cloud_ServiceDesc is the grpc.ServiceDesc for Cloud service.\n// It's only intended for direct use with grpc.RegisterService,\n// and not to be introspected or modified (even as a copy)\nvar Cloud_ServiceDesc = grpc.ServiceDesc{\n\tServiceName: \"cloud.Cloud\",\n\tHandlerType: (*CloudServer)(nil),\n\tMethods: []grpc.MethodDesc{\n\t\t{\n\t\t\tMethodName: \"FetchOrgDsn\",\n\t\t\tHandler:    _Cloud_FetchOrgDsn_Handler,\n\t\t},\n\t\t{\n\t\t\tMethodName: \"Logout\",\n\t\t\tHandler:    _Cloud_Logout_Handler,\n\t\t},\n\t\t{\n\t\t\tMethodName: \"CreateProject\",\n\t\t\tHandler:    _Cloud_CreateProject_Handler,\n\t\t},\n\t\t{\n\t\t\tMethodName: \"SyncWorkplace\",\n\t\t\tHandler:    _Cloud_SyncWorkplace_Handler,\n\t\t},\n\t\t{\n\t\t\tMethodName: \"CreateTask\",\n\t\t\tHandler:    _Cloud_CreateTask_Handler,\n\t\t},\n\t},\n\tStreams:  []grpc.StreamDesc{},\n\tMetadata: \"cloud.proto\",\n}\n"
  },
  {
    "path": "pkg/grpc/pb/common/common.pb.go",
    "content": "// Code generated by protoc-gen-go. DO NOT EDIT.\n// versions:\n// \tprotoc-gen-go v1.28.1\n// \tprotoc        v3.21.8\n// source: common/common.proto\n\npackage common\n\nimport (\n\tprotoreflect \"google.golang.org/protobuf/reflect/protoreflect\"\n\tprotoimpl \"google.golang.org/protobuf/runtime/protoimpl\"\n\treflect \"reflect\"\n\tsync \"sync\"\n)\n\nconst (\n\t// Verify that this generated code is sufficiently up-to-date.\n\t_ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)\n\t// Verify that runtime/protoimpl is sufficiently up-to-date.\n\t_ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)\n)\n\ntype Diagnosis_Code int32\n\nconst (\n\tDiagnosis_SUCCESS      Diagnosis_Code = 0\n\tDiagnosis_IllegalToken Diagnosis_Code = 2\n\tDiagnosis_Error        Diagnosis_Code = 7\n\tDiagnosis_NoAuthority  Diagnosis_Code = 11\n)\n\n// Enum value maps for Diagnosis_Code.\nvar (\n\tDiagnosis_Code_name = map[int32]string{\n\t\t0:  \"SUCCESS\",\n\t\t2:  \"IllegalToken\",\n\t\t7:  \"Error\",\n\t\t11: \"NoAuthority\",\n\t}\n\tDiagnosis_Code_value = map[string]int32{\n\t\t\"SUCCESS\":      0,\n\t\t\"IllegalToken\": 2,\n\t\t\"Error\":        7,\n\t\t\"NoAuthority\":  11,\n\t}\n)\n\nfunc (x Diagnosis_Code) Enum() *Diagnosis_Code {\n\tp := new(Diagnosis_Code)\n\t*p = x\n\treturn p\n}\n\nfunc (x Diagnosis_Code) String() string {\n\treturn protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))\n}\n\nfunc (Diagnosis_Code) Descriptor() protoreflect.EnumDescriptor {\n\treturn file_common_common_proto_enumTypes[0].Descriptor()\n}\n\nfunc (Diagnosis_Code) Type() protoreflect.EnumType {\n\treturn &file_common_common_proto_enumTypes[0]\n}\n\nfunc (x Diagnosis_Code) Number() protoreflect.EnumNumber {\n\treturn protoreflect.EnumNumber(x)\n}\n\n// Deprecated: Use Diagnosis_Code.Descriptor instead.\nfunc (Diagnosis_Code) EnumDescriptor() ([]byte, []int) {\n\treturn file_common_common_proto_rawDescGZIP(), []int{0, 0}\n}\n\ntype Diagnosis struct {\n\tstate      
   protoimpl.MessageState\n\tsizeCache     protoimpl.SizeCache\n\tunknownFields protoimpl.UnknownFields\n\n\tCode Diagnosis_Code `protobuf:\"varint,1,opt,name=code,proto3,enum=common.Diagnosis_Code\" json:\"code,omitempty\"`\n\tMsg  string         `protobuf:\"bytes,2,opt,name=msg,proto3\" json:\"msg,omitempty\"`\n}\n\nfunc (x *Diagnosis) Reset() {\n\t*x = Diagnosis{}\n\tif protoimpl.UnsafeEnabled {\n\t\tmi := &file_common_common_proto_msgTypes[0]\n\t\tms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))\n\t\tms.StoreMessageInfo(mi)\n\t}\n}\n\nfunc (x *Diagnosis) String() string {\n\treturn protoimpl.X.MessageStringOf(x)\n}\n\nfunc (*Diagnosis) ProtoMessage() {}\n\nfunc (x *Diagnosis) ProtoReflect() protoreflect.Message {\n\tmi := &file_common_common_proto_msgTypes[0]\n\tif protoimpl.UnsafeEnabled && x != nil {\n\t\tms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))\n\t\tif ms.LoadMessageInfo() == nil {\n\t\t\tms.StoreMessageInfo(mi)\n\t\t}\n\t\treturn ms\n\t}\n\treturn mi.MessageOf(x)\n}\n\n// Deprecated: Use Diagnosis.ProtoReflect.Descriptor instead.\nfunc (*Diagnosis) Descriptor() ([]byte, []int) {\n\treturn file_common_common_proto_rawDescGZIP(), []int{0}\n}\n\nfunc (x *Diagnosis) GetCode() Diagnosis_Code {\n\tif x != nil {\n\t\treturn x.Code\n\t}\n\treturn Diagnosis_SUCCESS\n}\n\nfunc (x *Diagnosis) GetMsg() string {\n\tif x != nil {\n\t\treturn x.Msg\n\t}\n\treturn \"\"\n}\n\nvar File_common_common_proto protoreflect.FileDescriptor\n\nvar file_common_common_proto_rawDesc = []byte{\n\t0x0a, 0x13, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2f, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e,\n\t0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x06, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x22, 0x8c, 0x01,\n\t0x0a, 0x09, 0x44, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x69, 0x73, 0x12, 0x2a, 0x0a, 0x04, 0x63,\n\t0x6f, 0x64, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x16, 0x2e, 0x63, 0x6f, 0x6d, 0x6d,\n\t0x6f, 0x6e, 0x2e, 0x44, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x69, 0x73, 0x2e, 0x43, 
0x6f, 0x64,\n\t0x65, 0x52, 0x04, 0x63, 0x6f, 0x64, 0x65, 0x12, 0x10, 0x0a, 0x03, 0x6d, 0x73, 0x67, 0x18, 0x02,\n\t0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6d, 0x73, 0x67, 0x22, 0x41, 0x0a, 0x04, 0x43, 0x6f, 0x64,\n\t0x65, 0x12, 0x0b, 0x0a, 0x07, 0x53, 0x55, 0x43, 0x43, 0x45, 0x53, 0x53, 0x10, 0x00, 0x12, 0x10,\n\t0x0a, 0x0c, 0x49, 0x6c, 0x6c, 0x65, 0x67, 0x61, 0x6c, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x10, 0x02,\n\t0x12, 0x09, 0x0a, 0x05, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x10, 0x07, 0x12, 0x0f, 0x0a, 0x0b, 0x4e,\n\t0x6f, 0x41, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x74, 0x79, 0x10, 0x0b, 0x42, 0x0e, 0x5a, 0x0c,\n\t0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x62, 0x06, 0x70, 0x72,\n\t0x6f, 0x74, 0x6f, 0x33,\n}\n\nvar (\n\tfile_common_common_proto_rawDescOnce sync.Once\n\tfile_common_common_proto_rawDescData = file_common_common_proto_rawDesc\n)\n\nfunc file_common_common_proto_rawDescGZIP() []byte {\n\tfile_common_common_proto_rawDescOnce.Do(func() {\n\t\tfile_common_common_proto_rawDescData = protoimpl.X.CompressGZIP(file_common_common_proto_rawDescData)\n\t})\n\treturn file_common_common_proto_rawDescData\n}\n\nvar file_common_common_proto_enumTypes = make([]protoimpl.EnumInfo, 1)\nvar file_common_common_proto_msgTypes = make([]protoimpl.MessageInfo, 1)\nvar file_common_common_proto_goTypes = []interface{}{\n\t(Diagnosis_Code)(0), // 0: common.Diagnosis.Code\n\t(*Diagnosis)(nil),   // 1: common.Diagnosis\n}\nvar file_common_common_proto_depIdxs = []int32{\n\t0, // 0: common.Diagnosis.code:type_name -> common.Diagnosis.Code\n\t1, // [1:1] is the sub-list for method output_type\n\t1, // [1:1] is the sub-list for method input_type\n\t1, // [1:1] is the sub-list for extension type_name\n\t1, // [1:1] is the sub-list for extension extendee\n\t0, // [0:1] is the sub-list for field type_name\n}\n\nfunc init() { file_common_common_proto_init() }\nfunc file_common_common_proto_init() {\n\tif File_common_common_proto != nil {\n\t\treturn\n\t}\n\tif 
!protoimpl.UnsafeEnabled {\n\t\tfile_common_common_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {\n\t\t\tswitch v := v.(*Diagnosis); i {\n\t\t\tcase 0:\n\t\t\t\treturn &v.state\n\t\t\tcase 1:\n\t\t\t\treturn &v.sizeCache\n\t\t\tcase 2:\n\t\t\t\treturn &v.unknownFields\n\t\t\tdefault:\n\t\t\t\treturn nil\n\t\t\t}\n\t\t}\n\t}\n\ttype x struct{}\n\tout := protoimpl.TypeBuilder{\n\t\tFile: protoimpl.DescBuilder{\n\t\t\tGoPackagePath: reflect.TypeOf(x{}).PkgPath(),\n\t\t\tRawDescriptor: file_common_common_proto_rawDesc,\n\t\t\tNumEnums:      1,\n\t\t\tNumMessages:   1,\n\t\t\tNumExtensions: 0,\n\t\t\tNumServices:   0,\n\t\t},\n\t\tGoTypes:           file_common_common_proto_goTypes,\n\t\tDependencyIndexes: file_common_common_proto_depIdxs,\n\t\tEnumInfos:         file_common_common_proto_enumTypes,\n\t\tMessageInfos:      file_common_common_proto_msgTypes,\n\t}.Build()\n\tFile_common_common_proto = out.File\n\tfile_common_common_proto_rawDesc = nil\n\tfile_common_common_proto_goTypes = nil\n\tfile_common_common_proto_depIdxs = nil\n}\n"
  },
  {
    "path": "pkg/grpc/pb/common/common.proto",
    "content": "syntax = \"proto3\";\noption go_package = \"proto/common\";\npackage common;\n\n//protoc --proto_path=pkg/grpc/pb --go_out=pkg/grpc/pb --go_opt=paths=source_relative --go-grpc_out=pkg/grpc/pb  common/common.proto cloud/cloud.proto issue/issue.proto log/log.proto\n\nmessage Diagnosis {\n  Code code = 1;\n  string msg = 2;\n\n  enum Code {\n    SUCCESS = 0;\n    IllegalToken = 2;\n    Error = 7;\n    NoAuthority = 11;\n  }\n}\n"
  },
  {
    "path": "pkg/grpc/pb/issue/issue.pb.go",
    "content": "// Code generated by protoc-gen-go. DO NOT EDIT.\n// versions:\n// \tprotoc-gen-go v1.28.1\n// \tprotoc        v3.21.8\n// source: issue.proto\n\npackage issue\n\nimport (\n\tprotoreflect \"google.golang.org/protobuf/reflect/protoreflect\"\n\tprotoimpl \"google.golang.org/protobuf/runtime/protoimpl\"\n\treflect \"reflect\"\n\tsync \"sync\"\n)\n\nconst (\n\t// Verify that this generated code is sufficiently up-to-date.\n\t_ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)\n\t// Verify that runtime/protoimpl is sufficiently up-to-date.\n\t_ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)\n)\n\ntype UploadIssueStream_Severity int32\n\nconst (\n\tUploadIssueStream_UNKNOWN       UploadIssueStream_Severity = 0\n\tUploadIssueStream_INFORMATIONAL UploadIssueStream_Severity = 1\n\tUploadIssueStream_LOW           UploadIssueStream_Severity = 2\n\tUploadIssueStream_MEDIUM        UploadIssueStream_Severity = 3\n\tUploadIssueStream_HIGH          UploadIssueStream_Severity = 4\n\tUploadIssueStream_CRITICAL      UploadIssueStream_Severity = 5\n)\n\n// Enum value maps for UploadIssueStream_Severity.\nvar (\n\tUploadIssueStream_Severity_name = map[int32]string{\n\t\t0: \"UNKNOWN\",\n\t\t1: \"INFORMATIONAL\",\n\t\t2: \"LOW\",\n\t\t3: \"MEDIUM\",\n\t\t4: \"HIGH\",\n\t\t5: \"CRITICAL\",\n\t}\n\tUploadIssueStream_Severity_value = map[string]int32{\n\t\t\"UNKNOWN\":       0,\n\t\t\"INFORMATIONAL\": 1,\n\t\t\"LOW\":           2,\n\t\t\"MEDIUM\":        3,\n\t\t\"HIGH\":          4,\n\t\t\"CRITICAL\":      5,\n\t}\n)\n\nfunc (x UploadIssueStream_Severity) Enum() *UploadIssueStream_Severity {\n\tp := new(UploadIssueStream_Severity)\n\t*p = x\n\treturn p\n}\n\nfunc (x UploadIssueStream_Severity) String() string {\n\treturn protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))\n}\n\nfunc (UploadIssueStream_Severity) Descriptor() protoreflect.EnumDescriptor {\n\treturn file_issue_proto_enumTypes[0].Descriptor()\n}\n\nfunc (UploadIssueStream_Severity) 
Type() protoreflect.EnumType {\n\treturn &file_issue_proto_enumTypes[0]\n}\n\nfunc (x UploadIssueStream_Severity) Number() protoreflect.EnumNumber {\n\treturn protoreflect.EnumNumber(x)\n}\n\n// Deprecated: Use UploadIssueStream_Severity.Descriptor instead.\nfunc (UploadIssueStream_Severity) EnumDescriptor() ([]byte, []int) {\n\treturn file_issue_proto_rawDescGZIP(), []int{0, 0}\n}\n\ntype UploadIssueStream_Rule_Status int32\n\nconst (\n\tUploadIssueStream_Rule_UNKNOWN UploadIssueStream_Rule_Status = 0\n\tUploadIssueStream_Rule_SUCCESS UploadIssueStream_Rule_Status = 1\n\tUploadIssueStream_Rule_FAILED  UploadIssueStream_Rule_Status = 2\n)\n\n// Enum value maps for UploadIssueStream_Rule_Status.\nvar (\n\tUploadIssueStream_Rule_Status_name = map[int32]string{\n\t\t0: \"UNKNOWN\",\n\t\t1: \"SUCCESS\",\n\t\t2: \"FAILED\",\n\t}\n\tUploadIssueStream_Rule_Status_value = map[string]int32{\n\t\t\"UNKNOWN\": 0,\n\t\t\"SUCCESS\": 1,\n\t\t\"FAILED\":  2,\n\t}\n)\n\nfunc (x UploadIssueStream_Rule_Status) Enum() *UploadIssueStream_Rule_Status {\n\tp := new(UploadIssueStream_Rule_Status)\n\t*p = x\n\treturn p\n}\n\nfunc (x UploadIssueStream_Rule_Status) String() string {\n\treturn protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))\n}\n\nfunc (UploadIssueStream_Rule_Status) Descriptor() protoreflect.EnumDescriptor {\n\treturn file_issue_proto_enumTypes[1].Descriptor()\n}\n\nfunc (UploadIssueStream_Rule_Status) Type() protoreflect.EnumType {\n\treturn &file_issue_proto_enumTypes[1]\n}\n\nfunc (x UploadIssueStream_Rule_Status) Number() protoreflect.EnumNumber {\n\treturn protoreflect.EnumNumber(x)\n}\n\n// Deprecated: Use UploadIssueStream_Rule_Status.Descriptor instead.\nfunc (UploadIssueStream_Rule_Status) EnumDescriptor() ([]byte, []int) {\n\treturn file_issue_proto_rawDescGZIP(), []int{0, 5, 0}\n}\n\ntype UploadIssueStream struct {\n\tstate         protoimpl.MessageState\n\tsizeCache     protoimpl.SizeCache\n\tunknownFields protoimpl.UnknownFields\n}\n\nfunc 
(x *UploadIssueStream) Reset() {\n\t*x = UploadIssueStream{}\n\tif protoimpl.UnsafeEnabled {\n\t\tmi := &file_issue_proto_msgTypes[0]\n\t\tms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))\n\t\tms.StoreMessageInfo(mi)\n\t}\n}\n\nfunc (x *UploadIssueStream) String() string {\n\treturn protoimpl.X.MessageStringOf(x)\n}\n\nfunc (*UploadIssueStream) ProtoMessage() {}\n\nfunc (x *UploadIssueStream) ProtoReflect() protoreflect.Message {\n\tmi := &file_issue_proto_msgTypes[0]\n\tif protoimpl.UnsafeEnabled && x != nil {\n\t\tms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))\n\t\tif ms.LoadMessageInfo() == nil {\n\t\t\tms.StoreMessageInfo(mi)\n\t\t}\n\t\treturn ms\n\t}\n\treturn mi.MessageOf(x)\n}\n\n// Deprecated: Use UploadIssueStream.ProtoReflect.Descriptor instead.\nfunc (*UploadIssueStream) Descriptor() ([]byte, []int) {\n\treturn file_issue_proto_rawDescGZIP(), []int{0}\n}\n\ntype UploadIssueStream_Response struct {\n\tstate         protoimpl.MessageState\n\tsizeCache     protoimpl.SizeCache\n\tunknownFields protoimpl.UnknownFields\n}\n\nfunc (x *UploadIssueStream_Response) Reset() {\n\t*x = UploadIssueStream_Response{}\n\tif protoimpl.UnsafeEnabled {\n\t\tmi := &file_issue_proto_msgTypes[1]\n\t\tms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))\n\t\tms.StoreMessageInfo(mi)\n\t}\n}\n\nfunc (x *UploadIssueStream_Response) String() string {\n\treturn protoimpl.X.MessageStringOf(x)\n}\n\nfunc (*UploadIssueStream_Response) ProtoMessage() {}\n\nfunc (x *UploadIssueStream_Response) ProtoReflect() protoreflect.Message {\n\tmi := &file_issue_proto_msgTypes[1]\n\tif protoimpl.UnsafeEnabled && x != nil {\n\t\tms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))\n\t\tif ms.LoadMessageInfo() == nil {\n\t\t\tms.StoreMessageInfo(mi)\n\t\t}\n\t\treturn ms\n\t}\n\treturn mi.MessageOf(x)\n}\n\n// Deprecated: Use UploadIssueStream_Response.ProtoReflect.Descriptor instead.\nfunc (*UploadIssueStream_Response) Descriptor() ([]byte, []int) {\n\treturn 
file_issue_proto_rawDescGZIP(), []int{0, 0}\n}\n\ntype UploadIssueStream_Request struct {\n\tstate         protoimpl.MessageState\n\tsizeCache     protoimpl.SizeCache\n\tunknownFields protoimpl.UnknownFields\n\n\tIndex    int32                       `protobuf:\"varint,1,opt,name=index,proto3\" json:\"index,omitempty\"`\n\tRule     *UploadIssueStream_Rule     `protobuf:\"bytes,2,opt,name=rule,proto3\" json:\"rule,omitempty\"`\n\tProvider *UploadIssueStream_Provider `protobuf:\"bytes,3,opt,name=provider,proto3\" json:\"provider,omitempty\"`\n\tModule   *UploadIssueStream_Module   `protobuf:\"bytes,4,opt,name=module,proto3\" json:\"module,omitempty\"`\n\t// i do not know how to name it...\n\tContext *UploadIssueStream_Context `protobuf:\"bytes,5,opt,name=context,proto3\" json:\"context,omitempty\"`\n}\n\nfunc (x *UploadIssueStream_Request) Reset() {\n\t*x = UploadIssueStream_Request{}\n\tif protoimpl.UnsafeEnabled {\n\t\tmi := &file_issue_proto_msgTypes[2]\n\t\tms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))\n\t\tms.StoreMessageInfo(mi)\n\t}\n}\n\nfunc (x *UploadIssueStream_Request) String() string {\n\treturn protoimpl.X.MessageStringOf(x)\n}\n\nfunc (*UploadIssueStream_Request) ProtoMessage() {}\n\nfunc (x *UploadIssueStream_Request) ProtoReflect() protoreflect.Message {\n\tmi := &file_issue_proto_msgTypes[2]\n\tif protoimpl.UnsafeEnabled && x != nil {\n\t\tms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))\n\t\tif ms.LoadMessageInfo() == nil {\n\t\t\tms.StoreMessageInfo(mi)\n\t\t}\n\t\treturn ms\n\t}\n\treturn mi.MessageOf(x)\n}\n\n// Deprecated: Use UploadIssueStream_Request.ProtoReflect.Descriptor instead.\nfunc (*UploadIssueStream_Request) Descriptor() ([]byte, []int) {\n\treturn file_issue_proto_rawDescGZIP(), []int{0, 1}\n}\n\nfunc (x *UploadIssueStream_Request) GetIndex() int32 {\n\tif x != nil {\n\t\treturn x.Index\n\t}\n\treturn 0\n}\n\nfunc (x *UploadIssueStream_Request) GetRule() *UploadIssueStream_Rule {\n\tif x != nil {\n\t\treturn 
x.Rule\n\t}\n\treturn nil\n}\n\nfunc (x *UploadIssueStream_Request) GetProvider() *UploadIssueStream_Provider {\n\tif x != nil {\n\t\treturn x.Provider\n\t}\n\treturn nil\n}\n\nfunc (x *UploadIssueStream_Request) GetModule() *UploadIssueStream_Module {\n\tif x != nil {\n\t\treturn x.Module\n\t}\n\treturn nil\n}\n\nfunc (x *UploadIssueStream_Request) GetContext() *UploadIssueStream_Context {\n\tif x != nil {\n\t\treturn x.Context\n\t}\n\treturn nil\n}\n\ntype UploadIssueStream_Context struct {\n\tstate         protoimpl.MessageState\n\tsizeCache     protoimpl.SizeCache\n\tunknownFields protoimpl.UnknownFields\n\n\tSrcTableNames []string `protobuf:\"bytes,1,rep,name=src_table_names,json=srcTableNames,proto3\" json:\"src_table_names,omitempty\"`\n\t// use which one pg db schema\n\tSchema string `protobuf:\"bytes,2,opt,name=schema,proto3\" json:\"schema,omitempty\"`\n}\n\nfunc (x *UploadIssueStream_Context) Reset() {\n\t*x = UploadIssueStream_Context{}\n\tif protoimpl.UnsafeEnabled {\n\t\tmi := &file_issue_proto_msgTypes[3]\n\t\tms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))\n\t\tms.StoreMessageInfo(mi)\n\t}\n}\n\nfunc (x *UploadIssueStream_Context) String() string {\n\treturn protoimpl.X.MessageStringOf(x)\n}\n\nfunc (*UploadIssueStream_Context) ProtoMessage() {}\n\nfunc (x *UploadIssueStream_Context) ProtoReflect() protoreflect.Message {\n\tmi := &file_issue_proto_msgTypes[3]\n\tif protoimpl.UnsafeEnabled && x != nil {\n\t\tms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))\n\t\tif ms.LoadMessageInfo() == nil {\n\t\t\tms.StoreMessageInfo(mi)\n\t\t}\n\t\treturn ms\n\t}\n\treturn mi.MessageOf(x)\n}\n\n// Deprecated: Use UploadIssueStream_Context.ProtoReflect.Descriptor instead.\nfunc (*UploadIssueStream_Context) Descriptor() ([]byte, []int) {\n\treturn file_issue_proto_rawDescGZIP(), []int{0, 2}\n}\n\nfunc (x *UploadIssueStream_Context) GetSrcTableNames() []string {\n\tif x != nil {\n\t\treturn x.SrcTableNames\n\t}\n\treturn nil\n}\n\nfunc (x 
*UploadIssueStream_Context) GetSchema() string {\n\tif x != nil {\n\t\treturn x.Schema\n\t}\n\treturn \"\"\n}\n\ntype UploadIssueStream_Module struct {\n\tstate         protoimpl.MessageState\n\tsizeCache     protoimpl.SizeCache\n\tunknownFields protoimpl.UnknownFields\n\n\tName             string   `protobuf:\"bytes,1,opt,name=name,proto3\" json:\"name,omitempty\"`\n\tSource           string   `protobuf:\"bytes,2,opt,name=source,proto3\" json:\"source,omitempty\"`\n\tDependenciesPath []string `protobuf:\"bytes,3,rep,name=dependencies_path,json=dependenciesPath,proto3\" json:\"dependencies_path,omitempty\"`\n}\n\nfunc (x *UploadIssueStream_Module) Reset() {\n\t*x = UploadIssueStream_Module{}\n\tif protoimpl.UnsafeEnabled {\n\t\tmi := &file_issue_proto_msgTypes[4]\n\t\tms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))\n\t\tms.StoreMessageInfo(mi)\n\t}\n}\n\nfunc (x *UploadIssueStream_Module) String() string {\n\treturn protoimpl.X.MessageStringOf(x)\n}\n\nfunc (*UploadIssueStream_Module) ProtoMessage() {}\n\nfunc (x *UploadIssueStream_Module) ProtoReflect() protoreflect.Message {\n\tmi := &file_issue_proto_msgTypes[4]\n\tif protoimpl.UnsafeEnabled && x != nil {\n\t\tms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))\n\t\tif ms.LoadMessageInfo() == nil {\n\t\t\tms.StoreMessageInfo(mi)\n\t\t}\n\t\treturn ms\n\t}\n\treturn mi.MessageOf(x)\n}\n\n// Deprecated: Use UploadIssueStream_Module.ProtoReflect.Descriptor instead.\nfunc (*UploadIssueStream_Module) Descriptor() ([]byte, []int) {\n\treturn file_issue_proto_rawDescGZIP(), []int{0, 3}\n}\n\nfunc (x *UploadIssueStream_Module) GetName() string {\n\tif x != nil {\n\t\treturn x.Name\n\t}\n\treturn \"\"\n}\n\nfunc (x *UploadIssueStream_Module) GetSource() string {\n\tif x != nil {\n\t\treturn x.Source\n\t}\n\treturn \"\"\n}\n\nfunc (x *UploadIssueStream_Module) GetDependenciesPath() []string {\n\tif x != nil {\n\t\treturn x.DependenciesPath\n\t}\n\treturn nil\n}\n\ntype UploadIssueStream_Provider struct 
{\n\tstate         protoimpl.MessageState\n\tsizeCache     protoimpl.SizeCache\n\tunknownFields protoimpl.UnknownFields\n\n\tName     string `protobuf:\"bytes,1,opt,name=name,proto3\" json:\"name,omitempty\"`\n\tProvider string `protobuf:\"bytes,2,opt,name=provider,proto3\" json:\"provider,omitempty\"`\n\tVersion  string `protobuf:\"bytes,3,opt,name=version,proto3\" json:\"version,omitempty\"`\n}\n\nfunc (x *UploadIssueStream_Provider) Reset() {\n\t*x = UploadIssueStream_Provider{}\n\tif protoimpl.UnsafeEnabled {\n\t\tmi := &file_issue_proto_msgTypes[5]\n\t\tms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))\n\t\tms.StoreMessageInfo(mi)\n\t}\n}\n\nfunc (x *UploadIssueStream_Provider) String() string {\n\treturn protoimpl.X.MessageStringOf(x)\n}\n\nfunc (*UploadIssueStream_Provider) ProtoMessage() {}\n\nfunc (x *UploadIssueStream_Provider) ProtoReflect() protoreflect.Message {\n\tmi := &file_issue_proto_msgTypes[5]\n\tif protoimpl.UnsafeEnabled && x != nil {\n\t\tms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))\n\t\tif ms.LoadMessageInfo() == nil {\n\t\t\tms.StoreMessageInfo(mi)\n\t\t}\n\t\treturn ms\n\t}\n\treturn mi.MessageOf(x)\n}\n\n// Deprecated: Use UploadIssueStream_Provider.ProtoReflect.Descriptor instead.\nfunc (*UploadIssueStream_Provider) Descriptor() ([]byte, []int) {\n\treturn file_issue_proto_rawDescGZIP(), []int{0, 4}\n}\n\nfunc (x *UploadIssueStream_Provider) GetName() string {\n\tif x != nil {\n\t\treturn x.Name\n\t}\n\treturn \"\"\n}\n\nfunc (x *UploadIssueStream_Provider) GetProvider() string {\n\tif x != nil {\n\t\treturn x.Provider\n\t}\n\treturn \"\"\n}\n\nfunc (x *UploadIssueStream_Provider) GetVersion() string {\n\tif x != nil {\n\t\treturn x.Version\n\t}\n\treturn \"\"\n}\n\n// rule's file block\ntype UploadIssueStream_Rule struct {\n\tstate         protoimpl.MessageState\n\tsizeCache     protoimpl.SizeCache\n\tunknownFields protoimpl.UnknownFields\n\n\tName string `protobuf:\"bytes,1,opt,name=name,proto3\" 
json:\"name,omitempty\"`\n\t// sql\n\tQuery  string            `protobuf:\"bytes,2,opt,name=query,proto3\" json:\"query,omitempty\"`\n\tLabels map[string]string `protobuf:\"bytes,3,rep,name=labels,proto3\" json:\"labels,omitempty\" protobuf_key:\"bytes,1,opt,name=key,proto3\" protobuf_val:\"bytes,2,opt,name=value,proto3\"`\n\t// rule's metadata\n\tMetadata *UploadIssueStream_Metadata `protobuf:\"bytes,4,opt,name=metadata,proto3\" json:\"metadata,omitempty\"`\n\tOutput   string                      `protobuf:\"bytes,5,opt,name=output,proto3\" json:\"output,omitempty\"`\n\t// rule's status : success / failed\n\tStatus UploadIssueStream_Rule_Status `protobuf:\"varint,6,opt,name=status,proto3,enum=issue.UploadIssueStream_Rule_Status\" json:\"status,omitempty\"`\n}\n\nfunc (x *UploadIssueStream_Rule) Reset() {\n\t*x = UploadIssueStream_Rule{}\n\tif protoimpl.UnsafeEnabled {\n\t\tmi := &file_issue_proto_msgTypes[6]\n\t\tms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))\n\t\tms.StoreMessageInfo(mi)\n\t}\n}\n\nfunc (x *UploadIssueStream_Rule) String() string {\n\treturn protoimpl.X.MessageStringOf(x)\n}\n\nfunc (*UploadIssueStream_Rule) ProtoMessage() {}\n\nfunc (x *UploadIssueStream_Rule) ProtoReflect() protoreflect.Message {\n\tmi := &file_issue_proto_msgTypes[6]\n\tif protoimpl.UnsafeEnabled && x != nil {\n\t\tms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))\n\t\tif ms.LoadMessageInfo() == nil {\n\t\t\tms.StoreMessageInfo(mi)\n\t\t}\n\t\treturn ms\n\t}\n\treturn mi.MessageOf(x)\n}\n\n// Deprecated: Use UploadIssueStream_Rule.ProtoReflect.Descriptor instead.\nfunc (*UploadIssueStream_Rule) Descriptor() ([]byte, []int) {\n\treturn file_issue_proto_rawDescGZIP(), []int{0, 5}\n}\n\nfunc (x *UploadIssueStream_Rule) GetName() string {\n\tif x != nil {\n\t\treturn x.Name\n\t}\n\treturn \"\"\n}\n\nfunc (x *UploadIssueStream_Rule) GetQuery() string {\n\tif x != nil {\n\t\treturn x.Query\n\t}\n\treturn \"\"\n}\n\nfunc (x *UploadIssueStream_Rule) GetLabels() 
map[string]string {\n\tif x != nil {\n\t\treturn x.Labels\n\t}\n\treturn nil\n}\n\nfunc (x *UploadIssueStream_Rule) GetMetadata() *UploadIssueStream_Metadata {\n\tif x != nil {\n\t\treturn x.Metadata\n\t}\n\treturn nil\n}\n\nfunc (x *UploadIssueStream_Rule) GetOutput() string {\n\tif x != nil {\n\t\treturn x.Output\n\t}\n\treturn \"\"\n}\n\nfunc (x *UploadIssueStream_Rule) GetStatus() UploadIssueStream_Rule_Status {\n\tif x != nil {\n\t\treturn x.Status\n\t}\n\treturn UploadIssueStream_Rule_UNKNOWN\n}\n\n// rule's metadata\ntype UploadIssueStream_Metadata struct {\n\tstate         protoimpl.MessageState\n\tsizeCache     protoimpl.SizeCache\n\tunknownFields protoimpl.UnknownFields\n\n\tAuthor      string                     `protobuf:\"bytes,1,opt,name=author,proto3\" json:\"author,omitempty\"`\n\tDescription string                     `protobuf:\"bytes,2,opt,name=description,proto3\" json:\"description,omitempty\"`\n\tId          string                     `protobuf:\"bytes,3,opt,name=id,proto3\" json:\"id,omitempty\"`\n\tProvider    string                     `protobuf:\"bytes,4,opt,name=provider,proto3\" json:\"provider,omitempty\"`\n\tRemediation string                     `protobuf:\"bytes,5,opt,name=remediation,proto3\" json:\"remediation,omitempty\"`\n\tSeverity    UploadIssueStream_Severity `protobuf:\"varint,6,opt,name=severity,proto3,enum=issue.UploadIssueStream_Severity\" json:\"severity,omitempty\"`\n\tTags        []string                   `protobuf:\"bytes,7,rep,name=tags,proto3\" json:\"tags,omitempty\"`\n\tTitle       string                     `protobuf:\"bytes,8,opt,name=title,proto3\" json:\"title,omitempty\"`\n}\n\nfunc (x *UploadIssueStream_Metadata) Reset() {\n\t*x = UploadIssueStream_Metadata{}\n\tif protoimpl.UnsafeEnabled {\n\t\tmi := &file_issue_proto_msgTypes[7]\n\t\tms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))\n\t\tms.StoreMessageInfo(mi)\n\t}\n}\n\nfunc (x *UploadIssueStream_Metadata) String() string {\n\treturn 
protoimpl.X.MessageStringOf(x)\n}\n\nfunc (*UploadIssueStream_Metadata) ProtoMessage() {}\n\nfunc (x *UploadIssueStream_Metadata) ProtoReflect() protoreflect.Message {\n\tmi := &file_issue_proto_msgTypes[7]\n\tif protoimpl.UnsafeEnabled && x != nil {\n\t\tms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))\n\t\tif ms.LoadMessageInfo() == nil {\n\t\t\tms.StoreMessageInfo(mi)\n\t\t}\n\t\treturn ms\n\t}\n\treturn mi.MessageOf(x)\n}\n\n// Deprecated: Use UploadIssueStream_Metadata.ProtoReflect.Descriptor instead.\nfunc (*UploadIssueStream_Metadata) Descriptor() ([]byte, []int) {\n\treturn file_issue_proto_rawDescGZIP(), []int{0, 6}\n}\n\nfunc (x *UploadIssueStream_Metadata) GetAuthor() string {\n\tif x != nil {\n\t\treturn x.Author\n\t}\n\treturn \"\"\n}\n\nfunc (x *UploadIssueStream_Metadata) GetDescription() string {\n\tif x != nil {\n\t\treturn x.Description\n\t}\n\treturn \"\"\n}\n\nfunc (x *UploadIssueStream_Metadata) GetId() string {\n\tif x != nil {\n\t\treturn x.Id\n\t}\n\treturn \"\"\n}\n\nfunc (x *UploadIssueStream_Metadata) GetProvider() string {\n\tif x != nil {\n\t\treturn x.Provider\n\t}\n\treturn \"\"\n}\n\nfunc (x *UploadIssueStream_Metadata) GetRemediation() string {\n\tif x != nil {\n\t\treturn x.Remediation\n\t}\n\treturn \"\"\n}\n\nfunc (x *UploadIssueStream_Metadata) GetSeverity() UploadIssueStream_Severity {\n\tif x != nil {\n\t\treturn x.Severity\n\t}\n\treturn UploadIssueStream_UNKNOWN\n}\n\nfunc (x *UploadIssueStream_Metadata) GetTags() []string {\n\tif x != nil {\n\t\treturn x.Tags\n\t}\n\treturn nil\n}\n\nfunc (x *UploadIssueStream_Metadata) GetTitle() string {\n\tif x != nil {\n\t\treturn x.Title\n\t}\n\treturn \"\"\n}\n\nvar File_issue_proto protoreflect.FileDescriptor\n\nvar file_issue_proto_rawDesc = []byte{\n\t0x0a, 0x0b, 0x69, 0x73, 0x73, 0x75, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x05, 0x69,\n\t0x73, 0x73, 0x75, 0x65, 0x22, 0xf9, 0x09, 0x0a, 0x11, 0x55, 0x70, 0x6c, 0x6f, 0x61, 0x64, 0x49,\n\t0x73, 0x73, 0x75, 0x65, 0x53, 
0x74, 0x72, 0x65, 0x61, 0x6d, 0x1a, 0x0a, 0x0a, 0x08, 0x52, 0x65,\n\t0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x1a, 0x86, 0x02, 0x0a, 0x07, 0x52, 0x65, 0x71, 0x75, 0x65,\n\t0x73, 0x74, 0x12, 0x14, 0x0a, 0x05, 0x69, 0x6e, 0x64, 0x65, 0x78, 0x18, 0x01, 0x20, 0x01, 0x28,\n\t0x05, 0x52, 0x05, 0x69, 0x6e, 0x64, 0x65, 0x78, 0x12, 0x31, 0x0a, 0x04, 0x72, 0x75, 0x6c, 0x65,\n\t0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x69, 0x73, 0x73, 0x75, 0x65, 0x2e, 0x55,\n\t0x70, 0x6c, 0x6f, 0x61, 0x64, 0x49, 0x73, 0x73, 0x75, 0x65, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d,\n\t0x2e, 0x52, 0x75, 0x6c, 0x65, 0x52, 0x04, 0x72, 0x75, 0x6c, 0x65, 0x12, 0x3d, 0x0a, 0x08, 0x70,\n\t0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x21, 0x2e,\n\t0x69, 0x73, 0x73, 0x75, 0x65, 0x2e, 0x55, 0x70, 0x6c, 0x6f, 0x61, 0x64, 0x49, 0x73, 0x73, 0x75,\n\t0x65, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x2e, 0x50, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72,\n\t0x52, 0x08, 0x70, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x12, 0x37, 0x0a, 0x06, 0x6d, 0x6f,\n\t0x64, 0x75, 0x6c, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1f, 0x2e, 0x69, 0x73, 0x73,\n\t0x75, 0x65, 0x2e, 0x55, 0x70, 0x6c, 0x6f, 0x61, 0x64, 0x49, 0x73, 0x73, 0x75, 0x65, 0x53, 0x74,\n\t0x72, 0x65, 0x61, 0x6d, 0x2e, 0x4d, 0x6f, 0x64, 0x75, 0x6c, 0x65, 0x52, 0x06, 0x6d, 0x6f, 0x64,\n\t0x75, 0x6c, 0x65, 0x12, 0x3a, 0x0a, 0x07, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x18, 0x05,\n\t0x20, 0x01, 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x69, 0x73, 0x73, 0x75, 0x65, 0x2e, 0x55, 0x70, 0x6c,\n\t0x6f, 0x61, 0x64, 0x49, 0x73, 0x73, 0x75, 0x65, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x2e, 0x43,\n\t0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x52, 0x07, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x1a,\n\t0x49, 0x0a, 0x07, 0x43, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x12, 0x26, 0x0a, 0x0f, 0x73, 0x72,\n\t0x63, 0x5f, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x18, 0x01, 0x20,\n\t0x03, 0x28, 0x09, 0x52, 0x0d, 0x73, 0x72, 0x63, 
0x54, 0x61, 0x62, 0x6c, 0x65, 0x4e, 0x61, 0x6d,\n\t0x65, 0x73, 0x12, 0x16, 0x0a, 0x06, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x18, 0x02, 0x20, 0x01,\n\t0x28, 0x09, 0x52, 0x06, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x1a, 0x61, 0x0a, 0x06, 0x4d, 0x6f,\n\t0x64, 0x75, 0x6c, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01,\n\t0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x73, 0x6f, 0x75, 0x72,\n\t0x63, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65,\n\t0x12, 0x2b, 0x0a, 0x11, 0x64, 0x65, 0x70, 0x65, 0x6e, 0x64, 0x65, 0x6e, 0x63, 0x69, 0x65, 0x73,\n\t0x5f, 0x70, 0x61, 0x74, 0x68, 0x18, 0x03, 0x20, 0x03, 0x28, 0x09, 0x52, 0x10, 0x64, 0x65, 0x70,\n\t0x65, 0x6e, 0x64, 0x65, 0x6e, 0x63, 0x69, 0x65, 0x73, 0x50, 0x61, 0x74, 0x68, 0x1a, 0x54, 0x0a,\n\t0x08, 0x50, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d,\n\t0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x1a, 0x0a,\n\t0x08, 0x70, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52,\n\t0x08, 0x70, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x12, 0x18, 0x0a, 0x07, 0x76, 0x65, 0x72,\n\t0x73, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x76, 0x65, 0x72, 0x73,\n\t0x69, 0x6f, 0x6e, 0x1a, 0xf3, 0x02, 0x0a, 0x04, 0x52, 0x75, 0x6c, 0x65, 0x12, 0x12, 0x0a, 0x04,\n\t0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65,\n\t0x12, 0x14, 0x0a, 0x05, 0x71, 0x75, 0x65, 0x72, 0x79, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52,\n\t0x05, 0x71, 0x75, 0x65, 0x72, 0x79, 0x12, 0x41, 0x0a, 0x06, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x73,\n\t0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x29, 0x2e, 0x69, 0x73, 0x73, 0x75, 0x65, 0x2e, 0x55,\n\t0x70, 0x6c, 0x6f, 0x61, 0x64, 0x49, 0x73, 0x73, 0x75, 0x65, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d,\n\t0x2e, 0x52, 0x75, 0x6c, 0x65, 0x2e, 0x4c, 0x61, 0x62, 0x65, 0x6c, 
0x73, 0x45, 0x6e, 0x74, 0x72,\n\t0x79, 0x52, 0x06, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x12, 0x3d, 0x0a, 0x08, 0x6d, 0x65, 0x74,\n\t0x61, 0x64, 0x61, 0x74, 0x61, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x69, 0x73,\n\t0x73, 0x75, 0x65, 0x2e, 0x55, 0x70, 0x6c, 0x6f, 0x61, 0x64, 0x49, 0x73, 0x73, 0x75, 0x65, 0x53,\n\t0x74, 0x72, 0x65, 0x61, 0x6d, 0x2e, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x52, 0x08,\n\t0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x12, 0x16, 0x0a, 0x06, 0x6f, 0x75, 0x74, 0x70,\n\t0x75, 0x74, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x6f, 0x75, 0x74, 0x70, 0x75, 0x74,\n\t0x12, 0x3c, 0x0a, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0e,\n\t0x32, 0x24, 0x2e, 0x69, 0x73, 0x73, 0x75, 0x65, 0x2e, 0x55, 0x70, 0x6c, 0x6f, 0x61, 0x64, 0x49,\n\t0x73, 0x73, 0x75, 0x65, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x2e, 0x52, 0x75, 0x6c, 0x65, 0x2e,\n\t0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x1a, 0x39,\n\t0x0a, 0x0b, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a,\n\t0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12,\n\t0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05,\n\t0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0x2e, 0x0a, 0x06, 0x53, 0x74, 0x61,\n\t0x74, 0x75, 0x73, 0x12, 0x0b, 0x0a, 0x07, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00,\n\t0x12, 0x0b, 0x0a, 0x07, 0x53, 0x55, 0x43, 0x43, 0x45, 0x53, 0x53, 0x10, 0x01, 0x12, 0x0a, 0x0a,\n\t0x06, 0x46, 0x41, 0x49, 0x4c, 0x45, 0x44, 0x10, 0x02, 0x1a, 0xfb, 0x01, 0x0a, 0x08, 0x4d, 0x65,\n\t0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x12, 0x16, 0x0a, 0x06, 0x61, 0x75, 0x74, 0x68, 0x6f, 0x72,\n\t0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x61, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x12, 0x20,\n\t0x0a, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 
0x02, 0x20,\n\t0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e,\n\t0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x02, 0x69, 0x64,\n\t0x12, 0x1a, 0x0a, 0x08, 0x70, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x18, 0x04, 0x20, 0x01,\n\t0x28, 0x09, 0x52, 0x08, 0x70, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x12, 0x20, 0x0a, 0x0b,\n\t0x72, 0x65, 0x6d, 0x65, 0x64, 0x69, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x05, 0x20, 0x01, 0x28,\n\t0x09, 0x52, 0x0b, 0x72, 0x65, 0x6d, 0x65, 0x64, 0x69, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x3d,\n\t0x0a, 0x08, 0x73, 0x65, 0x76, 0x65, 0x72, 0x69, 0x74, 0x79, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0e,\n\t0x32, 0x21, 0x2e, 0x69, 0x73, 0x73, 0x75, 0x65, 0x2e, 0x55, 0x70, 0x6c, 0x6f, 0x61, 0x64, 0x49,\n\t0x73, 0x73, 0x75, 0x65, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x2e, 0x53, 0x65, 0x76, 0x65, 0x72,\n\t0x69, 0x74, 0x79, 0x52, 0x08, 0x73, 0x65, 0x76, 0x65, 0x72, 0x69, 0x74, 0x79, 0x12, 0x12, 0x0a,\n\t0x04, 0x74, 0x61, 0x67, 0x73, 0x18, 0x07, 0x20, 0x03, 0x28, 0x09, 0x52, 0x04, 0x74, 0x61, 0x67,\n\t0x73, 0x12, 0x14, 0x0a, 0x05, 0x74, 0x69, 0x74, 0x6c, 0x65, 0x18, 0x08, 0x20, 0x01, 0x28, 0x09,\n\t0x52, 0x05, 0x74, 0x69, 0x74, 0x6c, 0x65, 0x22, 0x57, 0x0a, 0x08, 0x53, 0x65, 0x76, 0x65, 0x72,\n\t0x69, 0x74, 0x79, 0x12, 0x0b, 0x0a, 0x07, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00,\n\t0x12, 0x11, 0x0a, 0x0d, 0x49, 0x4e, 0x46, 0x4f, 0x52, 0x4d, 0x41, 0x54, 0x49, 0x4f, 0x4e, 0x41,\n\t0x4c, 0x10, 0x01, 0x12, 0x07, 0x0a, 0x03, 0x4c, 0x4f, 0x57, 0x10, 0x02, 0x12, 0x0a, 0x0a, 0x06,\n\t0x4d, 0x45, 0x44, 0x49, 0x55, 0x4d, 0x10, 0x03, 0x12, 0x08, 0x0a, 0x04, 0x48, 0x49, 0x47, 0x48,\n\t0x10, 0x04, 0x12, 0x0c, 0x0a, 0x08, 0x43, 0x52, 0x49, 0x54, 0x49, 0x43, 0x41, 0x4c, 0x10, 0x05,\n\t0x32, 0x65, 0x0a, 0x05, 0x49, 0x73, 0x73, 0x75, 0x65, 0x12, 0x5c, 0x0a, 0x11, 0x55, 0x70, 0x6c,\n\t0x6f, 0x61, 0x64, 0x49, 0x73, 0x73, 0x75, 0x65, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x12, 
0x20,\n\t0x2e, 0x69, 0x73, 0x73, 0x75, 0x65, 0x2e, 0x55, 0x70, 0x6c, 0x6f, 0x61, 0x64, 0x49, 0x73, 0x73,\n\t0x75, 0x65, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x2e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74,\n\t0x1a, 0x21, 0x2e, 0x69, 0x73, 0x73, 0x75, 0x65, 0x2e, 0x55, 0x70, 0x6c, 0x6f, 0x61, 0x64, 0x49,\n\t0x73, 0x73, 0x75, 0x65, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x2e, 0x52, 0x65, 0x73, 0x70, 0x6f,\n\t0x6e, 0x73, 0x65, 0x22, 0x00, 0x28, 0x01, 0x42, 0x3a, 0x5a, 0x38, 0x67, 0x69, 0x74, 0x68, 0x75,\n\t0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x73, 0x65, 0x6c, 0x65, 0x66, 0x72, 0x61, 0x2f, 0x73, 0x65,\n\t0x6c, 0x65, 0x66, 0x72, 0x61, 0x2d, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2f, 0x70, 0x6b, 0x67, 0x2f,\n\t0x67, 0x72, 0x70, 0x63, 0x2f, 0x70, 0x62, 0x2f, 0x69, 0x73, 0x73, 0x75, 0x65, 0x3b, 0x69, 0x73,\n\t0x73, 0x75, 0x65, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,\n}\n\nvar (\n\tfile_issue_proto_rawDescOnce sync.Once\n\tfile_issue_proto_rawDescData = file_issue_proto_rawDesc\n)\n\nfunc file_issue_proto_rawDescGZIP() []byte {\n\tfile_issue_proto_rawDescOnce.Do(func() {\n\t\tfile_issue_proto_rawDescData = protoimpl.X.CompressGZIP(file_issue_proto_rawDescData)\n\t})\n\treturn file_issue_proto_rawDescData\n}\n\nvar file_issue_proto_enumTypes = make([]protoimpl.EnumInfo, 2)\nvar file_issue_proto_msgTypes = make([]protoimpl.MessageInfo, 9)\nvar file_issue_proto_goTypes = []interface{}{\n\t(UploadIssueStream_Severity)(0),    // 0: issue.UploadIssueStream.Severity\n\t(UploadIssueStream_Rule_Status)(0), // 1: issue.UploadIssueStream.Rule.Status\n\t(*UploadIssueStream)(nil),          // 2: issue.UploadIssueStream\n\t(*UploadIssueStream_Response)(nil), // 3: issue.UploadIssueStream.Response\n\t(*UploadIssueStream_Request)(nil),  // 4: issue.UploadIssueStream.Request\n\t(*UploadIssueStream_Context)(nil),  // 5: issue.UploadIssueStream.Context\n\t(*UploadIssueStream_Module)(nil),   // 6: issue.UploadIssueStream.Module\n\t(*UploadIssueStream_Provider)(nil), // 7: 
issue.UploadIssueStream.Provider\n\t(*UploadIssueStream_Rule)(nil),     // 8: issue.UploadIssueStream.Rule\n\t(*UploadIssueStream_Metadata)(nil), // 9: issue.UploadIssueStream.Metadata\n\tnil,                                // 10: issue.UploadIssueStream.Rule.LabelsEntry\n}\nvar file_issue_proto_depIdxs = []int32{\n\t8,  // 0: issue.UploadIssueStream.Request.rule:type_name -> issue.UploadIssueStream.Rule\n\t7,  // 1: issue.UploadIssueStream.Request.provider:type_name -> issue.UploadIssueStream.Provider\n\t6,  // 2: issue.UploadIssueStream.Request.module:type_name -> issue.UploadIssueStream.Module\n\t5,  // 3: issue.UploadIssueStream.Request.context:type_name -> issue.UploadIssueStream.Context\n\t10, // 4: issue.UploadIssueStream.Rule.labels:type_name -> issue.UploadIssueStream.Rule.LabelsEntry\n\t9,  // 5: issue.UploadIssueStream.Rule.metadata:type_name -> issue.UploadIssueStream.Metadata\n\t1,  // 6: issue.UploadIssueStream.Rule.status:type_name -> issue.UploadIssueStream.Rule.Status\n\t0,  // 7: issue.UploadIssueStream.Metadata.severity:type_name -> issue.UploadIssueStream.Severity\n\t4,  // 8: issue.Issue.UploadIssueStream:input_type -> issue.UploadIssueStream.Request\n\t3,  // 9: issue.Issue.UploadIssueStream:output_type -> issue.UploadIssueStream.Response\n\t9,  // [9:10] is the sub-list for method output_type\n\t8,  // [8:9] is the sub-list for method input_type\n\t8,  // [8:8] is the sub-list for extension type_name\n\t8,  // [8:8] is the sub-list for extension extendee\n\t0,  // [0:8] is the sub-list for field type_name\n}\n\nfunc init() { file_issue_proto_init() }\nfunc file_issue_proto_init() {\n\tif File_issue_proto != nil {\n\t\treturn\n\t}\n\tif !protoimpl.UnsafeEnabled {\n\t\tfile_issue_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {\n\t\t\tswitch v := v.(*UploadIssueStream); i {\n\t\t\tcase 0:\n\t\t\t\treturn &v.state\n\t\t\tcase 1:\n\t\t\t\treturn &v.sizeCache\n\t\t\tcase 2:\n\t\t\t\treturn 
&v.unknownFields\n\t\t\tdefault:\n\t\t\t\treturn nil\n\t\t\t}\n\t\t}\n\t\tfile_issue_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} {\n\t\t\tswitch v := v.(*UploadIssueStream_Response); i {\n\t\t\tcase 0:\n\t\t\t\treturn &v.state\n\t\t\tcase 1:\n\t\t\t\treturn &v.sizeCache\n\t\t\tcase 2:\n\t\t\t\treturn &v.unknownFields\n\t\t\tdefault:\n\t\t\t\treturn nil\n\t\t\t}\n\t\t}\n\t\tfile_issue_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} {\n\t\t\tswitch v := v.(*UploadIssueStream_Request); i {\n\t\t\tcase 0:\n\t\t\t\treturn &v.state\n\t\t\tcase 1:\n\t\t\t\treturn &v.sizeCache\n\t\t\tcase 2:\n\t\t\t\treturn &v.unknownFields\n\t\t\tdefault:\n\t\t\t\treturn nil\n\t\t\t}\n\t\t}\n\t\tfile_issue_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} {\n\t\t\tswitch v := v.(*UploadIssueStream_Context); i {\n\t\t\tcase 0:\n\t\t\t\treturn &v.state\n\t\t\tcase 1:\n\t\t\t\treturn &v.sizeCache\n\t\t\tcase 2:\n\t\t\t\treturn &v.unknownFields\n\t\t\tdefault:\n\t\t\t\treturn nil\n\t\t\t}\n\t\t}\n\t\tfile_issue_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} {\n\t\t\tswitch v := v.(*UploadIssueStream_Module); i {\n\t\t\tcase 0:\n\t\t\t\treturn &v.state\n\t\t\tcase 1:\n\t\t\t\treturn &v.sizeCache\n\t\t\tcase 2:\n\t\t\t\treturn &v.unknownFields\n\t\t\tdefault:\n\t\t\t\treturn nil\n\t\t\t}\n\t\t}\n\t\tfile_issue_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} {\n\t\t\tswitch v := v.(*UploadIssueStream_Provider); i {\n\t\t\tcase 0:\n\t\t\t\treturn &v.state\n\t\t\tcase 1:\n\t\t\t\treturn &v.sizeCache\n\t\t\tcase 2:\n\t\t\t\treturn &v.unknownFields\n\t\t\tdefault:\n\t\t\t\treturn nil\n\t\t\t}\n\t\t}\n\t\tfile_issue_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} {\n\t\t\tswitch v := v.(*UploadIssueStream_Rule); i {\n\t\t\tcase 0:\n\t\t\t\treturn &v.state\n\t\t\tcase 1:\n\t\t\t\treturn &v.sizeCache\n\t\t\tcase 2:\n\t\t\t\treturn &v.unknownFields\n\t\t\tdefault:\n\t\t\t\treturn 
nil\n\t\t\t}\n\t\t}\n\t\tfile_issue_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} {\n\t\t\tswitch v := v.(*UploadIssueStream_Metadata); i {\n\t\t\tcase 0:\n\t\t\t\treturn &v.state\n\t\t\tcase 1:\n\t\t\t\treturn &v.sizeCache\n\t\t\tcase 2:\n\t\t\t\treturn &v.unknownFields\n\t\t\tdefault:\n\t\t\t\treturn nil\n\t\t\t}\n\t\t}\n\t}\n\ttype x struct{}\n\tout := protoimpl.TypeBuilder{\n\t\tFile: protoimpl.DescBuilder{\n\t\t\tGoPackagePath: reflect.TypeOf(x{}).PkgPath(),\n\t\t\tRawDescriptor: file_issue_proto_rawDesc,\n\t\t\tNumEnums:      2,\n\t\t\tNumMessages:   9,\n\t\t\tNumExtensions: 0,\n\t\t\tNumServices:   1,\n\t\t},\n\t\tGoTypes:           file_issue_proto_goTypes,\n\t\tDependencyIndexes: file_issue_proto_depIdxs,\n\t\tEnumInfos:         file_issue_proto_enumTypes,\n\t\tMessageInfos:      file_issue_proto_msgTypes,\n\t}.Build()\n\tFile_issue_proto = out.File\n\tfile_issue_proto_rawDesc = nil\n\tfile_issue_proto_goTypes = nil\n\tfile_issue_proto_depIdxs = nil\n}\n"
  },
  {
    "path": "pkg/grpc/pb/issue/issue.proto",
    "content": "syntax = \"proto3\";\noption go_package = \"github.com/selefra/selefra-cloud/pkg/grpc/pb/issue;issue\";\n\npackage issue;\n// protoc --proto_path=pkg/grpc/pb/issue --go_out=pkg/grpc/pb/issue --go_opt=paths=source_relative --go-grpc_out=pkg/grpc/pb/issue --go-grpc_opt=paths=source_relative  issue.proto\n\n\nmessage UploadIssueStream {\n\n  message Response {\n  }\n\n  message Request {\n\n    int32 index = 1;\n\n    Rule rule = 2;\n\n    Provider provider = 3;\n\n    Module module = 4;\n\n    // i do not know how to name it...\n    Context context = 5;\n\n  }\n\n  message Context {\n    repeated string src_table_names = 1;\n    // use which one pg db schema\n    string schema = 2;\n  }\n\n  message Module {\n    string name = 1;\n    string source = 2;\n    repeated string dependencies_path = 3;\n  }\n\n  message Provider {\n    string name = 1;\n    string provider = 2;\n    string version = 3;\n  }\n\n  // rule's file block\n  message Rule {\n\n    string name = 1;\n\n    // sql\n    string query = 2;\n\n    map<string, string> labels = 3;\n\n    // rule's metadata\n    Metadata metadata = 4;\n\n    string output = 5;\n    // rule's status : success / failed\n    Status status = 6;\n    enum Status{\n      UNKNOWN = 0;\n      SUCCESS = 1;\n      FAILED = 2;\n    }\n  }\n\n  // rule's metadata\n  message Metadata {\n\n    string author = 1;\n\n    string description = 2;\n\n    string id = 3;\n\n    string provider = 4;\n\n    string remediation = 5;\n\n    Severity severity = 6;\n\n    repeated string tags = 7;\n\n    string title = 8;\n\n  }\n\n\n  enum Severity {\n    UNKNOWN = 0;\n    INFORMATIONAL = 1;\n    LOW = 2;\n    MEDIUM = 3;\n    HIGH = 4;\n    CRITICAL = 5;\n  }\n}\n\n\n/*\n  token: token\n  projectName: projectName\n  taskUUID: taskUUID\n */\nservice Issue {\n  rpc UploadIssueStream (stream UploadIssueStream.Request) returns (UploadIssueStream.Response) {};\n}"
  },
  {
    "path": "pkg/grpc/pb/issue/issue_grpc.pb.go",
    "content": "// Code generated by protoc-gen-go-grpc. DO NOT EDIT.\n// versions:\n// - protoc-gen-go-grpc v1.2.0\n// - protoc             v3.21.8\n// source: issue.proto\n\npackage issue\n\nimport (\n\tcontext \"context\"\n\tgrpc \"google.golang.org/grpc\"\n\tcodes \"google.golang.org/grpc/codes\"\n\tstatus \"google.golang.org/grpc/status\"\n)\n\n// This is a compile-time assertion to ensure that this generated file\n// is compatible with the grpc package it is being compiled against.\n// Requires gRPC-Go v1.32.0 or later.\nconst _ = grpc.SupportPackageIsVersion7\n\n// IssueClient is the client API for Issue service.\n//\n// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream.\ntype IssueClient interface {\n\tUploadIssueStream(ctx context.Context, opts ...grpc.CallOption) (Issue_UploadIssueStreamClient, error)\n}\n\ntype issueClient struct {\n\tcc grpc.ClientConnInterface\n}\n\nfunc NewIssueClient(cc grpc.ClientConnInterface) IssueClient {\n\treturn &issueClient{cc}\n}\n\nfunc (c *issueClient) UploadIssueStream(ctx context.Context, opts ...grpc.CallOption) (Issue_UploadIssueStreamClient, error) {\n\tstream, err := c.cc.NewStream(ctx, &Issue_ServiceDesc.Streams[0], \"/issue.Issue/UploadIssueStream\", opts...)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tx := &issueUploadIssueStreamClient{stream}\n\treturn x, nil\n}\n\ntype Issue_UploadIssueStreamClient interface {\n\tSend(*UploadIssueStream_Request) error\n\tCloseAndRecv() (*UploadIssueStream_Response, error)\n\tgrpc.ClientStream\n}\n\ntype issueUploadIssueStreamClient struct {\n\tgrpc.ClientStream\n}\n\nfunc (x *issueUploadIssueStreamClient) Send(m *UploadIssueStream_Request) error {\n\treturn x.ClientStream.SendMsg(m)\n}\n\nfunc (x *issueUploadIssueStreamClient) CloseAndRecv() (*UploadIssueStream_Response, error) {\n\tif err := x.ClientStream.CloseSend(); err != nil {\n\t\treturn nil, err\n\t}\n\tm := 
new(UploadIssueStream_Response)\n\tif err := x.ClientStream.RecvMsg(m); err != nil {\n\t\treturn nil, err\n\t}\n\treturn m, nil\n}\n\n// IssueServer is the server API for Issue service.\n// All implementations must embed UnimplementedIssueServer\n// for forward compatibility\ntype IssueServer interface {\n\tUploadIssueStream(Issue_UploadIssueStreamServer) error\n\tmustEmbedUnimplementedIssueServer()\n}\n\n// UnimplementedIssueServer must be embedded to have forward compatible implementations.\ntype UnimplementedIssueServer struct {\n}\n\nfunc (UnimplementedIssueServer) UploadIssueStream(Issue_UploadIssueStreamServer) error {\n\treturn status.Errorf(codes.Unimplemented, \"method UploadIssueStream not implemented\")\n}\nfunc (UnimplementedIssueServer) mustEmbedUnimplementedIssueServer() {}\n\n// UnsafeIssueServer may be embedded to opt out of forward compatibility for this service.\n// Use of this interface is not recommended, as added methods to IssueServer will\n// result in compilation errors.\ntype UnsafeIssueServer interface {\n\tmustEmbedUnimplementedIssueServer()\n}\n\nfunc RegisterIssueServer(s grpc.ServiceRegistrar, srv IssueServer) {\n\ts.RegisterService(&Issue_ServiceDesc, srv)\n}\n\nfunc _Issue_UploadIssueStream_Handler(srv interface{}, stream grpc.ServerStream) error {\n\treturn srv.(IssueServer).UploadIssueStream(&issueUploadIssueStreamServer{stream})\n}\n\ntype Issue_UploadIssueStreamServer interface {\n\tSendAndClose(*UploadIssueStream_Response) error\n\tRecv() (*UploadIssueStream_Request, error)\n\tgrpc.ServerStream\n}\n\ntype issueUploadIssueStreamServer struct {\n\tgrpc.ServerStream\n}\n\nfunc (x *issueUploadIssueStreamServer) SendAndClose(m *UploadIssueStream_Response) error {\n\treturn x.ServerStream.SendMsg(m)\n}\n\nfunc (x *issueUploadIssueStreamServer) Recv() (*UploadIssueStream_Request, error) {\n\tm := new(UploadIssueStream_Request)\n\tif err := x.ServerStream.RecvMsg(m); err != nil {\n\t\treturn nil, err\n\t}\n\treturn m, nil\n}\n\n// 
Issue_ServiceDesc is the grpc.ServiceDesc for Issue service.\n// It's only intended for direct use with grpc.RegisterService,\n// and not to be introspected or modified (even as a copy)\nvar Issue_ServiceDesc = grpc.ServiceDesc{\n\tServiceName: \"issue.Issue\",\n\tHandlerType: (*IssueServer)(nil),\n\tMethods:     []grpc.MethodDesc{},\n\tStreams: []grpc.StreamDesc{\n\t\t{\n\t\t\tStreamName:    \"UploadIssueStream\",\n\t\t\tHandler:       _Issue_UploadIssueStream_Handler,\n\t\t\tClientStreams: true,\n\t\t},\n\t},\n\tMetadata: \"issue.proto\",\n}\n"
  },
  {
    "path": "pkg/grpc/pb/log/log.pb.go",
    "content": "// Code generated by protoc-gen-go. DO NOT EDIT.\n// versions:\n// \tprotoc-gen-go v1.28.1\n// \tprotoc        v3.21.8\n// source: log/log.proto\n\npackage log\n\nimport (\n\t\"github.com/selefra/selefra/pkg/grpc/pb/common\"\n\tprotoreflect \"google.golang.org/protobuf/reflect/protoreflect\"\n\tprotoimpl \"google.golang.org/protobuf/runtime/protoimpl\"\n\ttimestamppb \"google.golang.org/protobuf/types/known/timestamppb\"\n\treflect \"reflect\"\n\tsync \"sync\"\n)\n\nconst (\n\t// Verify that this generated code is sufficiently up-to-date.\n\t_ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)\n\t// Verify that runtime/protoimpl is sufficiently up-to-date.\n\t_ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)\n)\n\ntype StageType int32\n\nconst (\n\tStageType_STAGE_TYPE_INITIALIZING            StageType = 0\n\tStageType_STAGE_TYPE_PULL_INFRASTRUCTURE     StageType = 1\n\tStageType_STAGE_TYPE_INFRASTRUCTURE_ANALYSIS StageType = 2\n)\n\n// Enum value maps for StageType.\nvar (\n\tStageType_name = map[int32]string{\n\t\t0: \"STAGE_TYPE_INITIALIZING\",\n\t\t1: \"STAGE_TYPE_PULL_INFRASTRUCTURE\",\n\t\t2: \"STAGE_TYPE_INFRASTRUCTURE_ANALYSIS\",\n\t}\n\tStageType_value = map[string]int32{\n\t\t\"STAGE_TYPE_INITIALIZING\":            0,\n\t\t\"STAGE_TYPE_PULL_INFRASTRUCTURE\":     1,\n\t\t\"STAGE_TYPE_INFRASTRUCTURE_ANALYSIS\": 2,\n\t}\n)\n\nfunc (x StageType) Enum() *StageType {\n\tp := new(StageType)\n\t*p = x\n\treturn p\n}\n\nfunc (x StageType) String() string {\n\treturn protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))\n}\n\nfunc (StageType) Descriptor() protoreflect.EnumDescriptor {\n\treturn file_log_log_proto_enumTypes[0].Descriptor()\n}\n\nfunc (StageType) Type() protoreflect.EnumType {\n\treturn &file_log_log_proto_enumTypes[0]\n}\n\nfunc (x StageType) Number() protoreflect.EnumNumber {\n\treturn protoreflect.EnumNumber(x)\n}\n\n// Deprecated: Use StageType.Descriptor instead.\nfunc (StageType) EnumDescriptor() ([]byte, 
[]int) {\n\treturn file_log_log_proto_rawDescGZIP(), []int{0}\n}\n\ntype Status int32\n\nconst (\n\tStatus_STATUS_SUCCESS Status = 0\n\tStatus_STATUS_FAILED  Status = 1\n)\n\n// Enum value maps for Status.\nvar (\n\tStatus_name = map[int32]string{\n\t\t0: \"STATUS_SUCCESS\",\n\t\t1: \"STATUS_FAILED\",\n\t}\n\tStatus_value = map[string]int32{\n\t\t\"STATUS_SUCCESS\": 0,\n\t\t\"STATUS_FAILED\":  1,\n\t}\n)\n\nfunc (x Status) Enum() *Status {\n\tp := new(Status)\n\t*p = x\n\treturn p\n}\n\nfunc (x Status) String() string {\n\treturn protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))\n}\n\nfunc (Status) Descriptor() protoreflect.EnumDescriptor {\n\treturn file_log_log_proto_enumTypes[1].Descriptor()\n}\n\nfunc (Status) Type() protoreflect.EnumType {\n\treturn &file_log_log_proto_enumTypes[1]\n}\n\nfunc (x Status) Number() protoreflect.EnumNumber {\n\treturn protoreflect.EnumNumber(x)\n}\n\n// Deprecated: Use Status.Descriptor instead.\nfunc (Status) EnumDescriptor() ([]byte, []int) {\n\treturn file_log_log_proto_rawDescGZIP(), []int{1}\n}\n\ntype Level int32\n\nconst (\n\tLevel_LEVEL_DEBUG Level = 0\n\tLevel_LEVEL_INFO  Level = 1\n\tLevel_LEVEL_WARN  Level = 2\n\tLevel_LEVEL_ERROR Level = 3\n\tLevel_LEVEL_FATAL Level = 4\n)\n\n// Enum value maps for Level.\nvar (\n\tLevel_name = map[int32]string{\n\t\t0: \"LEVEL_DEBUG\",\n\t\t1: \"LEVEL_INFO\",\n\t\t2: \"LEVEL_WARN\",\n\t\t3: \"LEVEL_ERROR\",\n\t\t4: \"LEVEL_FATAL\",\n\t}\n\tLevel_value = map[string]int32{\n\t\t\"LEVEL_DEBUG\": 0,\n\t\t\"LEVEL_INFO\":  1,\n\t\t\"LEVEL_WARN\":  2,\n\t\t\"LEVEL_ERROR\": 3,\n\t\t\"LEVEL_FATAL\": 4,\n\t}\n)\n\nfunc (x Level) Enum() *Level {\n\tp := new(Level)\n\t*p = x\n\treturn p\n}\n\nfunc (x Level) String() string {\n\treturn protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))\n}\n\nfunc (Level) Descriptor() protoreflect.EnumDescriptor {\n\treturn file_log_log_proto_enumTypes[2].Descriptor()\n}\n\nfunc (Level) Type() protoreflect.EnumType {\n\treturn 
&file_log_log_proto_enumTypes[2]\n}\n\nfunc (x Level) Number() protoreflect.EnumNumber {\n\treturn protoreflect.EnumNumber(x)\n}\n\n// Deprecated: Use Level.Descriptor instead.\nfunc (Level) EnumDescriptor() ([]byte, []int) {\n\treturn file_log_log_proto_rawDescGZIP(), []int{2}\n}\n\ntype UploadLogStream struct {\n\tstate         protoimpl.MessageState\n\tsizeCache     protoimpl.SizeCache\n\tunknownFields protoimpl.UnknownFields\n}\n\nfunc (x *UploadLogStream) Reset() {\n\t*x = UploadLogStream{}\n\tif protoimpl.UnsafeEnabled {\n\t\tmi := &file_log_log_proto_msgTypes[0]\n\t\tms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))\n\t\tms.StoreMessageInfo(mi)\n\t}\n}\n\nfunc (x *UploadLogStream) String() string {\n\treturn protoimpl.X.MessageStringOf(x)\n}\n\nfunc (*UploadLogStream) ProtoMessage() {}\n\nfunc (x *UploadLogStream) ProtoReflect() protoreflect.Message {\n\tmi := &file_log_log_proto_msgTypes[0]\n\tif protoimpl.UnsafeEnabled && x != nil {\n\t\tms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))\n\t\tif ms.LoadMessageInfo() == nil {\n\t\t\tms.StoreMessageInfo(mi)\n\t\t}\n\t\treturn ms\n\t}\n\treturn mi.MessageOf(x)\n}\n\n// Deprecated: Use UploadLogStream.ProtoReflect.Descriptor instead.\nfunc (*UploadLogStream) Descriptor() ([]byte, []int) {\n\treturn file_log_log_proto_rawDescGZIP(), []int{0}\n}\n\ntype UploadLogStatus struct {\n\tstate         protoimpl.MessageState\n\tsizeCache     protoimpl.SizeCache\n\tunknownFields protoimpl.UnknownFields\n}\n\nfunc (x *UploadLogStatus) Reset() {\n\t*x = UploadLogStatus{}\n\tif protoimpl.UnsafeEnabled {\n\t\tmi := &file_log_log_proto_msgTypes[1]\n\t\tms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))\n\t\tms.StoreMessageInfo(mi)\n\t}\n}\n\nfunc (x *UploadLogStatus) String() string {\n\treturn protoimpl.X.MessageStringOf(x)\n}\n\nfunc (*UploadLogStatus) ProtoMessage() {}\n\nfunc (x *UploadLogStatus) ProtoReflect() protoreflect.Message {\n\tmi := &file_log_log_proto_msgTypes[1]\n\tif protoimpl.UnsafeEnabled && x 
!= nil {\n\t\tms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))\n\t\tif ms.LoadMessageInfo() == nil {\n\t\t\tms.StoreMessageInfo(mi)\n\t\t}\n\t\treturn ms\n\t}\n\treturn mi.MessageOf(x)\n}\n\n// Deprecated: Use UploadLogStatus.ProtoReflect.Descriptor instead.\nfunc (*UploadLogStatus) Descriptor() ([]byte, []int) {\n\treturn file_log_log_proto_rawDescGZIP(), []int{1}\n}\n\ntype UploadLogStream_Request struct {\n\tstate         protoimpl.MessageState\n\tsizeCache     protoimpl.SizeCache\n\tunknownFields protoimpl.UnknownFields\n\n\tStage StageType `protobuf:\"varint,1,opt,name=stage,proto3,enum=log.StageType\" json:\"stage,omitempty\"`\n\t// log id, task uniq\n\tIndex uint64 `protobuf:\"varint,2,opt,name=index,proto3\" json:\"index,omitempty\"`\n\tMsg   string `protobuf:\"bytes,3,opt,name=msg,proto3\" json:\"msg,omitempty\"`\n\tLevel Level  `protobuf:\"varint,4,opt,name=level,proto3,enum=log.Level\" json:\"level,omitempty\"`\n\t// log product time\n\tTime *timestamppb.Timestamp `protobuf:\"bytes,5,opt,name=time,proto3\" json:\"time,omitempty\"`\n}\n\nfunc (x *UploadLogStream_Request) Reset() {\n\t*x = UploadLogStream_Request{}\n\tif protoimpl.UnsafeEnabled {\n\t\tmi := &file_log_log_proto_msgTypes[2]\n\t\tms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))\n\t\tms.StoreMessageInfo(mi)\n\t}\n}\n\nfunc (x *UploadLogStream_Request) String() string {\n\treturn protoimpl.X.MessageStringOf(x)\n}\n\nfunc (*UploadLogStream_Request) ProtoMessage() {}\n\nfunc (x *UploadLogStream_Request) ProtoReflect() protoreflect.Message {\n\tmi := &file_log_log_proto_msgTypes[2]\n\tif protoimpl.UnsafeEnabled && x != nil {\n\t\tms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))\n\t\tif ms.LoadMessageInfo() == nil {\n\t\t\tms.StoreMessageInfo(mi)\n\t\t}\n\t\treturn ms\n\t}\n\treturn mi.MessageOf(x)\n}\n\n// Deprecated: Use UploadLogStream_Request.ProtoReflect.Descriptor instead.\nfunc (*UploadLogStream_Request) Descriptor() ([]byte, []int) {\n\treturn 
file_log_log_proto_rawDescGZIP(), []int{0, 0}\n}\n\nfunc (x *UploadLogStream_Request) GetStage() StageType {\n\tif x != nil {\n\t\treturn x.Stage\n\t}\n\treturn StageType_STAGE_TYPE_INITIALIZING\n}\n\nfunc (x *UploadLogStream_Request) GetIndex() uint64 {\n\tif x != nil {\n\t\treturn x.Index\n\t}\n\treturn 0\n}\n\nfunc (x *UploadLogStream_Request) GetMsg() string {\n\tif x != nil {\n\t\treturn x.Msg\n\t}\n\treturn \"\"\n}\n\nfunc (x *UploadLogStream_Request) GetLevel() Level {\n\tif x != nil {\n\t\treturn x.Level\n\t}\n\treturn Level_LEVEL_DEBUG\n}\n\nfunc (x *UploadLogStream_Request) GetTime() *timestamppb.Timestamp {\n\tif x != nil {\n\t\treturn x.Time\n\t}\n\treturn nil\n}\n\ntype UploadLogStream_Response struct {\n\tstate         protoimpl.MessageState\n\tsizeCache     protoimpl.SizeCache\n\tunknownFields protoimpl.UnknownFields\n}\n\nfunc (x *UploadLogStream_Response) Reset() {\n\t*x = UploadLogStream_Response{}\n\tif protoimpl.UnsafeEnabled {\n\t\tmi := &file_log_log_proto_msgTypes[3]\n\t\tms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))\n\t\tms.StoreMessageInfo(mi)\n\t}\n}\n\nfunc (x *UploadLogStream_Response) String() string {\n\treturn protoimpl.X.MessageStringOf(x)\n}\n\nfunc (*UploadLogStream_Response) ProtoMessage() {}\n\nfunc (x *UploadLogStream_Response) ProtoReflect() protoreflect.Message {\n\tmi := &file_log_log_proto_msgTypes[3]\n\tif protoimpl.UnsafeEnabled && x != nil {\n\t\tms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))\n\t\tif ms.LoadMessageInfo() == nil {\n\t\t\tms.StoreMessageInfo(mi)\n\t\t}\n\t\treturn ms\n\t}\n\treturn mi.MessageOf(x)\n}\n\n// Deprecated: Use UploadLogStream_Response.ProtoReflect.Descriptor instead.\nfunc (*UploadLogStream_Response) Descriptor() ([]byte, []int) {\n\treturn file_log_log_proto_rawDescGZIP(), []int{0, 1}\n}\n\ntype UploadLogStatus_Request struct {\n\tstate         protoimpl.MessageState\n\tsizeCache     protoimpl.SizeCache\n\tunknownFields protoimpl.UnknownFields\n\n\tStage  StageType 
`protobuf:\"varint,1,opt,name=stage,proto3,enum=log.StageType\" json:\"stage,omitempty\"`\n\tStatus Status    `protobuf:\"varint,2,opt,name=status,proto3,enum=log.Status\" json:\"status,omitempty\"`\n\t// status change time\n\tTime *timestamppb.Timestamp `protobuf:\"bytes,3,opt,name=time,proto3\" json:\"time,omitempty\"`\n}\n\nfunc (x *UploadLogStatus_Request) Reset() {\n\t*x = UploadLogStatus_Request{}\n\tif protoimpl.UnsafeEnabled {\n\t\tmi := &file_log_log_proto_msgTypes[4]\n\t\tms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))\n\t\tms.StoreMessageInfo(mi)\n\t}\n}\n\nfunc (x *UploadLogStatus_Request) String() string {\n\treturn protoimpl.X.MessageStringOf(x)\n}\n\nfunc (*UploadLogStatus_Request) ProtoMessage() {}\n\nfunc (x *UploadLogStatus_Request) ProtoReflect() protoreflect.Message {\n\tmi := &file_log_log_proto_msgTypes[4]\n\tif protoimpl.UnsafeEnabled && x != nil {\n\t\tms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))\n\t\tif ms.LoadMessageInfo() == nil {\n\t\t\tms.StoreMessageInfo(mi)\n\t\t}\n\t\treturn ms\n\t}\n\treturn mi.MessageOf(x)\n}\n\n// Deprecated: Use UploadLogStatus_Request.ProtoReflect.Descriptor instead.\nfunc (*UploadLogStatus_Request) Descriptor() ([]byte, []int) {\n\treturn file_log_log_proto_rawDescGZIP(), []int{1, 0}\n}\n\nfunc (x *UploadLogStatus_Request) GetStage() StageType {\n\tif x != nil {\n\t\treturn x.Stage\n\t}\n\treturn StageType_STAGE_TYPE_INITIALIZING\n}\n\nfunc (x *UploadLogStatus_Request) GetStatus() Status {\n\tif x != nil {\n\t\treturn x.Status\n\t}\n\treturn Status_STATUS_SUCCESS\n}\n\nfunc (x *UploadLogStatus_Request) GetTime() *timestamppb.Timestamp {\n\tif x != nil {\n\t\treturn x.Time\n\t}\n\treturn nil\n}\n\ntype UploadLogStatus_Response struct {\n\tstate         protoimpl.MessageState\n\tsizeCache     protoimpl.SizeCache\n\tunknownFields protoimpl.UnknownFields\n\n\tDiagnosis *common.Diagnosis `protobuf:\"bytes,1,opt,name=diagnosis,proto3\" json:\"diagnosis,omitempty\"`\n}\n\nfunc (x 
*UploadLogStatus_Response) Reset() {\n\t*x = UploadLogStatus_Response{}\n\tif protoimpl.UnsafeEnabled {\n\t\tmi := &file_log_log_proto_msgTypes[5]\n\t\tms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))\n\t\tms.StoreMessageInfo(mi)\n\t}\n}\n\nfunc (x *UploadLogStatus_Response) String() string {\n\treturn protoimpl.X.MessageStringOf(x)\n}\n\nfunc (*UploadLogStatus_Response) ProtoMessage() {}\n\nfunc (x *UploadLogStatus_Response) ProtoReflect() protoreflect.Message {\n\tmi := &file_log_log_proto_msgTypes[5]\n\tif protoimpl.UnsafeEnabled && x != nil {\n\t\tms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))\n\t\tif ms.LoadMessageInfo() == nil {\n\t\t\tms.StoreMessageInfo(mi)\n\t\t}\n\t\treturn ms\n\t}\n\treturn mi.MessageOf(x)\n}\n\n// Deprecated: Use UploadLogStatus_Response.ProtoReflect.Descriptor instead.\nfunc (*UploadLogStatus_Response) Descriptor() ([]byte, []int) {\n\treturn file_log_log_proto_rawDescGZIP(), []int{1, 1}\n}\n\nfunc (x *UploadLogStatus_Response) GetDiagnosis() *common.Diagnosis {\n\tif x != nil {\n\t\treturn x.Diagnosis\n\t}\n\treturn nil\n}\n\nvar File_log_log_proto protoreflect.FileDescriptor\n\nvar file_log_log_proto_rawDesc = []byte{\n\t0x0a, 0x0d, 0x6c, 0x6f, 0x67, 0x2f, 0x6c, 0x6f, 0x67, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12,\n\t0x03, 0x6c, 0x6f, 0x67, 0x1a, 0x1f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f,\n\t0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x2e,\n\t0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x13, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2f, 0x63, 0x6f,\n\t0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xc9, 0x01, 0x0a, 0x0f, 0x55,\n\t0x70, 0x6c, 0x6f, 0x61, 0x64, 0x4c, 0x6f, 0x67, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x1a, 0xa9,\n\t0x01, 0x0a, 0x07, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x24, 0x0a, 0x05, 0x73, 0x74,\n\t0x61, 0x67, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x0e, 0x2e, 0x6c, 0x6f, 0x67, 0x2e,\n\t0x53, 0x74, 0x61, 
0x67, 0x65, 0x54, 0x79, 0x70, 0x65, 0x52, 0x05, 0x73, 0x74, 0x61, 0x67, 0x65,\n\t0x12, 0x14, 0x0a, 0x05, 0x69, 0x6e, 0x64, 0x65, 0x78, 0x18, 0x02, 0x20, 0x01, 0x28, 0x04, 0x52,\n\t0x05, 0x69, 0x6e, 0x64, 0x65, 0x78, 0x12, 0x10, 0x0a, 0x03, 0x6d, 0x73, 0x67, 0x18, 0x03, 0x20,\n\t0x01, 0x28, 0x09, 0x52, 0x03, 0x6d, 0x73, 0x67, 0x12, 0x20, 0x0a, 0x05, 0x6c, 0x65, 0x76, 0x65,\n\t0x6c, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x0a, 0x2e, 0x6c, 0x6f, 0x67, 0x2e, 0x4c, 0x65,\n\t0x76, 0x65, 0x6c, 0x52, 0x05, 0x6c, 0x65, 0x76, 0x65, 0x6c, 0x12, 0x2e, 0x0a, 0x04, 0x74, 0x69,\n\t0x6d, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c,\n\t0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73,\n\t0x74, 0x61, 0x6d, 0x70, 0x52, 0x04, 0x74, 0x69, 0x6d, 0x65, 0x1a, 0x0a, 0x0a, 0x08, 0x52, 0x65,\n\t0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0xd5, 0x01, 0x0a, 0x0f, 0x55, 0x70, 0x6c, 0x6f, 0x61,\n\t0x64, 0x4c, 0x6f, 0x67, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x1a, 0x84, 0x01, 0x0a, 0x07, 0x52,\n\t0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x24, 0x0a, 0x05, 0x73, 0x74, 0x61, 0x67, 0x65, 0x18,\n\t0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x0e, 0x2e, 0x6c, 0x6f, 0x67, 0x2e, 0x53, 0x74, 0x61, 0x67,\n\t0x65, 0x54, 0x79, 0x70, 0x65, 0x52, 0x05, 0x73, 0x74, 0x61, 0x67, 0x65, 0x12, 0x23, 0x0a, 0x06,\n\t0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x0b, 0x2e, 0x6c,\n\t0x6f, 0x67, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75,\n\t0x73, 0x12, 0x2e, 0x0a, 0x04, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32,\n\t0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75,\n\t0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x04, 0x74, 0x69, 0x6d,\n\t0x65, 0x1a, 0x3b, 0x0a, 0x08, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x2f, 0x0a,\n\t0x09, 0x64, 0x69, 0x61, 0x67, 0x6e, 
0x6f, 0x73, 0x69, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b,\n\t0x32, 0x11, 0x2e, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x44, 0x69, 0x61, 0x67, 0x6e, 0x6f,\n\t0x73, 0x69, 0x73, 0x52, 0x09, 0x64, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x69, 0x73, 0x2a, 0x74,\n\t0x0a, 0x09, 0x53, 0x74, 0x61, 0x67, 0x65, 0x54, 0x79, 0x70, 0x65, 0x12, 0x1b, 0x0a, 0x17, 0x53,\n\t0x54, 0x41, 0x47, 0x45, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x49, 0x4e, 0x49, 0x54, 0x49, 0x41,\n\t0x4c, 0x49, 0x5a, 0x49, 0x4e, 0x47, 0x10, 0x00, 0x12, 0x22, 0x0a, 0x1e, 0x53, 0x54, 0x41, 0x47,\n\t0x45, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x50, 0x55, 0x4c, 0x4c, 0x5f, 0x49, 0x4e, 0x46, 0x52,\n\t0x41, 0x53, 0x54, 0x52, 0x55, 0x43, 0x54, 0x55, 0x52, 0x45, 0x10, 0x01, 0x12, 0x26, 0x0a, 0x22,\n\t0x53, 0x54, 0x41, 0x47, 0x45, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x49, 0x4e, 0x46, 0x52, 0x41,\n\t0x53, 0x54, 0x52, 0x55, 0x43, 0x54, 0x55, 0x52, 0x45, 0x5f, 0x41, 0x4e, 0x41, 0x4c, 0x59, 0x53,\n\t0x49, 0x53, 0x10, 0x02, 0x2a, 0x2f, 0x0a, 0x06, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x12,\n\t0x0a, 0x0e, 0x53, 0x54, 0x41, 0x54, 0x55, 0x53, 0x5f, 0x53, 0x55, 0x43, 0x43, 0x45, 0x53, 0x53,\n\t0x10, 0x00, 0x12, 0x11, 0x0a, 0x0d, 0x53, 0x54, 0x41, 0x54, 0x55, 0x53, 0x5f, 0x46, 0x41, 0x49,\n\t0x4c, 0x45, 0x44, 0x10, 0x01, 0x2a, 0x5a, 0x0a, 0x05, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x12, 0x0f,\n\t0x0a, 0x0b, 0x4c, 0x45, 0x56, 0x45, 0x4c, 0x5f, 0x44, 0x45, 0x42, 0x55, 0x47, 0x10, 0x00, 0x12,\n\t0x0e, 0x0a, 0x0a, 0x4c, 0x45, 0x56, 0x45, 0x4c, 0x5f, 0x49, 0x4e, 0x46, 0x4f, 0x10, 0x01, 0x12,\n\t0x0e, 0x0a, 0x0a, 0x4c, 0x45, 0x56, 0x45, 0x4c, 0x5f, 0x57, 0x41, 0x52, 0x4e, 0x10, 0x02, 0x12,\n\t0x0f, 0x0a, 0x0b, 0x4c, 0x45, 0x56, 0x45, 0x4c, 0x5f, 0x45, 0x52, 0x52, 0x4f, 0x52, 0x10, 0x03,\n\t0x12, 0x0f, 0x0a, 0x0b, 0x4c, 0x45, 0x56, 0x45, 0x4c, 0x5f, 0x46, 0x41, 0x54, 0x41, 0x4c, 0x10,\n\t0x04, 0x32, 0xab, 0x01, 0x0a, 0x03, 0x4c, 0x6f, 0x67, 0x12, 0x52, 0x0a, 0x0f, 0x55, 0x70, 0x6c,\n\t0x6f, 0x61, 0x64, 0x4c, 0x6f, 0x67, 0x53, 0x74, 0x72, 
0x65, 0x61, 0x6d, 0x12, 0x1c, 0x2e, 0x6c,\n\t0x6f, 0x67, 0x2e, 0x55, 0x70, 0x6c, 0x6f, 0x61, 0x64, 0x4c, 0x6f, 0x67, 0x53, 0x74, 0x72, 0x65,\n\t0x61, 0x6d, 0x2e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1d, 0x2e, 0x6c, 0x6f, 0x67,\n\t0x2e, 0x55, 0x70, 0x6c, 0x6f, 0x61, 0x64, 0x4c, 0x6f, 0x67, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d,\n\t0x2e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x28, 0x01, 0x12, 0x50, 0x0a,\n\t0x0f, 0x55, 0x70, 0x6c, 0x6f, 0x61, 0x64, 0x4c, 0x6f, 0x67, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73,\n\t0x12, 0x1c, 0x2e, 0x6c, 0x6f, 0x67, 0x2e, 0x55, 0x70, 0x6c, 0x6f, 0x61, 0x64, 0x4c, 0x6f, 0x67,\n\t0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x2e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1d,\n\t0x2e, 0x6c, 0x6f, 0x67, 0x2e, 0x55, 0x70, 0x6c, 0x6f, 0x61, 0x64, 0x4c, 0x6f, 0x67, 0x53, 0x74,\n\t0x61, 0x74, 0x75, 0x73, 0x2e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x42,\n\t0x0b, 0x5a, 0x09, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x6c, 0x6f, 0x67, 0x62, 0x06, 0x70, 0x72,\n\t0x6f, 0x74, 0x6f, 0x33,\n}\n\nvar (\n\tfile_log_log_proto_rawDescOnce sync.Once\n\tfile_log_log_proto_rawDescData = file_log_log_proto_rawDesc\n)\n\nfunc file_log_log_proto_rawDescGZIP() []byte {\n\tfile_log_log_proto_rawDescOnce.Do(func() {\n\t\tfile_log_log_proto_rawDescData = protoimpl.X.CompressGZIP(file_log_log_proto_rawDescData)\n\t})\n\treturn file_log_log_proto_rawDescData\n}\n\nvar file_log_log_proto_enumTypes = make([]protoimpl.EnumInfo, 3)\nvar file_log_log_proto_msgTypes = make([]protoimpl.MessageInfo, 6)\nvar file_log_log_proto_goTypes = []interface{}{\n\t(StageType)(0),                   // 0: log.StageType\n\t(Status)(0),                      // 1: log.Status\n\t(Level)(0),                       // 2: log.Level\n\t(*UploadLogStream)(nil),          // 3: log.UploadLogStream\n\t(*UploadLogStatus)(nil),          // 4: log.UploadLogStatus\n\t(*UploadLogStream_Request)(nil),  // 5: 
log.UploadLogStream.Request\n\t(*UploadLogStream_Response)(nil), // 6: log.UploadLogStream.Response\n\t(*UploadLogStatus_Request)(nil),  // 7: log.UploadLogStatus.Request\n\t(*UploadLogStatus_Response)(nil), // 8: log.UploadLogStatus.Response\n\t(*timestamppb.Timestamp)(nil),    // 9: google.protobuf.Timestamp\n\t(*common.Diagnosis)(nil),         // 10: common.Diagnosis\n}\nvar file_log_log_proto_depIdxs = []int32{\n\t0,  // 0: log.UploadLogStream.Request.stage:type_name -> log.StageType\n\t2,  // 1: log.UploadLogStream.Request.level:type_name -> log.Level\n\t9,  // 2: log.UploadLogStream.Request.time:type_name -> google.protobuf.Timestamp\n\t0,  // 3: log.UploadLogStatus.Request.stage:type_name -> log.StageType\n\t1,  // 4: log.UploadLogStatus.Request.status:type_name -> log.Status\n\t9,  // 5: log.UploadLogStatus.Request.time:type_name -> google.protobuf.Timestamp\n\t10, // 6: log.UploadLogStatus.Response.diagnosis:type_name -> common.Diagnosis\n\t5,  // 7: log.Log.UploadLogStream:input_type -> log.UploadLogStream.Request\n\t7,  // 8: log.Log.UploadLogStatus:input_type -> log.UploadLogStatus.Request\n\t6,  // 9: log.Log.UploadLogStream:output_type -> log.UploadLogStream.Response\n\t8,  // 10: log.Log.UploadLogStatus:output_type -> log.UploadLogStatus.Response\n\t9,  // [9:11] is the sub-list for method output_type\n\t7,  // [7:9] is the sub-list for method input_type\n\t7,  // [7:7] is the sub-list for extension type_name\n\t7,  // [7:7] is the sub-list for extension extendee\n\t0,  // [0:7] is the sub-list for field type_name\n}\n\nfunc init() { file_log_log_proto_init() }\nfunc file_log_log_proto_init() {\n\tif File_log_log_proto != nil {\n\t\treturn\n\t}\n\tif !protoimpl.UnsafeEnabled {\n\t\tfile_log_log_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {\n\t\t\tswitch v := v.(*UploadLogStream); i {\n\t\t\tcase 0:\n\t\t\t\treturn &v.state\n\t\t\tcase 1:\n\t\t\t\treturn &v.sizeCache\n\t\t\tcase 2:\n\t\t\t\treturn 
&v.unknownFields\n\t\t\tdefault:\n\t\t\t\treturn nil\n\t\t\t}\n\t\t}\n\t\tfile_log_log_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} {\n\t\t\tswitch v := v.(*UploadLogStatus); i {\n\t\t\tcase 0:\n\t\t\t\treturn &v.state\n\t\t\tcase 1:\n\t\t\t\treturn &v.sizeCache\n\t\t\tcase 2:\n\t\t\t\treturn &v.unknownFields\n\t\t\tdefault:\n\t\t\t\treturn nil\n\t\t\t}\n\t\t}\n\t\tfile_log_log_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} {\n\t\t\tswitch v := v.(*UploadLogStream_Request); i {\n\t\t\tcase 0:\n\t\t\t\treturn &v.state\n\t\t\tcase 1:\n\t\t\t\treturn &v.sizeCache\n\t\t\tcase 2:\n\t\t\t\treturn &v.unknownFields\n\t\t\tdefault:\n\t\t\t\treturn nil\n\t\t\t}\n\t\t}\n\t\tfile_log_log_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} {\n\t\t\tswitch v := v.(*UploadLogStream_Response); i {\n\t\t\tcase 0:\n\t\t\t\treturn &v.state\n\t\t\tcase 1:\n\t\t\t\treturn &v.sizeCache\n\t\t\tcase 2:\n\t\t\t\treturn &v.unknownFields\n\t\t\tdefault:\n\t\t\t\treturn nil\n\t\t\t}\n\t\t}\n\t\tfile_log_log_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} {\n\t\t\tswitch v := v.(*UploadLogStatus_Request); i {\n\t\t\tcase 0:\n\t\t\t\treturn &v.state\n\t\t\tcase 1:\n\t\t\t\treturn &v.sizeCache\n\t\t\tcase 2:\n\t\t\t\treturn &v.unknownFields\n\t\t\tdefault:\n\t\t\t\treturn nil\n\t\t\t}\n\t\t}\n\t\tfile_log_log_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} {\n\t\t\tswitch v := v.(*UploadLogStatus_Response); i {\n\t\t\tcase 0:\n\t\t\t\treturn &v.state\n\t\t\tcase 1:\n\t\t\t\treturn &v.sizeCache\n\t\t\tcase 2:\n\t\t\t\treturn &v.unknownFields\n\t\t\tdefault:\n\t\t\t\treturn nil\n\t\t\t}\n\t\t}\n\t}\n\ttype x struct{}\n\tout := protoimpl.TypeBuilder{\n\t\tFile: protoimpl.DescBuilder{\n\t\t\tGoPackagePath: reflect.TypeOf(x{}).PkgPath(),\n\t\t\tRawDescriptor: file_log_log_proto_rawDesc,\n\t\t\tNumEnums:      3,\n\t\t\tNumMessages:   6,\n\t\t\tNumExtensions: 0,\n\t\t\tNumServices:   
1,\n\t\t},\n\t\tGoTypes:           file_log_log_proto_goTypes,\n\t\tDependencyIndexes: file_log_log_proto_depIdxs,\n\t\tEnumInfos:         file_log_log_proto_enumTypes,\n\t\tMessageInfos:      file_log_log_proto_msgTypes,\n\t}.Build()\n\tFile_log_log_proto = out.File\n\tfile_log_log_proto_rawDesc = nil\n\tfile_log_log_proto_goTypes = nil\n\tfile_log_log_proto_depIdxs = nil\n}\n"
  },
  {
    "path": "pkg/grpc/pb/log/log.proto",
    "content": "syntax = \"proto3\";\noption go_package = \"proto/log\";\npackage log;\nimport \"google/protobuf/timestamp.proto\";\nimport \"common/common.proto\";\n// protoc --proto_path=pkg/grpc/pb/log --proto_path=pkg/grpc/tripartite --go_out=pkg/grpc/pb/log --go_opt=paths=source_relative --go-grpc_out=pkg/grpc/pb/log --go-grpc_opt=paths=source_relative  log.proto\n\n\nmessage UploadLogStream {\n    message Request {\n\n        StageType stage = 1;\n\n        // log id, task uniq\n        uint64 index = 2;\n\n        string msg = 3;\n\n        Level level = 4;\n\n        // log product time\n        google.protobuf.Timestamp time = 5;\n    }\n\n    message Response {\n\n    }\n}\n\nenum StageType {\n    STAGE_TYPE_INITIALIZING = 0;\n    STAGE_TYPE_PULL_INFRASTRUCTURE = 1;\n    STAGE_TYPE_INFRASTRUCTURE_ANALYSIS = 2;\n}\n\nenum Status {\n    STATUS_SUCCESS = 0;\n    STATUS_FAILED = 1;\n}\n\nenum Level {\n    LEVEL_DEBUG = 0;\n    LEVEL_INFO = 1;\n    LEVEL_WARN = 2;\n    LEVEL_ERROR = 3;\n    LEVEL_FATAL = 4;\n}\n\nmessage UploadLogStatus {\n    message Request {\n        StageType stage = 1;\n\n        Status status = 2;\n\n        // status change time\n        google.protobuf.Timestamp time = 3;\n    }\n\n    message Response {\n        common.Diagnosis diagnosis = 1;\n    }\n}\n\n\n\n\n\n\nservice Log {\n\n    rpc UploadLogStream (stream UploadLogStream.Request) returns (UploadLogStream.Response) {};\n\n    rpc UploadLogStatus (UploadLogStatus.Request) returns (UploadLogStatus.Response) {};\n}"
  },
  {
    "path": "pkg/grpc/pb/log/log_grpc.pb.go",
    "content": "// Code generated by protoc-gen-go-grpc. DO NOT EDIT.\n// versions:\n// - protoc-gen-go-grpc v1.2.0\n// - protoc             v3.20.1\n// source: log.proto\n\npackage log\n\nimport (\n\tcontext \"context\"\n\tgrpc \"google.golang.org/grpc\"\n\tcodes \"google.golang.org/grpc/codes\"\n\tstatus \"google.golang.org/grpc/status\"\n)\n\n// This is a compile-time assertion to ensure that this generated file\n// is compatible with the grpc package it is being compiled against.\n// Requires gRPC-Go v1.32.0 or later.\nconst _ = grpc.SupportPackageIsVersion7\n\n// LogClient is the client API for Log service.\n//\n// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream.\ntype LogClient interface {\n\tUploadLogStream(ctx context.Context, opts ...grpc.CallOption) (Log_UploadLogStreamClient, error)\n\tUploadLogStatus(ctx context.Context, in *UploadLogStatus_Request, opts ...grpc.CallOption) (*UploadLogStatus_Response, error)\n}\n\ntype logClient struct {\n\tcc grpc.ClientConnInterface\n}\n\nfunc NewLogClient(cc grpc.ClientConnInterface) LogClient {\n\treturn &logClient{cc}\n}\n\nfunc (c *logClient) UploadLogStream(ctx context.Context, opts ...grpc.CallOption) (Log_UploadLogStreamClient, error) {\n\tstream, err := c.cc.NewStream(ctx, &Log_ServiceDesc.Streams[0], \"/log.Log/UploadLogStream\", opts...)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tx := &logUploadLogStreamClient{stream}\n\treturn x, nil\n}\n\ntype Log_UploadLogStreamClient interface {\n\tSend(*UploadLogStream_Request) error\n\tCloseAndRecv() (*UploadLogStream_Response, error)\n\tgrpc.ClientStream\n}\n\ntype logUploadLogStreamClient struct {\n\tgrpc.ClientStream\n}\n\nfunc (x *logUploadLogStreamClient) Send(m *UploadLogStream_Request) error {\n\treturn x.ClientStream.SendMsg(m)\n}\n\nfunc (x *logUploadLogStreamClient) CloseAndRecv() (*UploadLogStream_Response, error) {\n\tif err := 
x.ClientStream.CloseSend(); err != nil {\n\t\treturn nil, err\n\t}\n\tm := new(UploadLogStream_Response)\n\tif err := x.ClientStream.RecvMsg(m); err != nil {\n\t\treturn nil, err\n\t}\n\treturn m, nil\n}\n\nfunc (c *logClient) UploadLogStatus(ctx context.Context, in *UploadLogStatus_Request, opts ...grpc.CallOption) (*UploadLogStatus_Response, error) {\n\tout := new(UploadLogStatus_Response)\n\terr := c.cc.Invoke(ctx, \"/log.Log/UploadLogStatus\", in, out, opts...)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn out, nil\n}\n\n// LogServer is the server API for Log service.\n// All implementations must embed UnimplementedLogServer\n// for forward compatibility\ntype LogServer interface {\n\tUploadLogStream(Log_UploadLogStreamServer) error\n\tUploadLogStatus(context.Context, *UploadLogStatus_Request) (*UploadLogStatus_Response, error)\n\tmustEmbedUnimplementedLogServer()\n}\n\n// UnimplementedLogServer must be embedded to have forward compatible implementations.\ntype UnimplementedLogServer struct {\n}\n\nfunc (UnimplementedLogServer) UploadLogStream(Log_UploadLogStreamServer) error {\n\treturn status.Errorf(codes.Unimplemented, \"method UploadLogStream not implemented\")\n}\nfunc (UnimplementedLogServer) UploadLogStatus(context.Context, *UploadLogStatus_Request) (*UploadLogStatus_Response, error) {\n\treturn nil, status.Errorf(codes.Unimplemented, \"method UploadLogStatus not implemented\")\n}\nfunc (UnimplementedLogServer) mustEmbedUnimplementedLogServer() {}\n\n// UnsafeLogServer may be embedded to opt out of forward compatibility for this service.\n// Use of this interface is not recommended, as added methods to LogServer will\n// result in compilation errors.\ntype UnsafeLogServer interface {\n\tmustEmbedUnimplementedLogServer()\n}\n\nfunc RegisterLogServer(s grpc.ServiceRegistrar, srv LogServer) {\n\ts.RegisterService(&Log_ServiceDesc, srv)\n}\n\nfunc _Log_UploadLogStream_Handler(srv interface{}, stream grpc.ServerStream) error {\n\treturn 
srv.(LogServer).UploadLogStream(&logUploadLogStreamServer{stream})\n}\n\ntype Log_UploadLogStreamServer interface {\n\tSendAndClose(*UploadLogStream_Response) error\n\tRecv() (*UploadLogStream_Request, error)\n\tgrpc.ServerStream\n}\n\ntype logUploadLogStreamServer struct {\n\tgrpc.ServerStream\n}\n\nfunc (x *logUploadLogStreamServer) SendAndClose(m *UploadLogStream_Response) error {\n\treturn x.ServerStream.SendMsg(m)\n}\n\nfunc (x *logUploadLogStreamServer) Recv() (*UploadLogStream_Request, error) {\n\tm := new(UploadLogStream_Request)\n\tif err := x.ServerStream.RecvMsg(m); err != nil {\n\t\treturn nil, err\n\t}\n\treturn m, nil\n}\n\nfunc _Log_UploadLogStatus_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {\n\tin := new(UploadLogStatus_Request)\n\tif err := dec(in); err != nil {\n\t\treturn nil, err\n\t}\n\tif interceptor == nil {\n\t\treturn srv.(LogServer).UploadLogStatus(ctx, in)\n\t}\n\tinfo := &grpc.UnaryServerInfo{\n\t\tServer:     srv,\n\t\tFullMethod: \"/log.Log/UploadLogStatus\",\n\t}\n\thandler := func(ctx context.Context, req interface{}) (interface{}, error) {\n\t\treturn srv.(LogServer).UploadLogStatus(ctx, req.(*UploadLogStatus_Request))\n\t}\n\treturn interceptor(ctx, in, info, handler)\n}\n\n// Log_ServiceDesc is the grpc.ServiceDesc for Log service.\n// It's only intended for direct use with grpc.RegisterService,\n// and not to be introspected or modified (even as a copy)\nvar Log_ServiceDesc = grpc.ServiceDesc{\n\tServiceName: \"log.Log\",\n\tHandlerType: (*LogServer)(nil),\n\tMethods: []grpc.MethodDesc{\n\t\t{\n\t\t\tMethodName: \"UploadLogStatus\",\n\t\t\tHandler:    _Log_UploadLogStatus_Handler,\n\t\t},\n\t},\n\tStreams: []grpc.StreamDesc{\n\t\t{\n\t\t\tStreamName:    \"UploadLogStream\",\n\t\t\tHandler:       _Log_UploadLogStream_Handler,\n\t\t\tClientStreams: true,\n\t\t},\n\t},\n\tMetadata: \"log.proto\",\n}\n"
  },
  {
    "path": "pkg/grpc/pb/log/transport.go",
    "content": "package log\n\nimport (\n\t\"time\"\n\n\t\"github.com/songzhibin97/gkit/coding\"\n\t_ \"github.com/songzhibin97/gkit/coding/json\"\n)\n\nvar jsonCoding = coding.GetCode(\"json\")\n\nfunc TransportWsMsg(rec *UploadLogStream_Request) (ret []byte, err error) {\n\treturn jsonCoding.Marshal(&struct {\n\t\tStage int       `json:\"stage\"`\n\t\tIndex uint64    `json:\"index\"`\n\t\tMsg   string    `json:\"msg\"`\n\t\tLevel int       `json:\"level\"`\n\t\tTime  time.Time `json:\"time\"`\n\t}{\n\t\tStage: int(rec.GetStage()),\n\t\tIndex: rec.GetIndex(),\n\t\tMsg:   rec.GetMsg(),\n\t\tLevel: int(rec.GetLevel()),\n\t\tTime:  rec.GetTime().AsTime(),\n\t})\n}\n"
  },
  {
    "path": "pkg/grpc/pb/log/transport_test.go",
    "content": "package log\n\nimport (\n\t\"testing\"\n\n\t\"google.golang.org/protobuf/types/known/timestamppb\"\n)\n\nfunc TestTransportWsMsg(t *testing.T) {\n\tv, err := TransportWsMsg(&UploadLogStream_Request{\n\t\tStage: 1,\n\t\tIndex: 1,\n\t\tMsg:   \"tttt\",\n\t\tLevel: 1,\n\t\tTime:  timestamppb.Now(),\n\t})\n\tif err != nil {\n\t\tt.Error(err)\n\t}\n\tt.Log(v)\n}\n"
  },
  {
    "path": "pkg/grpc/stream_uploader.go",
    "content": "package grpc\n\nimport (\n\t\"context\"\n\t\"github.com/selefra/selefra-provider-sdk/provider/schema\"\n\t\"github.com/selefra/selefra/pkg/logger\"\n\t\"github.com/selefra/selefra/pkg/message\"\n\t\"github.com/selefra/selefra/pkg/utils\"\n\t\"google.golang.org/grpc\"\n\t\"sync\"\n\t\"time\"\n)\n\n// ------------------------------------------------- --------------------------------------------------------------------\n\ntype StreamClient[Request, Response any] interface {\n\tSend(Request) error\n\tCloseAndRecv() (Response, error)\n\tgrpc.ClientStream\n}\n\n// ------------------------------------------------- --------------------------------------------------------------------\n\ntype StreamUploaderOptions[Client, ID, Request, Response any] struct {\n\n\t// Used to distinguish between different instances\n\tName string\n\n\t// The client used in the grpc call\n\tClient StreamClient[Request, Response]\n\n\t// The size of the queue of tasks waiting to be sent\n\tWaitSendTaskQueueBuffSize int\n\n\t// Used to send messages to the outside world\n\tMessageChannel *message.Channel[*schema.Diagnostics]\n}\n\n// ------------------------------------------------- --------------------------------------------------------------------\n\ntype StreamUploader[Client, ID, Request, Response any] struct {\n\n\t//Various options when uploading\n\toptions *StreamUploaderOptions[Client, ID, Request, Response]\n\n\t// The queue of logs waiting to be sent\n\twaitSendTaskQueue chan *UploadTask[ID, Request]\n\n\t// Used to coordinate several workers\n\tworkerWg sync.WaitGroup\n}\n\nfunc NewStreamUploader[Client, ID, Request, Response any](options *StreamUploaderOptions[Client, ID, Request, Response]) *StreamUploader[Client, ID, Request, Response] {\n\treturn &StreamUploader[Client, ID, Request, Response]{\n\t\toptions:           options,\n\t\twaitSendTaskQueue: make(chan *UploadTask[ID, Request], options.WaitSendTaskQueueBuffSize),\n\t\tworkerWg:          
sync.WaitGroup{},\n\t}\n}\n\nfunc (x *StreamUploader[Client, ID, Request, Response]) GetOptions() *StreamUploaderOptions[Client, ID, Request, Response] {\n\treturn x.options\n}\n\n// Submit the message to the send queue\nfunc (x *StreamUploader[Client, ID, Request, Response]) Submit(ctx context.Context, id ID, request Request) (bool, *schema.Diagnostics) {\n\n\ttask := &UploadTask[ID, Request]{\n\t\tTaskId:  id,\n\t\tRequest: request,\n\t}\n\n\tfor submitTryTimes := 0; submitTryTimes < 10000; submitTryTimes++ {\n\t\tlogger.InfoF(\"stream uploader name %s, id = %s, submit begin, try times = %d\", x.options.Name, utils.Strava(id), submitTryTimes)\n\t\tselect {\n\t\tcase x.waitSendTaskQueue <- task:\n\t\t\tlogger.InfoF(\"stream uploader name %s, id = %s, submit success, try times = %d\", x.options.Name, utils.Strava(id), submitTryTimes)\n\t\t\treturn true, nil\n\t\tcase <-ctx.Done():\n\t\t\tlogger.InfoF(\"stream uploader name %s, id = %s, submit timeout, try times = %d\", x.options.Name, utils.Strava(id), submitTryTimes)\n\t\t\tx.options.MessageChannel.Send(schema.NewDiagnostics().AddErrorMsg(\"stream uploader name %s, id = %s, submit request timeout, try times = %d\", x.options.Name, utils.Strava(id), submitTryTimes))\n\t\t}\n\t}\n\n\tlogger.InfoF(\"stream uploader name %s, id = %s, submit final failed\", x.options.Name, utils.Strava(id))\n\treturn false, schema.NewDiagnostics().AddErrorMsg(\"stream uploader name %s, id = %s, submit request timeout\", x.options.Name, utils.Strava(id))\n}\n\n// ShutdownAndWait Close the task queue while waiting for the remaining messages in the queue to finish sending\nfunc (x *StreamUploader[Client, ID, Request, Response]) ShutdownAndWait(ctx context.Context) *schema.Diagnostics {\n\n\tdefer func() {\n\t\tlogger.InfoF(\"stream uploader %s message channel SenderWaitAndClose begin\", x.options.Name)\n\t\tx.options.MessageChannel.SenderWaitAndClose()\n\t\tlogger.InfoF(\"stream uploader %s message channel SenderWaitAndClose end\", 
x.options.Name)\n\t}()\n\n\tclose(x.waitSendTaskQueue)\n\tlogger.InfoF(\"stream uploader %s close waitSendTaskQueue\", x.options.Name)\n\n\tlogger.InfoF(\"stream uploader %s wait group begin\", x.options.Name)\n\tx.workerWg.Wait()\n\tlogger.InfoF(\"stream uploader %s wait group done\", x.options.Name)\n\n\treturn nil\n}\n\n//func (x *StreamUploader[Client, ID, Request, Response]) runReceiveWorker() {\n//\n//\tx.workerWg.Add(1)\n//\n//\tgo func() {\n//\n//\t\tdefer func() {\n//\t\t\tx.workerWg.Done()\n//\t\t}()\n//\n//\t\tfor {\n//\t\t\tvar response Response\n//\t\t\terr := x.options.Client.RecvMsg(&response)\n//\t\t\tif err != nil {\n//\t\t\t\tif errors.Is(err, io.EOF) {\n//\t\t\t\t\tx.options.MessageChannel.Send(schema.NewDiagnostics().AddInfo(\"stream uploader name %s, cloud ack receiver exit\", x.options.Name))\n//\t\t\t\t\treturn\n//\t\t\t\t} else {\n//\t\t\t\t\tx.options.MessageChannel.Send(schema.NewDiagnostics().AddErrorMsg(\"stream uploader name %s, receive response error: %s\", x.options.Name, err.Error()))\n//\t\t\t\t}\n//\t\t\t} else {\n//\t\t\t\tid, err := x.options.ResponseAckFunc(response)\n//\t\t\t\tif err != nil {\n//\t\t\t\t\tx.options.MessageChannel.Send(schema.NewDiagnostics().AddErrorMsg(\"stream uploader name %s, extract ack id error: %s\", x.options.Name, err.Error()))\n//\t\t\t\t\tx.options.MessageChannel.Send(x.processResponseACKFailed(id, err))\n//\t\t\t\t} else {\n//\t\t\t\t\tx.options.MessageChannel.Send(schema.NewDiagnostics().AddInfo(\"stream uploader name %s, cloud ack message id %d\", x.options.Name, id))\n//\t\t\t\t\tx.options.MessageChannel.Send(x.processResponseACKOk(id))\n//\t\t\t\t}\n//\t\t\t}\n//\t\t}\n//\t}()\n//}\n//\n//func (x *StreamUploader[Client, ID, Request, Response]) processResponseACKOk(id ID) *schema.Diagnostics {\n//\n//\tx.waitResponseAckTaskSetLock.Lock()\n//\tdefer x.waitResponseAckTaskSetLock.Unlock()\n//\n//\t// Removes the message from the ack collection\n//\tdelete(x.waitResponseAckTaskSet, 
id)\n//\n//\treturn nil\n//}\n//\n//func (x *StreamUploader[Client, ID, Request, Response]) processResponseACKFailed(id ID, err error) *schema.Diagnostics {\n//\n//\tdiagnostics := schema.NewDiagnostics()\n//\n//\tx.waitResponseAckTaskSetLock.Lock()\n//\tdefer x.waitResponseAckTaskSetLock.Unlock()\n//\n//\ttask, exists := x.waitResponseAckTaskSet[id]\n//\tif !exists {\n//\t\treturn diagnostics.AddErrorMsg(\"stream uploader name %s, cloud ack message id %d not found\", x.options.Name, id)\n//\t}\n//\n//\tif task.TryTimes >= 3 {\n//\t\treturn diagnostics.AddErrorMsg(\"stream uploader name %s, send message id %d try times used up\", x.options.Name, id)\n//\t}\n//\n//}\n\nfunc (x *StreamUploader[Client, ID, Request, Response]) RunUploaderWorker() {\n\n\tx.workerWg.Add(1)\n\n\tgo func() {\n\n\t\t// Set the exit flag when exiting\n\t\tdefer func() {\n\t\t\tlogger.InfoF(\"stream uploader %s, begin close stream client\", x.options.Name)\n\t\t\tresponse, err := x.options.Client.CloseAndRecv()\n\t\t\tif err != nil {\n\t\t\t\tlogger.ErrorF(\"stream uploader %s, close stream client error: %s, response = %v\", x.options.Name, err.Error(), response)\n\t\t\t} else {\n\t\t\t\tlogger.InfoF(\"stream uploader %s, close stream client success, response = %v\", x.options.Name, response)\n\t\t\t}\n\t\t\tx.workerWg.Done()\n\t\t}()\n\n\t\ttimer := time.NewTimer(time.Second)\n\t\tdefer timer.Stop()\n\n\t\tcontinueIdleCount := 0\n\t\tfor {\n\t\t\ttimer.Reset(time.Second)\n\t\t\tselect {\n\t\t\tcase task, ok := <-x.waitSendTaskQueue:\n\n\t\t\t\tcontinueIdleCount = 0\n\n\t\t\t\tif !ok {\n\t\t\t\t\tlogger.InfoF(\"stream uploader name %s, wait send task queue closed, worker exiting\", x.options.Name)\n\t\t\t\t\treturn\n\t\t\t\t}\n\n\t\t\t\terr := x.options.Client.Send(task.Request)\n\t\t\t\tif err != nil {\n\t\t\t\t\tlogger.ErrorF(\"stream uploader name %s, send message error: %s, id = %s\", x.options.Name, err.Error(), utils.Strava(task.TaskId))\n\t\t\t\t\t//return\n\t\t\t\t} else 
{\n\t\t\t\t\tlogger.InfoF(\"stream uploader name %s, send message success, id = %s\", x.options.Name, utils.Strava(task.TaskId))\n\t\t\t\t}\n\n\t\t\tcase <-timer.C:\n\n\t\t\t\tcontinueIdleCount++\n\t\t\t\tlogger.InfoF(\"stream uploader name %s, wait task, idle count %d\", x.options.Name, continueIdleCount)\n\t\t\t}\n\t\t}\n\t}()\n}\n\n// ------------------------------------------------- --------------------------------------------------------------------\n\n// UploadTask Represents a task to be uploaded\ntype UploadTask[ID, Request any] struct {\n\n\t// What is the ID of this task\n\tTaskId ID\n\n\t// A request to send\n\tRequest Request\n\n\t//TryTimes int\n}\n\n// ------------------------------------------------- --------------------------------------------------------------------\n\n//type SyncTaskMap[ID, Request comparable] struct {\n//\tlock    *sync.RWMutex\n//\ttaskMap map[ID]*UploadTask[ID, Request]\n//}\n//\n//func NewSyncTaskMap[ID, Request comparable]() *SyncTaskMap[ID, Request] {\n//\treturn &SyncTaskMap[ID, Request]{\n//\t\tlock:    &sync.RWMutex{},\n//\t\ttaskMap: make(map[ID]*UploadTask[ID, Request]),\n//\t}\n//}\n//\n//func (x *SyncTaskMap[ID, Request]) Set(id ID, task *UploadTask[ID, Request]) {\n//\tx.Run(func(taskMap map[ID]*UploadTask[ID, Request]) {\n//\t\ttaskMap[id] = task\n//\t})\n//}\n//\n//func (x *SyncTaskMap[ID, Request]) Delete(id ID, task *UploadTask[ID, Request]) {\n//\tx.Run(func(taskMap map[ID]*UploadTask[ID, Request]) {\n//\t\tdelete(taskMap, id)\n//\t})\n//}\n//\n//func (x *SyncTaskMap[ID, Request]) Get(id ID) (response *UploadTask[ID, Request], exists bool) {\n//\tx.Run(func(taskMap map[ID]*UploadTask[ID, Request]) {\n//\t\tresponse, exists = taskMap[id]\n//\t})\n//\treturn\n//}\n//\n//func (x *SyncTaskMap[ID, Request]) Run(runFunc func(taskMap map[ID]*UploadTask[ID, Request])) {\n//\tx.lock.Lock()\n//\tdefer x.lock.Unlock()\n//\n//\trunFunc(x.taskMap)\n//}\n//\n//// ------------------------------------------------- 
--------------------------------------------------------------------\n"
  },
  {
    "path": "pkg/grpc/stream_uploader_test.go",
    "content": "package grpc\n\nimport (\n\t\"context\"\n\t\"fmt\"\n\t\"github.com/selefra/selefra-provider-sdk/provider/schema\"\n\t\"github.com/selefra/selefra-utils/pkg/id_util\"\n\t\"github.com/selefra/selefra/pkg/message\"\n\t\"github.com/selefra/selefra/pkg/utils\"\n\t\"github.com/stretchr/testify/assert\"\n\t\"google.golang.org/grpc/metadata\"\n\t\"testing\"\n)\n\ntype testGRpcClient struct {\n}\n\nvar _ StreamClient[*testGRpcRequest, *testGRpcResponse]\n\nfunc (t testGRpcClient) Send(request *testGRpcRequest) error {\n\tfmt.Println(\"send message: \" + request.id)\n\treturn nil\n}\n\nfunc (t testGRpcClient) CloseAndRecv() (*testGRpcResponse, error) {\n\tfmt.Println(\"close stream\")\n\treturn nil, nil\n}\n\nfunc (t testGRpcClient) Header() (metadata.MD, error) {\n\treturn nil, nil\n}\n\nfunc (t testGRpcClient) Trailer() metadata.MD {\n\treturn nil\n}\n\nfunc (t testGRpcClient) CloseSend() error {\n\treturn nil\n}\n\nfunc (t testGRpcClient) Context() context.Context {\n\treturn context.Background()\n}\n\nfunc (t testGRpcClient) SendMsg(m interface{}) error {\n\treturn nil\n}\n\nfunc (t testGRpcClient) RecvMsg(m interface{}) error {\n\treturn nil\n}\n\ntype testGRpcRequest struct {\n\tid string\n}\n\ntype testGRpcResponse struct {\n\tid string\n}\n\nfunc TestNewStreamUploader(t *testing.T) {\n\n\tmessageChannel := message.NewChannel[*schema.Diagnostics](func(index int, message *schema.Diagnostics) {\n\t\tif utils.IsNotEmpty(message) {\n\t\t\tt.Log(message.ToString())\n\t\t}\n\t})\n\toptions := &StreamUploaderOptions[*testGRpcClient, string, *testGRpcRequest, *testGRpcResponse]{\n\t\tName:                      \"test-stream-uploader\",\n\t\tClient:                    &testGRpcClient{},\n\t\tWaitSendTaskQueueBuffSize: 1,\n\t\tMessageChannel:            messageChannel,\n\t}\n\tuploader := NewStreamUploader(options)\n\tuploader.RunUploaderWorker()\n\n\tfor i := 0; i < 100; i++ {\n\t\tid := id_util.RandomId()\n\t\tsubmitSuccess, diagnostics := 
uploader.Submit(context.Background(), id, &testGRpcRequest{id: id})\n\t\tassert.False(t, utils.HasError(diagnostics))\n\t\tassert.True(t, submitSuccess)\n\t}\n\tuploader.ShutdownAndWait(context.Background())\n\tmessageChannel.ReceiverWait()\n\n}\n"
  },
  {
    "path": "pkg/grpc/tripartite/google/protobuf/timestamp.proto",
    "content": "// Protocol Buffers - Google's data interchange format\n// Copyright 2008 Google Inc.  All rights reserved.\n// https://developers.google.com/protocol-buffers/\n//\n// Redistribution and use in source and binary forms, with or without\n// modification, are permitted provided that the following conditions are\n// met:\n//\n//     * Redistributions of source code must retain the above copyright\n// notice, this list of conditions and the following disclaimer.\n//     * Redistributions in binary form must reproduce the above\n// copyright notice, this list of conditions and the following disclaimer\n// in the documentation and/or other materials provided with the\n// distribution.\n//     * Neither the name of Google Inc. nor the names of its\n// contributors may be used to endorse or promote products derived from\n// this software without specific prior written permission.\n//\n// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n// \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\n// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR\n// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT\n// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\n// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\n// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\n// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\n// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\nsyntax = \"proto3\";\n\npackage google.protobuf;\n\noption csharp_namespace = \"Google.Protobuf.WellKnownTypes\";\noption cc_enable_arenas = true;\noption go_package = \"google.golang.org/protobuf/types/known/timestamppb\";\noption java_package = \"com.google.protobuf\";\noption java_outer_classname = \"TimestampProto\";\noption java_multiple_files = true;\noption objc_class_prefix = \"GPB\";\n\n// A Timestamp represents a point in time independent of any time zone or local\n// calendar, encoded as a count of seconds and fractions of seconds at\n// nanosecond resolution. The count is relative to an epoch at UTC midnight on\n// January 1, 1970, in the proleptic Gregorian calendar which extends the\n// Gregorian calendar backwards to year one.\n//\n// All minutes are 60 seconds long. Leap seconds are \"smeared\" so that no leap\n// second table is needed for interpretation, using a [24-hour linear\n// smear](https://developers.google.com/time/smear).\n//\n// The range is from 0001-01-01T00:00:00Z to 9999-12-31T23:59:59.999999999Z. 
By\n// restricting to that range, we ensure that we can convert to and from [RFC\n// 3339](https://www.ietf.org/rfc/rfc3339.txt) date strings.\n//\n// # Examples\n//\n// Example 1: Compute Timestamp from POSIX `time()`.\n//\n//     Timestamp timestamp;\n//     timestamp.set_seconds(time(NULL));\n//     timestamp.set_nanos(0);\n//\n// Example 2: Compute Timestamp from POSIX `gettimeofday()`.\n//\n//     struct timeval tv;\n//     gettimeofday(&tv, NULL);\n//\n//     Timestamp timestamp;\n//     timestamp.set_seconds(tv.tv_sec);\n//     timestamp.set_nanos(tv.tv_usec * 1000);\n//\n// Example 3: Compute Timestamp from Win32 `GetSystemTimeAsFileTime()`.\n//\n//     FILETIME ft;\n//     GetSystemTimeAsFileTime(&ft);\n//     UINT64 ticks = (((UINT64)ft.dwHighDateTime) << 32) | ft.dwLowDateTime;\n//\n//     // A Windows tick is 100 nanoseconds. Windows epoch 1601-01-01T00:00:00Z\n//     // is 11644473600 seconds before Unix epoch 1970-01-01T00:00:00Z.\n//     Timestamp timestamp;\n//     timestamp.set_seconds((INT64) ((ticks / 10000000) - 11644473600LL));\n//     timestamp.set_nanos((INT32) ((ticks % 10000000) * 100));\n//\n// Example 4: Compute Timestamp from Java `System.currentTimeMillis()`.\n//\n//     long millis = System.currentTimeMillis();\n//\n//     Timestamp timestamp = Timestamp.newBuilder().setSeconds(millis / 1000)\n//         .setNanos((int) ((millis % 1000) * 1000000)).build();\n//\n//\n// Example 5: Compute Timestamp from Java `Instant.now()`.\n//\n//     Instant now = Instant.now();\n//\n//     Timestamp timestamp =\n//         Timestamp.newBuilder().setSeconds(now.getEpochSecond())\n//             .setNanos(now.getNano()).build();\n//\n//\n// Example 6: Compute Timestamp from current time in Python.\n//\n//     timestamp = Timestamp()\n//     timestamp.GetCurrentTime()\n//\n// # JSON Mapping\n//\n// In JSON format, the Timestamp type is encoded as a string in the\n// [RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) format. 
That is, the\n// format is \"{year}-{month}-{day}T{hour}:{min}:{sec}[.{frac_sec}]Z\"\n// where {year} is always expressed using four digits while {month}, {day},\n// {hour}, {min}, and {sec} are zero-padded to two digits each. The fractional\n// seconds, which can go up to 9 digits (i.e. up to 1 nanosecond resolution),\n// are optional. The \"Z\" suffix indicates the timezone (\"UTC\"); the timezone\n// is required. A proto3 JSON serializer should always use UTC (as indicated by\n// \"Z\") when printing the Timestamp type and a proto3 JSON parser should be\n// able to accept both UTC and other timezones (as indicated by an offset).\n//\n// For example, \"2017-01-15T01:30:15.01Z\" encodes 15.01 seconds past\n// 01:30 UTC on January 15, 2017.\n//\n// In JavaScript, one can convert a Date object to this format using the\n// standard\n// [toISOString()](https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/Date/toISOString)\n// method. In Python, a standard `datetime.datetime` object can be converted\n// to this format using\n// [`strftime`](https://docs.python.org/2/library/time.html#time.strftime) with\n// the time format spec '%Y-%m-%dT%H:%M:%S.%fZ'. Likewise, in Java, one can use\n// the Joda Time's [`ISODateTimeFormat.dateTime()`](\n// http://www.joda.org/joda-time/apidocs/org/joda/time/format/ISODateTimeFormat.html#dateTime%2D%2D\n// ) to obtain a formatter capable of generating timestamps in this format.\n//\n//\nmessage Timestamp {\n  // Represents seconds of UTC time since Unix epoch\n  // 1970-01-01T00:00:00Z. Must be from 0001-01-01T00:00:00Z to\n  // 9999-12-31T23:59:59Z inclusive.\n  int64 seconds = 1;\n\n  // Non-negative fractions of a second at nanosecond resolution. Negative\n  // second values with fractions must still have non-negative nanos values\n  // that count forward in time. Must be from 0 to 999,999,999\n  // inclusive.\n  int32 nanos = 2;\n}\n"
  },
  {
    "path": "pkg/http_client/getter.go",
    "content": "package http_client\n\nimport (\n\t\"context\"\n\t\"net/http\"\n\t\"os\"\n\t\"time\"\n\n\tgetter \"github.com/hashicorp/go-getter\"\n)\n\ntype Detector struct {\n\tName     string\n\tDetector getter.Detector\n}\n\nvar (\n\tdetectors = []getter.Detector{\n\t\tnew(getter.GitHubDetector),\n\t\tnew(getter.GitDetector),\n\t\tnew(getter.S3Detector),\n\t\tnew(getter.GCSDetector),\n\t\tnew(getter.FileDetector),\n\t}\n\n\tdecompressors = map[string]getter.Decompressor{\n\t\t\"bz2\": new(getter.Bzip2Decompressor),\n\t\t\"gz\":  new(getter.GzipDecompressor),\n\t\t\"xz\":  new(getter.XzDecompressor),\n\t\t\"zip\": new(getter.ZipDecompressor),\n\n\t\t\"tar.bz2\":  new(getter.TarBzip2Decompressor),\n\t\t\"tar.tbz2\": new(getter.TarBzip2Decompressor),\n\n\t\t\"tar.gz\": new(getter.TarGzipDecompressor),\n\t\t\"tgz\":    new(getter.TarGzipDecompressor),\n\n\t\t\"tar.xz\": new(getter.TarXzDecompressor),\n\t\t\"txz\":    new(getter.TarXzDecompressor),\n\t}\n\n\tgetters = map[string]getter.Getter{\n\t\t\"file\":   new(getter.FileGetter),\n\t\t\"gcs\":    new(getter.GCSGetter),\n\t\t\"github\": new(getter.GitGetter),\n\t\t\"git\":    new(getter.GitGetter),\n\t\t\"hg\":     new(getter.HgGetter),\n\t\t\"s3\":     new(getter.S3Getter),\n\t\t\"http\":   httpGetter,\n\t\t\"https\":  httpGetter,\n\t}\n)\n\nvar httpGetter = &getter.HttpGetter{\n\tReadTimeout:           10 * time.Minute,\n\tMaxBytes:              1_000_000_000,\n\tXTerraformGetDisabled: true,\n\t//Client: &http.Client{\n\t//\tCheckRedirect: func(req *http.Request, via []*http.Request) error {\n\t//\t\treturn nil\n\t//\t},\n\t//},\n\tHeader: http.Header{\n\t\t\"User-Agent\": []string{MyUserAgent()},\n\t},\n\t//DoNotCheckHeadFirst: true,\n}\n\nfunc DownloadToDirectory(ctx context.Context, saveDirectory, targetUrl string, progressListener getter.ProgressTracker, options ...getter.ClientOption) error {\n\tpwd, err := os.Getwd()\n\tif err != nil {\n\t\treturn err\n\t}\n\tclient := getter.Client{\n\t\tSrc:           
targetUrl,\n\t\tDst:           saveDirectory,\n\t\tPwd:           pwd,\n\t\tMode:          getter.ClientModeDir,\n\t\tDetectors:     detectors,\n\t\tDecompressors: decompressors,\n\t\tGetters:       getters,\n\t\tCtx:           ctx,\n\t\t// Extra options provided by caller to overwrite default behavior\n\t\tOptions:          options,\n\t\tProgressListener: progressListener,\n\t}\n\n\treturn client.Get()\n}\n\n//func ModuleGet(ctx context.Context, installPath, url string, options ...getter.ClientOption) error {\n//\tpwd, _ := os.Getwd()\n//\tclient := getter.Client{\n//\t\tSrc:           url,\n//\t\tDst:           installPath,\n//\t\tPwd:           pwd,\n//\t\tMode:          getter.ClientModeDir,\n//\t\tDetectors:     detectors,\n//\t\tDecompressors: decompressors,\n//\t\tGetters:       getters,\n//\t\tCtx:           ctx,\n//\t\t// Extra options provided by caller to overwrite default behavior\n//\t\tOptions: options,\n//\t}\n//\n//\tif err := client.DownloadToDirectory(); err != nil {\n//\t\treturn err\n//\t}\n//\treturn nil\n//}\n"
  },
  {
    "path": "pkg/http_client/github_repo_downloader.go",
    "content": "package http_client\n\nimport (\n\t\"context\"\n\t\"encoding/json\"\n\t\"fmt\"\n\t\"github.com/hashicorp/go-getter\"\n\t\"github.com/selefra/selefra-provider-sdk/provider/schema\"\n\t\"os\"\n\t\"path/filepath\"\n\t\"time\"\n)\n\n// ------------------------------------------------- --------------------------------------------------------------------\n\n// GitHubRepoDownloaderOptions download github\ntype GitHubRepoDownloaderOptions struct {\n\n\t// Who owns the warehouse, org name or username\n\tOwner string\n\n\t// Name of warehouse\n\tRepo string\n\n\t// Which directory to download it to\n\tDownloadDirectory string\n\n\t// Whether to use a cache, using a cache can avoid repeat download in a short time\n\t// Open is suitable for large warehouse, when the warehouse itself is not large, you can download it directly\n\tCacheTime *time.Duration\n\n\t// It may take a while, but some messages will be sent to you if needed\n\tMessageChannel chan *schema.Diagnostics\n\n\tProgressListener getter.ProgressTracker\n}\n\ntype GitHubRepoDownloader struct {\n}\n\nfunc NewGitHubRepoDownloader() *GitHubRepoDownloader {\n\treturn &GitHubRepoDownloader{}\n}\n\nfunc (x *GitHubRepoDownloader) Download(ctx context.Context, options *GitHubRepoDownloaderOptions) error {\n\n\t// check cache if use it\n\tif options.CacheTime != nil {\n\t\tif x.checkCache(options.DownloadDirectory, *options.CacheTime) {\n\t\t\treturn nil\n\t\t}\n\t}\n\n\ttargetUrl := fmt.Sprintf(\"https://github.com/%s/%s/archive/refs/heads/main.zip\", options.Owner, options.Repo)\n\terr := DownloadToDirectory(ctx, options.DownloadDirectory, targetUrl, options.ProgressListener)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif options.CacheTime != nil {\n\t\tif err := x.Save(options.DownloadDirectory, &GitHubRepoCacheMeta{DownloadTime: time.Now()}); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\treturn nil\n}\n\nfunc (x *GitHubRepoDownloader) checkCache(downloadDirectory string, cacheTime time.Duration) bool 
{\n\tmeta, err := x.ReadCacheMeta(downloadDirectory)\n\tif err != nil {\n\t\treturn false\n\t}\n\tif time.Now().Sub(meta.DownloadTime) > cacheTime {\n\t\treturn false\n\t}\n\treturn true\n}\n\n// GitHubRepoCacheMeta Cache information from the github repository\ntype GitHubRepoCacheMeta struct {\n\t// The last download time of the repo\n\tDownloadTime time.Time `json:\"download-time\"`\n}\n\n// ReadCacheMeta the github repository cache\nfunc (x *GitHubRepoDownloader) ReadCacheMeta(downloadDirectory string) (*GitHubRepoCacheMeta, error) {\n\tfileBytes, err := os.ReadFile(x.BuildCacheMetaFilePath(downloadDirectory))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tr := new(GitHubRepoCacheMeta)\n\terr = json.Unmarshal(fileBytes, &r)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn r, nil\n}\n\n// Save the github repository cache information\nfunc (x *GitHubRepoDownloader) Save(downloadDirectory string, meta *GitHubRepoCacheMeta) error {\n\n\tmetaBytes, err := json.Marshal(meta)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tmetaFilePath := x.BuildCacheMetaFilePath(downloadDirectory)\n\treturn os.WriteFile(metaFilePath, metaBytes, os.FileMode(0644))\n}\n\n// BuildCacheMetaFilePath The root path of the downloaded repository is followed by a cache metadata-related file\nfunc (x *GitHubRepoDownloader) BuildCacheMetaFilePath(downloadDirectory string) string {\n\treturn filepath.Join(downloadDirectory, \".selefra-cache-meta\")\n}\n\n// ------------------------------------------------- --------------------------------------------------------------------\n"
  },
  {
    "path": "pkg/http_client/requests.go",
    "content": "package http_client\n\nimport (\n\t\"bytes\"\n\t\"context\"\n\t\"encoding/json\"\n\t\"fmt\"\n\t\"github.com/selefra/selefra-provider-sdk/provider/schema\"\n\t\"github.com/selefra/selefra/cmd/version\"\n\t\"gopkg.in/yaml.v3\"\n\t\"io\"\n\t\"net/http\"\n\t\"reflect\"\n)\n\nconst DefaultMaxTryTimes = 3\n\n// ------------------------------------------------- --------------------------------------------------------------------\n\nfunc GetYaml[Response any](ctx context.Context, targetUrl string, options ...*Options[any, Response]) (Response, error) {\n\n\tif len(options) == 0 {\n\t\toptions = append(options, NewOptions[any, Response](targetUrl, YamlResponseHandler[Response]()))\n\t}\n\n\toptions[0] = options[0].WithTargetURL(targetUrl).WithYamlResponseHandler()\n\n\treturn SendRequest[any, Response](ctx, options[0])\n}\n\n// ------------------------------------------------- --------------------------------------------------------------------\n\nfunc GetJson[Response any](ctx context.Context, targetUrl string, options ...*Options[any, Response]) (Response, error) {\n\n\tif len(options) == 0 {\n\t\toptions = append(options, NewOptions[any, Response](targetUrl, JsonResponseHandler[Response]()))\n\t}\n\n\toptions[0] = options[0].WithTargetURL(targetUrl).\n\t\tWithJsonResponseHandler().\n\t\tAppendRequestSetting(func(httpRequest *http.Request) error {\n\t\t\thttpRequest.Header.Set(\"Content-Type\", \"application/json\")\n\t\t\treturn nil\n\t\t})\n\n\treturn SendRequest[any, Response](ctx, options[0])\n}\n\nfunc PostJson[Request any, Response any](ctx context.Context, targetUrl string, request Request, options ...*Options[Request, Response]) (Response, error) {\n\n\tif len(options) == 0 {\n\t\toptions = append(options, NewOptions[Request, Response](targetUrl, JsonResponseHandler[Response]()))\n\t}\n\n\tmarshal, err := json.Marshal(request)\n\tif err != nil {\n\t\tvar zero Response\n\t\treturn zero, fmt.Errorf(\"PostJson json marshal request error: %s, type = 
%s\", err.Error(), reflect.TypeOf(request).String())\n\t}\n\n\toptions[0] = options[0].WithTargetURL(targetUrl).\n\t\tWithJsonResponseHandler().\n\t\tAppendRequestSetting(func(httpRequest *http.Request) error {\n\t\t\thttpRequest.Header.Set(\"Content-Type\", \"application/json\")\n\t\t\treturn nil\n\t\t}).\n\t\tWithBody(marshal)\n\n\treturn SendRequest[Request, Response](ctx, options[0])\n}\n\n// ------------------------------------------------- --------------------------------------------------------------------\n\nfunc GetBytes(ctx context.Context, targetUrl string, options ...*Options[any, []byte]) ([]byte, error) {\n\n\tif len(options) == 0 {\n\t\toptions = append(options, NewOptions[any, []byte](targetUrl, BytesResponseHandler()))\n\t}\n\n\toptions[0] = options[0].WithTargetURL(targetUrl).\n\t\tWithResponseHandler(BytesResponseHandler())\n\n\treturn SendRequest[any, []byte](ctx, options[0])\n}\n\n// ------------------------------------------------- --------------------------------------------------------------------\n\nfunc GetString(ctx context.Context, targetUrl string, options ...*Options[any, []byte]) (string, error) {\n\tresponseBytes, err := GetBytes(ctx, targetUrl, options...)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\treturn string(responseBytes), nil\n}\n\n// ------------------------------------------------- --------------------------------------------------------------------\n\n// ResponseHandler A component used to process http responses\ntype ResponseHandler[Response any] func(httpResponse *http.Response) (Response, error)\n\n// BytesResponseHandler The default response handler, automatically reads the response body when the response code is a given value\nfunc BytesResponseHandler(readResponseOnStatusCodeIn ...int) ResponseHandler[[]byte] {\n\n\t// By default, the response body is read only when the status code is 200\n\tif len(readResponseOnStatusCodeIn) == 0 {\n\t\treadResponseOnStatusCodeIn = append(readResponseOnStatusCodeIn, 
http.StatusOK)\n\t}\n\n\treturn func(httpResponse *http.Response) ([]byte, error) {\n\t\tfor _, status := range readResponseOnStatusCodeIn {\n\t\t\tif status == httpResponse.StatusCode {\n\t\t\t\tresponseBodyBytes, err := io.ReadAll(httpResponse.Body)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn nil, fmt.Errorf(\"response status code: %d, read body error: %s\", httpResponse.StatusCode, err.Error())\n\t\t\t\t}\n\t\t\t\treturn responseBodyBytes, nil\n\t\t\t}\n\t\t}\n\t\treturn nil, fmt.Errorf(\"response status code: %d\", httpResponse.StatusCode)\n\t}\n}\n\nfunc StringResponseHandler(readResponseOnStatusCodeIn ...int) ResponseHandler[string] {\n\treturn func(httpResponse *http.Response) (string, error) {\n\t\tresponseBytes, err := BytesResponseHandler(readResponseOnStatusCodeIn...)(httpResponse)\n\t\tif err != nil {\n\t\t\treturn \"\", err\n\t\t}\n\t\treturn string(responseBytes), nil\n\t}\n}\n\nfunc YamlResponseHandler[Response any](readResponseOnStatusCodeIn ...int) ResponseHandler[Response] {\n\treturn func(httpResponse *http.Response) (Response, error) {\n\n\t\tvar r Response\n\n\t\tresponseBytes, err := BytesResponseHandler(readResponseOnStatusCodeIn...)(httpResponse)\n\t\tif err != nil {\n\t\t\treturn r, err\n\t\t}\n\n\t\terr = yaml.Unmarshal(responseBytes, &r)\n\t\tif err != nil {\n\t\t\treturn r, fmt.Errorf(\"response body yaml unmarshal error: %s, type: %s, response body: %s\", err.Error(), reflect.TypeOf(r).String(), string(responseBytes))\n\t\t}\n\t\treturn r, nil\n\t}\n}\n\nfunc JsonResponseHandler[Response any](readResponseOnStatusCodeIn ...int) ResponseHandler[Response] {\n\treturn func(httpResponse *http.Response) (Response, error) {\n\n\t\tvar r Response\n\n\t\tresponseBytes, err := BytesResponseHandler(readResponseOnStatusCodeIn...)(httpResponse)\n\t\tif err != nil {\n\t\t\treturn r, err\n\t\t}\n\n\t\terr = json.Unmarshal(responseBytes, &r)\n\t\tif err != nil {\n\t\t\treturn r, fmt.Errorf(\"response body json unmarshal error: %s, type: %s, response 
body: %s\", err.Error(), reflect.TypeOf(r).String(), string(responseBytes))\n\t\t}\n\t\treturn r, nil\n\t}\n}\n\n// ------------------------------------------------- --------------------------------------------------------------------\n\ntype RequestSetting func(httpRequest *http.Request) error\n\nconst DefaultUserAgent = \"selefra-cli\"\n\nfunc DefaultUserAgentRequestSetting() RequestSetting {\n\treturn func(httpRequest *http.Request) error {\n\t\thttpRequest.Header.Set(\"User-Agent\", MyUserAgent())\n\t\treturn nil\n\t}\n}\n\nfunc MyUserAgent() string {\n\treturn fmt.Sprintf(\"%s/%s\", DefaultUserAgent, version.Version)\n}\n\n// ------------------------------------------------- --------------------------------------------------------------------\n\nconst DefaultMethod = http.MethodGet\n\ntype Options[Request any, Response any] struct {\n\tMaxTryTimes         int\n\tTargetURL           string\n\tMethod              string\n\tBody                []byte\n\tRequestSettingSlice []RequestSetting\n\tResponseHandler     ResponseHandler[Response]\n\tMessageChannel      chan *schema.Diagnostics\n}\n\nfunc NewOptions[Request any, Response any](targetUrl string, responseHandler ResponseHandler[Response]) *Options[Request, Response] {\n\treturn &Options[Request, Response]{\n\t\tMaxTryTimes:         DefaultMaxTryTimes,\n\t\tTargetURL:           targetUrl,\n\t\tMethod:              DefaultMethod,\n\t\tBody:                []byte{},\n\t\tRequestSettingSlice: nil,\n\t\tResponseHandler:     responseHandler,\n\t}\n}\n\nfunc (x *Options[Request, Response]) WithMaxTryTimes(maxTryTimes int) *Options[Request, Response] {\n\tx.MaxTryTimes = maxTryTimes\n\treturn x\n}\n\nfunc (x *Options[Request, Response]) WithTargetURL(targetURL string) *Options[Request, Response] {\n\tx.TargetURL = targetURL\n\treturn x\n}\n\nfunc (x *Options[Request, Response]) WithMethod(method string) *Options[Request, Response] {\n\tx.Method = method\n\treturn x\n}\n\nfunc (x *Options[Request, Response]) 
WithBody(body []byte) *Options[Request, Response] {\n\tif x.Method == DefaultMethod {\n\t\tx.Method = http.MethodPost\n\t}\n\tx.Body = body\n\treturn x\n}\n\nfunc (x *Options[Request, Response]) WithRequestSettingSlice(requestSettingSlice []RequestSetting) *Options[Request, Response] {\n\tx.RequestSettingSlice = requestSettingSlice\n\treturn x\n}\n\nfunc (x *Options[Request, Response]) AppendRequestSetting(requestSetting RequestSetting) *Options[Request, Response] {\n\tx.RequestSettingSlice = append(x.RequestSettingSlice, requestSetting)\n\treturn x\n}\n\nfunc (x *Options[Request, Response]) WithResponseHandler(responseHandler ResponseHandler[Response]) *Options[Request, Response] {\n\tx.ResponseHandler = responseHandler\n\treturn x\n}\n\nfunc (x *Options[Request, Response]) WithYamlResponseHandler() *Options[Request, Response] {\n\tx.ResponseHandler = YamlResponseHandler[Response]()\n\treturn x\n}\n\nfunc (x *Options[Request, Response]) WithJsonResponseHandler() *Options[Request, Response] {\n\tx.ResponseHandler = JsonResponseHandler[Response]()\n\treturn x\n}\n\nfunc (x *Options[Request, Response]) WithMessageChannel(messageChannel chan *schema.Diagnostics) *Options[Request, Response] {\n\tx.MessageChannel = messageChannel\n\treturn x\n}\n\nfunc (x *Options[Request, Response]) SendMessage(message *schema.Diagnostics) *Options[Request, Response] {\n\tif x.MessageChannel != nil {\n\t\tx.MessageChannel <- message\n\t}\n\treturn x\n}\n\n// ------------------------------------------------- --------------------------------------------------------------------\n\n// SendRequest Sending requests is a low-level API\nfunc SendRequest[Request any, Response any](ctx context.Context, options *Options[Request, Response]) (Response, error) {\n\n\t// TODO set default params\n\n\tvar lastErr error\n\tfor tryTimes := 0; tryTimes < options.MaxTryTimes; tryTimes++ {\n\t\tvar client http.Client\n\t\thttpRequest, err := http.NewRequest(options.Method, options.TargetURL, 
bytes.NewReader(options.Body))\n\t\tif err != nil {\n\t\t\tlastErr = err\n\t\t\tcontinue\n\t\t}\n\n\t\thttpRequest = httpRequest.WithContext(ctx)\n\n\t\tfor _, requestSettingFunc := range options.RequestSettingSlice {\n\t\t\tif err := requestSettingFunc(httpRequest); err != nil {\n\t\t\t\tlastErr = err\n\t\t\t\tcontinue\n\t\t\t}\n\t\t}\n\n\t\thttpResponse, err := client.Do(httpRequest)\n\t\tif err != nil {\n\t\t\tlastErr = err\n\t\t\tcontinue\n\t\t}\n\t\tdefer httpResponse.Body.Close()\n\n\t\tresponse, err := options.ResponseHandler(httpResponse)\n\t\tif err != nil {\n\t\t\tlastErr = err\n\t\t\tcontinue\n\t\t}\n\t\treturn response, nil\n\t}\n\n\tvar zero Response\n\treturn zero, lastErr\n}\n\n// ------------------------------------------------- --------------------------------------------------------------------\n"
  },
  {
    "path": "pkg/logger/config.go",
    "content": "package logger\n\nimport (\n\t\"github.com/natefinch/lumberjack\"\n\t\"github.com/selefra/selefra/global\"\n\t\"go.uber.org/zap\"\n\t\"go.uber.org/zap/zapcore\"\n\t\"path/filepath\"\n\t\"strings\"\n)\n\ntype Config struct {\n\tSource              string `yaml:\"source,omitempty\" json:\"source,omitempty\"`\n\tFileLogEnabled      bool   `yaml:\"file_log_enabled,omitempty\" json:\"file_log_enabled,omitempty\"`\n\tConsoleLogEnabled   bool   `yaml:\"enable_console_log,omitempty\" json:\"enable_console_log,omitempty\"`\n\tEncodeLogsAsJson    bool   `yaml:\"encode_logs_as_json,omitempty\" json:\"encode_logs_as_json,omitempty\"`\n\tDirectory           string `yaml:\"directory,omitempty\" json:\"directory,omitempty\"`\n\tLevel               string `yaml:\"level,omitempty\" json:\"level,omitempty\"`\n\tLevelIdentUppercase bool   `yaml:\"level_ident_uppercase,omitempty\" json:\"level_ident_uppercase,omitempty\"`\n\tMaxAge              int    `yaml:\"max_age,omitempty\" json:\"max_age,omitempty\"`\n\tShowLine            bool   `yaml:\"show_line,omitempty\" json:\"show_line,omitempty\"`\n\tConsoleNoColor      bool   `yaml:\"console_no_color,omitempty\" json:\"console_no_color,omitempty\"`\n\tMaxSize             int    `yaml:\"max_size,omitempty\" json:\"max_size,omitempty\"`\n\tMaxBackups          int    `yaml:\"max_backups,omitempty\" json:\"max_backups,omitempty\"`\n\tTimeFormat          string `yaml:\"time_format,omitempty\" json:\"time_format,omitempty\"`\n\tPrefix              string `yaml:\"prefix,omitempty\" json:\"prefix\"`\n}\n\nfunc (c *Config) EncodeLevel() zapcore.LevelEncoder {\n\tswitch {\n\tcase c.LevelIdentUppercase && c.ConsoleNoColor:\n\t\treturn zapcore.CapitalLevelEncoder\n\tcase c.LevelIdentUppercase && !c.ConsoleNoColor:\n\t\treturn zapcore.CapitalColorLevelEncoder\n\tcase !c.LevelIdentUppercase && c.ConsoleNoColor:\n\t\treturn zapcore.LowercaseLevelEncoder\n\tcase !c.LevelIdentUppercase && !c.ConsoleNoColor:\n\t\treturn 
zapcore.LowercaseColorLevelEncoder\n\tdefault:\n\t\treturn zapcore.LowercaseLevelEncoder\n\t}\n}\n\nfunc (c *Config) TranslationLevel() zapcore.Level {\n\tswitch strings.ToLower(c.Level) {\n\tcase \"debug\":\n\t\treturn zapcore.DebugLevel\n\tcase \"info\":\n\t\treturn zapcore.InfoLevel\n\tcase \"warn\":\n\t\treturn zapcore.WarnLevel\n\tcase \"error\":\n\t\treturn zapcore.ErrorLevel\n\tcase \"dpanic\":\n\t\treturn zapcore.DPanicLevel\n\tcase \"panic\":\n\t\treturn zapcore.PanicLevel\n\tcase \"fatal\":\n\t\treturn zapcore.FatalLevel\n\tdefault:\n\t\treturn zapcore.InfoLevel\n\t}\n}\n\nfunc (c *Config) GetEncoder() zapcore.Encoder {\n\tif c.EncodeLogsAsJson {\n\t\treturn zapcore.NewJSONEncoder(c.GetEncoderConfig())\n\t}\n\treturn zapcore.NewConsoleEncoder(c.GetEncoderConfig())\n}\n\nfunc (c *Config) GetEncoderConfig() zapcore.EncoderConfig {\n\treturn zapcore.EncoderConfig{\n\t\tMessageKey:     \"message\",\n\t\tLevelKey:       \"level\",\n\t\tTimeKey:        \"time\",\n\t\tNameKey:        \"logger\",\n\t\tCallerKey:      \"caller\",\n\t\tFunctionKey:    \"func\",\n\t\tStacktraceKey:  \"stack\",\n\t\tLineEnding:     zapcore.DefaultLineEnding,\n\t\tEncodeLevel:    c.EncodeLevel(),\n\t\tEncodeTime:     zapcore.TimeEncoderOfLayout(\"2006-01-02 15:04:05.000\"),\n\t\tEncodeDuration: zapcore.SecondsDurationEncoder,\n\t\tEncodeCaller:   zapcore.FullCallerEncoder,\n\t\tEncodeName:     zapcore.FullNameEncoder,\n\t}\n}\n\nfunc (c *Config) GetLogWriter(level string) zapcore.WriteSyncer {\n\tfilename := filepath.Join(global.WorkSpace(), c.Directory, c.Source+\".log\")\n\tlumberjackLogger := &lumberjack.Logger{\n\t\tFilename:   filename,\n\t\tMaxSize:    c.MaxSize,\n\t\tMaxAge:     c.MaxAge,\n\t\tMaxBackups: c.MaxBackups,\n\t\tLocalTime:  true,\n\t\tCompress:   false,\n\t}\n\treturn zapcore.AddSync(lumberjackLogger)\n}\n\nfunc (c *Config) GetEncoderCore() []zapcore.Core {\n\tcores := make([]zapcore.Core, 0, 7)\n\tfor level := c.TranslationLevel(); level <= zapcore.FatalLevel; 
level++ {\n\t\tcores = append(cores, zapcore.NewCore(c.GetEncoder(), c.GetLogWriter(c.TranslationLevel().String()), c.GetLevelPriority(level)))\n\t}\n\treturn cores\n}\n\nfunc (c *Config) GetLevelPriority(level zapcore.Level) zap.LevelEnablerFunc {\n\tswitch level {\n\tcase zapcore.DebugLevel:\n\t\treturn func(level zapcore.Level) bool {\n\t\t\treturn level == zap.DebugLevel\n\t\t}\n\tcase zapcore.InfoLevel:\n\t\treturn func(level zapcore.Level) bool {\n\t\t\treturn level == zap.InfoLevel\n\t\t}\n\tcase zapcore.WarnLevel:\n\t\treturn func(level zapcore.Level) bool {\n\t\t\treturn level == zap.WarnLevel\n\t\t}\n\tcase zapcore.ErrorLevel:\n\t\treturn func(level zapcore.Level) bool {\n\t\t\treturn level == zap.ErrorLevel\n\t\t}\n\tcase zapcore.DPanicLevel:\n\t\treturn func(level zapcore.Level) bool {\n\t\t\treturn level == zap.DPanicLevel\n\t\t}\n\tcase zapcore.PanicLevel:\n\t\treturn func(level zapcore.Level) bool {\n\t\t\treturn level == zap.PanicLevel\n\t\t}\n\tcase zapcore.FatalLevel:\n\t\treturn func(level zapcore.Level) bool {\n\t\t\treturn level == zap.FatalLevel\n\t\t}\n\tdefault:\n\t\treturn func(level zapcore.Level) bool {\n\t\t\treturn level == zap.DebugLevel\n\t\t}\n\t}\n}\n"
  },
  {
    "path": "pkg/logger/hc.go",
    "content": "package logger\n\nimport (\n\t\"fmt\"\n\t\"github.com/hashicorp/go-hclog\"\n\t\"go.uber.org/zap/zapcore\"\n\t\"io\"\n\t\"log\"\n\t\"os\"\n)\n\n// Logger impl the hclog.Logger interface\nvar _ hclog.Logger = (*Logger)(nil)\n\nfunc (l *Logger) Log(level hclog.Level, msg string, args ...interface{}) {\n\tswitch level {\n\tcase hclog.NoLevel:\n\t\treturn\n\tcase hclog.Trace:\n\t\tl.Trace(msg, args...)\n\tcase hclog.Debug:\n\t\tl.Debug(msg, args...)\n\tcase hclog.Info:\n\t\tl.Info(msg, args...)\n\tcase hclog.Warn:\n\t\tl.Warn(msg, args...)\n\tcase hclog.Error:\n\t\tl.Error(msg, args...)\n\t}\n}\n\nfunc (l *Logger) Trace(msg string, args ...interface{}) {\n\tl.logger.Debug(fmt.Sprintf(msg, args...))\n}\n\nfunc (l *Logger) Debug(msg string, args ...interface{}) {\n\tl.logger.Debug(fmt.Sprintf(msg, args...))\n}\n\nfunc (l *Logger) Info(msg string, args ...interface{}) {\n\tl.logger.Info(fmt.Sprintf(msg, args...))\n}\n\nfunc (l *Logger) Warn(msg string, args ...interface{}) {\n\tl.logger.Warn(fmt.Sprintf(msg, args...))\n}\n\nfunc (l *Logger) Error(msg string, args ...interface{}) {\n\tl.logger.Error(fmt.Sprintf(msg, args...))\n}\n\nfunc (l *Logger) Fatal(msg string, args ...interface{}) {\n\tl.logger.Fatal(fmt.Sprintf(msg, args...))\n}\n\nfunc (l *Logger) IsTrace() bool {\n\treturn false\n}\n\nfunc (l *Logger) IsDebug() bool {\n\treturn l.logger.Core().Enabled(zapcore.DebugLevel)\n}\n\nfunc (l *Logger) IsInfo() bool {\n\treturn l.logger.Core().Enabled(zapcore.InfoLevel)\n}\n\nfunc (l *Logger) IsWarn() bool {\n\treturn l.logger.Core().Enabled(zapcore.WarnLevel)\n}\n\nfunc (l *Logger) IsError() bool {\n\treturn l.logger.Core().Enabled(zapcore.ErrorLevel)\n}\n\nfunc (l *Logger) ImpliedArgs() []interface{} {\n\treturn nil\n}\n\nfunc (l *Logger) With(args ...interface{}) hclog.Logger {\n\treturn l\n}\n\nfunc (l *Logger) Name() string {\n\treturn \"selefra-cli\"\n}\n\nfunc (l *Logger) Named(name string) hclog.Logger {\n\treturn l\n}\n\nfunc (l *Logger) 
ResetNamed(name string) hclog.Logger {\n\treturn l\n}\n\nfunc (l *Logger) SetLevel(level hclog.Level) {\n\treturn\n}\n\nfunc (l *Logger) StandardLogger(opts *hclog.StandardLoggerOptions) *log.Logger {\n\treturn log.New(l.StandardWriter(opts), \"\", 0)\n}\n\nfunc (l *Logger) StandardWriter(opts *hclog.StandardLoggerOptions) io.Writer {\n\treturn os.Stdin\n}\n"
  },
  {
    "path": "pkg/logger/logger.go",
    "content": "package logger\n\nimport (\n\t\"go.uber.org/zap\"\n\t\"go.uber.org/zap/zapcore\"\n\t\"os\"\n\t\"path/filepath\"\n)\n\n// Logger Logs used in the CLI to save local logs\ntype Logger struct {\n\tlogger *zap.Logger\n}\n\nfunc Default() *Logger {\n\treturn defaultLogger\n}\n\nfunc DebugF(msg string, args ...any) {\n\tdefaultLogger.Debug(msg, args...)\n}\n\nfunc InfoF(msg string, args ...any) {\n\tdefaultLogger.Info(msg, args...)\n}\n\nfunc ErrorF(msg string, args ...any) {\n\tdefaultLogger.Error(msg, args...)\n}\n\nfunc FatalF(msg string, args ...any) {\n\tdefaultLogger.Fatal(msg, args...)\n}\n\nvar defaultLogger, _ = NewLogger(Config{\n\tFileLogEnabled:    true,\n\tConsoleLogEnabled: false,\n\tEncodeLogsAsJson:  true,\n\tConsoleNoColor:    true,\n\tSource:            \"client\",\n\tDirectory:         \"logs\",\n\t// TODO Specifies the log level\n\tLevel: \"info\",\n})\n\nfunc NewLogger(c Config) (*Logger, error) {\n\t// TODO The logs are stored in the current directory\n\tlogDir := filepath.Join(\"./\", c.Directory)\n\t_, err := os.Stat(logDir)\n\tif os.IsNotExist(err) {\n\t\terr = os.Mkdir(logDir, 0755)\n\t}\n\tif err != nil {\n\t\treturn nil, nil\n\t}\n\terrorStack := zap.AddStacktrace(zap.ErrorLevel)\n\n\tdevelopment := zap.Development()\n\n\tlogger := zap.New(zapcore.NewTee(c.GetEncoderCore()...), errorStack, development)\n\n\tif c.ShowLine {\n\t\tlogger = logger.WithOptions(zap.AddCaller())\n\t}\n\n\treturn &Logger{logger: logger}, nil\n}\n"
  },
  {
    "path": "pkg/logger/schema.go",
    "content": "package logger\n\nimport (\n\t\"fmt\"\n\t\"github.com/selefra/selefra-provider-sdk/provider/schema\"\n\t\"go.uber.org/zap\"\n)\n\n// SelefraSDKClientLogger is the implement of schema.ClientLogger\ntype SelefraSDKClientLogger struct {\n\twrappedLog *Logger\n}\n\n// To match the log system on the SDK, connect the logs of the two systems\nvar _ schema.ClientLogger = (*SelefraSDKClientLogger)(nil)\n\nfunc NewSchemaLogger(wrappedLog ...*Logger) *SelefraSDKClientLogger {\n\tif len(wrappedLog) == 0 {\n\t\twrappedLog = append(wrappedLog, defaultLogger)\n\t}\n\treturn &SelefraSDKClientLogger{\n\t\twrappedLog: wrappedLog[0],\n\t}\n}\n\nfunc (s *SelefraSDKClientLogger) Debug(msg string, fields ...zap.Field) {\n\ts.wrappedLog.Debug(msg, fields)\n}\n\nfunc (s *SelefraSDKClientLogger) DebugF(msg string, args ...any) {\n\ts.wrappedLog.Debug(msg, args)\n}\n\nfunc (s *SelefraSDKClientLogger) Info(msg string, fields ...zap.Field) {\n\ts.wrappedLog.Info(msg, fields)\n}\n\nfunc (s *SelefraSDKClientLogger) InfoF(msg string, args ...any) {\n\ts.wrappedLog.Info(msg, args)\n}\n\nfunc (s *SelefraSDKClientLogger) Warn(msg string, fields ...zap.Field) {\n\ts.wrappedLog.Warn(msg, fields)\n}\n\nfunc (s *SelefraSDKClientLogger) WarnF(msg string, args ...any) {\n\ts.wrappedLog.Warn(msg, args)\n}\n\nfunc (s *SelefraSDKClientLogger) Error(msg string, fields ...zap.Field) {\n\ts.wrappedLog.Error(msg, fields)\n}\n\nfunc (s *SelefraSDKClientLogger) ErrorF(msg string, args ...any) {\n\ts.wrappedLog.Error(msg, args)\n}\n\nfunc (s *SelefraSDKClientLogger) Fatal(msg string, fields ...zap.Field) {\n\ts.wrappedLog.Fatal(msg, fields)\n}\n\nfunc (s *SelefraSDKClientLogger) FatalF(msg string, args ...any) {\n\ts.wrappedLog.Fatal(msg, args)\n}\n\nfunc (s *SelefraSDKClientLogger) LogDiagnostics(prefix string, d *schema.Diagnostics) {\n\tif d == nil {\n\t\treturn\n\t}\n\n\tfor _, diagnostic := range d.GetDiagnosticSlice() {\n\n\t\tvar msg string\n\t\tif prefix != \"\" {\n\t\t\tmsg = 
fmt.Sprintf(\"%s, %s\", prefix, diagnostic.Content())\n\t\t} else {\n\t\t\tmsg = diagnostic.Content()\n\t\t}\n\n\t\tswitch diagnostic.Level() {\n\t\tcase schema.DiagnosisLevelTrace:\n\t\t\ts.Debug(msg)\n\t\tcase schema.DiagnosisLevelDebug:\n\t\t\ts.Debug(msg)\n\t\tcase schema.DiagnosisLevelInfo:\n\t\t\ts.Info(msg)\n\t\tcase schema.DiagnosisLevelWarn:\n\t\t\ts.Warn(msg)\n\t\tcase schema.DiagnosisLevelError:\n\t\t\ts.Error(msg)\n\t\tcase schema.DiagnosisLevelFatal:\n\t\t\ts.Fatal(msg)\n\t\t}\n\t}\n}\n"
  },
  {
    "path": "pkg/message/message.go",
    "content": "package message\n\nimport (\n\t\"github.com/selefra/selefra-utils/pkg/reflect_util\"\n\t\"sync\"\n)\n\n// Channel Used to link multiple channels and coordinate messaging in tree invocation relationships\ntype Channel[Message any] struct {\n\n\t// Current channel\n\tchannel chan Message\n\n\t// Control of subchannels\n\tsubChannelWg *sync.WaitGroup\n\tselfWg       *sync.WaitGroup\n\n\t// The callback on shutdown\n\tcloseCallbackFunc func()\n\n\t// The current channel processes the message\n\tconsumerFunc func(index int, message Message)\n}\n\nfunc NewChannel[Message any](consumerFunc func(index int, message Message), buffSize ...int) *Channel[Message] {\n\n\t// can have buff\n\tvar channel chan Message\n\tif len(buffSize) != 0 {\n\t\tchannel = make(chan Message, buffSize[0])\n\t} else {\n\t\tchannel = make(chan Message)\n\t}\n\n\tx := &Channel[Message]{\n\t\tchannel:      channel,\n\t\tsubChannelWg: &sync.WaitGroup{},\n\t\tselfWg:       &sync.WaitGroup{},\n\t\tconsumerFunc: consumerFunc,\n\t}\n\n\tx.selfWg.Add(1)\n\tgo func() {\n\n\t\t// The exit of the channel consumer indicates that the channel is closed, and a callback event is triggered when the channel is closed\n\t\tdefer func() {\n\t\t\tx.selfWg.Done()\n\t\t\tif x.closeCallbackFunc != nil {\n\t\t\t\tx.closeCallbackFunc()\n\t\t\t}\n\t\t}()\n\n\t\tcount := 1\n\t\tfor message := range x.channel {\n\t\t\tif x.consumerFunc != nil {\n\t\t\t\tx.consumerFunc(count, message)\n\t\t\t}\n\t\t}\n\t}()\n\n\treturn x\n}\n\nfunc (x *Channel[Message]) Send(message Message) {\n\tif !reflect_util.IsNil(message) {\n\t\tx.channel <- message\n\t}\n}\n\nfunc (x *Channel[Message]) MakeChildChannel() *Channel[Message] {\n\n\t// Adds a semaphore to the parent channel\n\tx.subChannelWg.Add(1)\n\n\t// Create a child channel and bridge it to the parent channel\n\tsubChannel := NewChannel[Message](func(index int, message Message) {\n\t\tx.channel <- message\n\t})\n\n\t// Reduces the semaphore of the parent channel when the 
child channel is turned off\n\tsubChannel.closeCallbackFunc = func() {\n\t\tx.subChannelWg.Done()\n\t}\n\n\treturn subChannel\n}\n\nfunc (x *Channel[Message]) ReceiverWait() {\n\tx.selfWg.Wait()\n}\n\nfunc (x *Channel[Message]) SenderWaitAndClose() {\n\tx.subChannelWg.Wait()\n\tclose(x.channel)\n\tx.selfWg.Wait()\n}\n"
  },
  {
    "path": "pkg/message/message_test.go",
    "content": "package message\n\nimport (\n\t\"fmt\"\n\t\"testing\"\n\t\"time\"\n)\n\nfunc TestNewChannel(t *testing.T) {\n\tchannel := NewChannel[string](func(index int, message string) {\n\t\tfmt.Println(message)\n\t})\n\n\tchildChannel := channel.MakeChildChannel()\n\tgo func() {\n\t\tdefer func() {\n\t\t\tchildChannel.SenderWaitAndClose()\n\t\t}()\n\t\tfor i := 0; i < 10; i++ {\n\t\t\ttime.Sleep(time.Second * 1)\n\t\t\tchildChannel.Send(time.Now().String())\n\t\t}\n\t}()\n\n\tchannel.SenderWaitAndClose()\n}\n"
  },
  {
    "path": "pkg/modules/executors/executor.go",
    "content": "package executors\n\nimport (\n\t\"context\"\n\t\"github.com/selefra/selefra-provider-sdk/provider/schema\"\n)\n\n// Executor Used to execute Module\ntype Executor interface {\n\tName() string\n\n\tExecute(ctx context.Context) *schema.Diagnostics\n}\n"
  },
  {
    "path": "pkg/modules/executors/module_query_executor.go",
    "content": "package executors\n\nimport (\n\t\"bytes\"\n\t\"context\"\n\t\"encoding/json\"\n\t\"fmt\"\n\t\"github.com/hashicorp/go-getter\"\n\t\"github.com/selefra/selefra-provider-sdk/grpc/shard\"\n\t\"github.com/selefra/selefra-provider-sdk/provider/schema\"\n\t\"github.com/selefra/selefra/cli_ui\"\n\t\"github.com/selefra/selefra/pkg/grpc/pb/issue\"\n\t\"github.com/selefra/selefra/pkg/message\"\n\t\"github.com/selefra/selefra/pkg/modules/module\"\n\t\"github.com/selefra/selefra/pkg/modules/planner\"\n\t\"github.com/selefra/selefra/pkg/registry\"\n\t\"github.com/selefra/selefra/pkg/utils\"\n\t\"io\"\n\t\"log\"\n\t\"os\"\n\t\"path/filepath\"\n\t\"regexp\"\n\t\"strings\"\n\t\"sync\"\n\t\"text/template\"\n)\n\n// ------------------------------------------------- --------------------------------------------------------------------\n\n// RuleQueryResult Indicates the query result of a rule\ntype RuleQueryResult struct {\n\tInstructions map[string]interface{}\n\t// The index number of the current task\n\tIndex int\n\n\t// Which module does this rule belong to\n\tModule *module.Module\n\n\t// What is the render value after query\n\tRuleBlock *module.RuleBlock\n\n\t// What is the query plan used to query the rules, with some context information and so on\n\tRulePlan *planner.RulePlan\n\n\t// Which version of which provider is used\n\tProvider *registry.Provider\n\n\t// Which configuration is used\n\tProviderConfiguration *module.ProviderBlock\n\n\t// Which database is being queried\n\tSchema string\n\n\t// Find the row of data in issue\n\tRow *schema.Row\n\n\tStatus issue.UploadIssueStream_Rule_Status\n}\n\n// ------------------------------------------------- --------------------------------------------------------------------\n\n// ModuleQueryExecutorOptions Option to perform module queries\ntype ModuleQueryExecutorOptions struct {\n\n\t// Query plan to execute\n\tPlan *planner.ModulePlan\n\n\t// The path to install to\n\tDownloadWorkspace string\n\n\t// Receive 
real-time message feedback\n\tMessageChannel *message.Channel[*schema.Diagnostics]\n\n\t// The rules detected during query execution are put into this channel\n\tRuleQueryResultChannel *message.Channel[*RuleQueryResult]\n\n\t// Tracking installation progress\n\tProgressTracker getter.ProgressTracker\n\n\t// Used to communicate with the provider\n\tProviderInformationMap map[string]*shard.GetProviderInformationResponse\n\n\t// Each Provider may have multiple Fetch tasks. As long as the policy is bound to the Provider, the policy must be executed for all Storage of the Provider\n\tProviderExpandMap map[string][]*planner.ProviderContext\n\n\t// The number of concurrent queries used\n\tWorkerNum uint64\n}\n\n// ------------------------------------------------- --------------------------------------------------------------------\n\nconst ModuleQueryExecutorName = \"module-query-executor\"\n\ntype ModuleQueryExecutor struct {\n\toptions *ModuleQueryExecutorOptions\n\n\t//ruleMetricCounter *RuleMetricCounter\n\t//ruleMetricChannel chan *RuleMetric\n}\n\nvar _ Executor = &ModuleQueryExecutor{}\n\nfunc NewModuleQueryExecutor(options *ModuleQueryExecutorOptions) *ModuleQueryExecutor {\n\treturn &ModuleQueryExecutor{\n\t\toptions: options,\n\t\t//ruleMetricCounter: NewRuleMetricCounter(),\n\t\t//ruleMetricChannel: make(chan *RuleMetric, 100),\n\t}\n}\n\nfunc (x *ModuleQueryExecutor) Name() string {\n\treturn ModuleQueryExecutorName\n}\n\n// ------------------------------------------------- --------------------------------------------------------------------\n\n//func (x *ModuleQueryExecutor) StartMetricWorker() {\n//\tgo func() {\n//\t\tfor metric := range x.ruleMetricChannel {\n//\t\t\tx.ruleMetricCounter.Submit(metric)\n//\t\t}\n//\t}()\n//}\n//\n//func (x *ModuleQueryExecutor) SubmitRuleMetric(rule string, hits int) {\n//\tx.ruleMetricChannel <- &RuleMetric{Rule: rule, HitCount: hits}\n//}\n\n// ------------------------------------------------- 
--------------------------------------------------------------------\n\nfunc (x *ModuleQueryExecutor) Execute(ctx context.Context) *schema.Diagnostics {\n\tdefer func() {\n\t\tx.options.MessageChannel.SenderWaitAndClose()\n\t\tx.options.RuleQueryResultChannel.SenderWaitAndClose()\n\t}()\n\n\trulePlanSlice := x.makeRulePlanSlice(ctx, x.options.Plan)\n\tif len(rulePlanSlice) == 0 {\n\t\tx.options.MessageChannel.Send(schema.NewDiagnostics().AddErrorMsg(\"module %s no rule need query\", x.options.Plan.BuildFullName()))\n\t\treturn nil\n\t}\n\tchannel := x.toRulePlanChannel(rulePlanSlice)\n\tx.RunQueryWorker(ctx, channel)\n\n\t//close(x.ruleMetricChannel)\n\n\treturn nil\n}\n\nfunc (x *ModuleQueryExecutor) RunQueryWorker(ctx context.Context, channel chan *planner.RulePlan) {\n\twg := sync.WaitGroup{}\n\tfor i := uint64(0); i < x.options.WorkerNum; i++ {\n\t\twg.Add(1)\n\t\tNewModuleQueryExecutorWorker(x, channel, &wg).Run(ctx)\n\t}\n\twg.Wait()\n}\n\nfunc (x *ModuleQueryExecutor) toRulePlanChannel(rulePlanSlice []*planner.RulePlan) chan *planner.RulePlan {\n\trulePlanChannel := make(chan *planner.RulePlan, len(rulePlanSlice))\n\tfor _, rulePlan := range rulePlanSlice {\n\t\tvar filters []module.Filter\n\t\tif rulePlan.Module != nil && rulePlan.Module.ParentModule != nil {\n\t\t\tfor _, pm := range rulePlan.Module.ParentModule.ModulesBlock {\n\t\t\t\tfilters = append(filters, pm.Filter...)\n\t\t\t}\n\t\t}\n\n\t\tfilterFlag := false\n\n\t\tfor _, filter := range filters {\n\t\t\tif filter.Name == rulePlan.RuleBlock.Name {\n\t\t\t\tfilterFlag = true\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t\tif filterFlag {\n\t\t\tcontinue\n\t\t}\n\t\trulePlanChannel <- rulePlan\n\t}\n\tclose(rulePlanChannel)\n\treturn rulePlanChannel\n}\n\n// All the rule execution plans of the module and submodules are levelled and then placed in a task queue\nfunc (x *ModuleQueryExecutor) makeRulePlanSlice(ctx context.Context, modulePlan *planner.ModulePlan) []*planner.RulePlan {\n\n\trulePlanSlice := 
make([]*planner.RulePlan, 0)\n\n\t// The rule execution plan for the current module\n\tif len(modulePlan.RulesPlan) != 0 {\n\t\trulePlanSlice = append(rulePlanSlice, modulePlan.RulesPlan...)\n\t}\n\n\t// The execution plan of the submodule\n\tfor _, subModule := range modulePlan.SubModulesPlan {\n\t\trulePlanSlice = append(rulePlanSlice, x.makeRulePlanSlice(ctx, subModule)...)\n\t}\n\n\treturn rulePlanSlice\n}\n\n// ------------------------------------------------- --------------------------------------------------------------------\n\ntype ModuleQueryExecutorWorker struct {\n\truleChannel chan *planner.RulePlan\n\twg          *sync.WaitGroup\n\n\tmoduleQueryExecutor *ModuleQueryExecutor\n}\n\nfunc NewModuleQueryExecutorWorker(moduleQueryExecutor *ModuleQueryExecutor, rulePlanChannel chan *planner.RulePlan, wg *sync.WaitGroup) *ModuleQueryExecutorWorker {\n\treturn &ModuleQueryExecutorWorker{\n\t\truleChannel:         rulePlanChannel,\n\t\twg:                  wg,\n\t\tmoduleQueryExecutor: moduleQueryExecutor,\n\t}\n}\n\nfunc (x *ModuleQueryExecutorWorker) Run(ctx context.Context) {\n\tgo func() {\n\t\tdefer func() {\n\t\t\tx.wg.Done()\n\t\t}()\n\t\tx.sendMessage(schema.NewDiagnostics().AddInfo(\"Selefra will load and apply selefra policy with sql and prompt...\\n\"))\n\t\tx.sendMessage(schema.NewDiagnostics().AddInfo(\"Loading and initializing Selefra policy...\\n\"))\n\t\tvar num int\n\t\tvar secRuleMap = make(map[string]int)\n\t\tsecRuleMap[\"Critical\"] = 0\n\t\tsecRuleMap[\"High\"] = 0\n\t\tsecRuleMap[\"Medium\"] = 0\n\t\tsecRuleMap[\"Low\"] = 0\n\t\tsecRuleMap[\"Informational\"] = 0\n\t\tvar secMap = make(map[string]int)\n\t\tsecMap[\"Critical\"] = 0\n\t\tsecMap[\"High\"] = 0\n\t\tsecMap[\"Medium\"] = 0\n\t\tsecMap[\"Low\"] = 0\n\t\tsecMap[\"Informational\"] = 0\n\t\tvar plans []*planner.RulePlan\n\t\tfor plan := range x.ruleChannel {\n\t\t\tplans = append(plans, plan)\n\t\t\tnum++\n\t\t\tif plan.MetadataBlock != nil 
{\n\t\t\t\tsecRuleMap[plan.MetadataBlock.Severity]++\n\t\t\t}\n\t\t\tx.sendMessage(schema.NewDiagnostics().AddInfo(\"\\t- \\\"%s\\\" Rule Completed\", plan.RuleBlock.Name))\n\t\t}\n\t\tCritical := cli_ui.MagentaColor(fmt.Sprintf(\"%d Critical\", secRuleMap[\"Critical\"]))\n\t\tHigh := cli_ui.RedColor(fmt.Sprintf(\"%d High\", secRuleMap[\"High\"]))\n\t\tMedium := cli_ui.YellowColor(fmt.Sprintf(\"%d Medium\", secRuleMap[\"Medium\"]))\n\t\tLow := cli_ui.BlueColor(fmt.Sprintf(\"%d Low\", secRuleMap[\"Low\"]))\n\t\tInformational := cli_ui.GreenColor(fmt.Sprintf(\"%d Informational\", secRuleMap[\"Informational\"]))\n\n\t\tx.sendMessage(schema.NewDiagnostics().AddInfo(\"\\nLoaded: %d policies to loaded, %s , %s , %s , %s , %s.\\n\", num, Critical, High, Medium, Low, Informational))\n\n\t\tfor i := range plans {\n\t\t\tx.execRulePlan(ctx, plans[i], secMap)\n\t\t}\n\n\t\ttotel := 0\n\t\tfor s := range secMap {\n\t\t\ttotel += secMap[s]\n\t\t}\n\t\tsecCritical := cli_ui.MagentaColor(fmt.Sprintf(\"%d Critical\", secMap[\"Critical\"]))\n\t\tsecHigh := cli_ui.RedColor(fmt.Sprintf(\"%d High\", secMap[\"High\"]))\n\t\tsecMedium := cli_ui.YellowColor(fmt.Sprintf(\"%d Medium\", secMap[\"Medium\"]))\n\t\tsecLow := cli_ui.BlueColor(fmt.Sprintf(\"%d Low\", secMap[\"Low\"]))\n\t\tsecInformational := cli_ui.GreenColor(fmt.Sprintf(\"%d Informational\", secMap[\"Informational\"]))\n\n\t\tx.sendMessage(schema.NewDiagnostics().AddInfo(\"Summary: Total %d Issues, %s , %s , %s , %s , %s.\\n\", totel, secCritical, secHigh, secMedium, secLow, secInformational))\n\t}()\n}\n\nfunc (x *ModuleQueryExecutorWorker) sendMessage(diagnostics *schema.Diagnostics) {\n\tif utils.IsNotEmpty(diagnostics) {\n\t\tx.moduleQueryExecutor.options.MessageChannel.Send(diagnostics)\n\t}\n}\n\nfunc (x *ModuleQueryExecutorWorker) execRulePlan(ctx context.Context, rulePlan *planner.RulePlan, secMap map[string]int) {\n\tSeverity := fmt.Sprintf(\"[%s] \", rulePlan.MetadataBlock.Severity)\n\n\tTitle := 
rulePlan.MetadataBlock.Title\n\n\tvar f *os.File\n\tvar err error\n\tif x.moduleQueryExecutor != nil &&\n\t\tx.moduleQueryExecutor.options.Plan != nil &&\n\t\tx.moduleQueryExecutor.options.Plan.Instruction != nil &&\n\t\tx.moduleQueryExecutor.options.Plan.Instruction[\"dir\"] != nil {\n\t\tdir := x.moduleQueryExecutor.options.Plan.Instruction[\"dir\"].(string)\n\t\tfiltPath := filepath.Join(dir, \"output\")\n\t\tif _, err := os.Stat(filtPath); os.IsNotExist(err) {\n\t\t\tos.MkdirAll(filtPath, os.ModePerm)\n\t\t}\n\t\tfileName := filepath.Join(filtPath, Title+\".txt\")\n\t\tf, err = os.OpenFile(fileName, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0666)\n\t\tif err != nil {\n\t\t\tlog.Fatal(err)\n\t\t}\n\t\tdefer f.Close()\n\t}\n\n\tstr := utils.GenerateString(Severity+Title, \"·\", \"%d\")\n\tswitch rulePlan.MetadataBlock.Severity {\n\tcase \"Critical\":\n\t\tstr = cli_ui.MagentaColor(str)\n\tcase \"High\":\n\t\tstr = cli_ui.RedColor(str)\n\tcase \"Medium\":\n\t\tstr = cli_ui.YellowColor(str)\n\tcase \"Low\":\n\t\tstr = cli_ui.BlueColor(str)\n\tcase \"Informational\":\n\t\tstr = cli_ui.GreenColor(str)\n\t}\n\tstr += fmt.Sprintf(\"\\nDescription: %s\\n\", rulePlan.MetadataBlock.Description)\n\tstr += fmt.Sprintf(\"Results:\\n\")\n\tvar num int\n\tstoragesMap := x.moduleQueryExecutor.options.ProviderExpandMap\n\tfor _, storages := range storagesMap {\n\t\tfor _, storage := range storages {\n\t\t\toutput, snum := x.execStorageQuery(ctx, rulePlan, storage)\n\t\t\tif f != nil {\n\t\t\t\tf.WriteString(output)\n\t\t\t}\n\t\t\tnum += snum\n\t\t\tstr += output\n\t\t\t// TODO Stage log\n\t\t}\n\t}\n\n\t// TODO log\n\n\tdefer func() {\n\t\tif num > 0 {\n\t\t\tlogStr := fmt.Sprintf(str, num)\n\t\t\tsecMap[rulePlan.RuleBlock.MetadataBlock.Severity] += num\n\t\t\tx.sendMessage(schema.NewDiagnostics().AddInfo(logStr))\n\t\t}\n\t}()\n}\n\nfunc isSql(query string) bool {\n\tquery = strings.ToLower(query)\n\tif strings.Contains(query, \"select\") {\n\t\treturn true\n\t}\n\treturn 
false\n}\n\nfunc (x *ModuleQueryExecutorWorker) FmtOutputStr(rule *module.RuleBlock, providerContext *planner.ProviderContext) (logStr string) {\n\tvar t = \"default\"\n\tif x.moduleQueryExecutor != nil && x.moduleQueryExecutor.options.Plan != nil && x.moduleQueryExecutor.options.Plan.Instruction != nil && x.moduleQueryExecutor.options.Plan.Instruction[\"output\"] != \"\" {\n\t\tif s, ok := x.moduleQueryExecutor.options.Plan.Instruction[\"output\"].(string); ok {\n\t\t\tt = strings.ToLower(s)\n\t\t}\n\t}\n\tswitch t {\n\tcase \"json\":\n\t\tm := make(map[string]interface{})\n\t\tm[\"schema\"] = providerContext.Schema\n\t\tm[\"policy\"] = rule.Query\n\t\tm[\"labels\"] = rule.Labels\n\t\tm[\"metadata\"] = rule.MetadataBlock\n\t\tm[\"output\"] = rule.Output\n\t\tjsonBytes, err := json.MarshalIndent(m, \"\", \"  \")\n\t\tif err != nil {\n\t\t\tfmt.Println(\"JSON marshal error:\", err)\n\t\t\treturn\n\t\t}\n\t\tlogStr = string(jsonBytes)\n\tdefault:\n\t\tlabelsStr := \"\"\n\t\tfor key, label := range rule.Labels {\n\t\t\tif utils.HasOne([]string{\"resource_account_id\", \"resource_id\", \"resource_region\", \"resource_type\"}, key) {\n\t\t\t\tlabelsStr += fmt.Sprintf(\"%s \", label)\n\t\t\t}\n\t\t}\n\t\tlogStr = utils.GenerateString(\"\\t\"+rule.Output, \" \", labelsStr+\"\\n\")\n\t}\n\treturn logStr\n}\n\nfunc (x *ModuleQueryExecutorWorker) execStorageQuery(ctx context.Context, rulePlan *planner.RulePlan, providerContext *planner.ProviderContext) (outputStr string, num int) {\n\t// Query whether it is gpt through query statement\n\tresultStr := \"\"\n\tif isSql(rulePlan.Query) {\n\t\tresultSet, diagnostics := providerContext.Storage.Query(ctx, rulePlan.Query)\n\t\tif utils.HasError(diagnostics) {\n\t\t\tx.sendMessage(schema.NewDiagnostics().AddErrorMsg(\"rule %s exec error: %s\", rulePlan.String(), diagnostics.ToString()))\n\t\t\treturn \"\", 0\n\t\t}\n\n\t\t// TODO Print log prompt\n\t\t//x.moduleQueryExecutor.options.MessageChannel <- 
schema.NewDiagnostics().AddInfo(\"\")\n\t\t//cli_ui.Infof(\"%rootConfig - Rule \\\"%rootConfig\\\"\\n\", rule.Path, rule.Name)\n\t\t//cli_ui.Infoln(\"Schema:\")\n\t\t//cli_ui.Infoln(schema + \"\\n\")\n\t\t//cli_ui.Infoln(\"Description:\")\n\t\tvar resource_ids = []string{\"\"}\n\t\tvar resource_id_key string\n\t\tresource_id_key, ok := rulePlan.Labels[\"resource_id\"].(string)\n\t\tif ok {\n\t\t\tresource_id_key = extractKey(resource_id_key)\n\t\t}\n\t\tfor {\n\t\t\trows, d := resultSet.ReadRows(100)\n\t\t\tif rows != nil {\n\t\t\t\tfor _, row := range rows.SplitRowByRow() {\n\t\t\t\t\tresult := x.processRuleRow(ctx, rulePlan, providerContext, row, issue.UploadIssueStream_Rule_FAILED)\n\t\t\t\t\tif result == nil {\n\t\t\t\t\t\tcontinue\n\t\t\t\t\t}\n\t\t\t\t\tnum++\n\t\t\t\t\tresource_id, ok := result.RuleBlock.Labels[\"resource_id\"].(string)\n\t\t\t\t\tif ok {\n\t\t\t\t\t\tresource_ids = append(resource_ids, resource_id)\n\t\t\t\t\t}\n\t\t\t\t\tresultStr += x.FmtOutputStr(result.RuleBlock, providerContext)\n\t\t\t\t}\n\t\t\t}\n\t\t\tif utils.HasError(d) {\n\t\t\t\tx.sendMessage(d)\n\t\t\t}\n\t\t\tif rows == nil || rows.RowCount() == 0 {\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\n\t\tif strings.TrimSpace(rulePlan.RuleBlock.MetadataBlock.MainTable) == \"\" && (resource_id_key == \"\" || resource_id_key == \"no available\") {\n\t\t\treturn\n\t\t}\n\t\tfor i := range resource_ids {\n\t\t\tresource_ids[i] = fmt.Sprintf(\"'%s'\", resource_ids[i])\n\t\t}\n\t\tsafeQueryTemp := fmt.Sprintf(\"SELECT * FROM %s WHERE \\\"%s\\\" NOT IN (%s)\", rulePlan.RuleBlock.MetadataBlock.MainTable, resource_id_key, strings.Join(resource_ids, \",\"))\n\n\t\tsafeSet, diagnostics := providerContext.Storage.Query(ctx, safeQueryTemp)\n\t\tif utils.HasError(diagnostics) {\n\t\t\tx.sendMessage(schema.NewDiagnostics().AddErrorMsg(\"rule %s exec error: %s\", rulePlan.String(), diagnostics.ToString()))\n\t\t\treturn \"\", 0\n\t\t}\n\n\t\tfor {\n\t\t\trows, d := safeSet.ReadRows(100)\n\t\t\tif rows != nil 
{\n\t\t\t\tfor _, row := range rows.SplitRowByRow() {\n\t\t\t\t\t_ = x.processRuleRow(ctx, rulePlan, providerContext, row, issue.UploadIssueStream_Rule_SUCCESS)\n\t\t\t\t}\n\t\t\t}\n\t\t\tif utils.HasError(d) {\n\t\t\t\tx.sendMessage(d)\n\t\t\t}\n\t\t\tif rows == nil || rows.RowCount() == 0 {\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t} else {\n\t\topenaiApiKey := rulePlan.Module.SelefraBlock.GetOpenaiApiKey()\n\t\topenaiMode := rulePlan.Module.SelefraBlock.GetOpenaiMode()\n\t\topenaiLimit := rulePlan.Module.SelefraBlock.GetOpenaiLimit()\n\n\t\ttypeRes, err := utils.OpenApiClient(ctx, openaiApiKey, openaiMode, \"type\", rulePlan.Query)\n\t\tif err != nil {\n\t\t\tfmt.Println(err.Error())\n\t\t\treturn \"\", 0\n\t\t}\n\t\ttar := strings.Split(typeRes, \" & \")\n\t\tty := tar[0]\n\t\tprovider := tar[1]\n\n\t\tschameTmp := `SELECT table_schema, table_name \nFROM information_schema.tables \nWHERE table_type = 'BASE TABLE' \nAND table_name <> 'selefra_meta_kv'\nAND table_schema = '%s';`\n\n\t\tschameSql := fmt.Sprintf(schameTmp, providerContext.Schema)\n\t\ttableName := \"\"\n\t\ttableNames := []string{}\n\t\tresultSet, diagnostics := providerContext.Storage.Query(ctx, schameSql)\n\t\tif utils.HasError(diagnostics) {\n\t\t\tx.sendMessage(schema.NewDiagnostics().AddErrorMsg(\"rule %s exec error: %s\", rulePlan.String(), diagnostics.ToString()))\n\t\t\treturn \"\", 0\n\t\t}\n\t\tfor {\n\t\t\trows, d := resultSet.ReadRows(-1)\n\t\t\tif rows != nil {\n\t\t\t\tfor _, row := range rows.SplitRowByRow() {\n\t\t\t\t\ttable, err := row.Get(\"table_name\")\n\t\t\t\t\tif err != nil {\n\t\t\t\t\t\tfmt.Println(err)\n\t\t\t\t\t}\n\t\t\t\t\ttableName += table.(string) + \",\"\n\t\t\t\t\tif len(tableName) > 4000 {\n\t\t\t\t\t\ttableNames = append(tableNames, tableName)\n\t\t\t\t\t\ttableName = \"\"\n\t\t\t\t\t}\n\t\t\t\t\t//x.processRuleRow(ctx, rulePlan, providerContext, row)\n\t\t\t\t}\n\t\t\t}\n\t\t\tif utils.HasError(d) {\n\t\t\t\tx.sendMessage(d)\n\t\t\t}\n\t\t\tif rows == nil || 
rows.RowCount() == 0 {\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t\ttableNames = append(tableNames, tableName)\n\t\ttables, err := x.filterTables(ctx, tableNames, ty, openaiApiKey, openaiMode, rulePlan)\n\t\tif err != nil {\n\t\t\tfmt.Println(err)\n\t\t\treturn \"\", 0\n\t\t}\n\t\ttables = utils.RemoveRepeatedElement(tables)\n\t\tcolumnMap, err := x.filterColumns(ctx, tables, providerContext, ty, openaiApiKey, openaiMode, rulePlan)\n\t\tif err != nil {\n\t\t\tfmt.Println(err)\n\t\t\treturn \"\", 0\n\t\t}\n\t\tfor k := range columnMap {\n\t\t\tif len(columnMap[k]) == 0 {\n\t\t\t\tdelete(columnMap, k)\n\t\t\t}\n\t\t}\n\t\trows, err := x.getRows(ctx, columnMap, providerContext, openaiLimit, rulePlan)\n\t\tif err != nil {\n\t\t\tfmt.Println(err)\n\t\t\treturn \"\", 0\n\t\t}\n\t\tlimit := int(openaiLimit)\n\t\tif len(rows) < int(openaiLimit) {\n\t\t\tlimit = len(rows)\n\t\t}\n\t\tresultStr, num, err = x.getIssue(ctx, rows[:limit], openaiApiKey, openaiMode, ty, provider, tableName, *rulePlan, providerContext)\n\t\tif err != nil {\n\t\t\tfmt.Println(err)\n\t\t\treturn \"\", 0\n\t\t}\n\t}\n\treturn resultStr, num\n}\n\n// Process the row queried by the rule\nfunc (x *ModuleQueryExecutorWorker) processRuleRow(ctx context.Context, rulePlan *planner.RulePlan, storage *planner.ProviderContext, row *schema.Row, Status issue.UploadIssueStream_Rule_Status) *RuleQueryResult {\n\trowScope := planner.ExtendScope(rulePlan.RuleScope)\n\n\t// Inject the queried rows into the scope\n\tvalues := row.GetValues()\n\tfor index, columnName := range row.GetColumnNames() {\n\t\trowScope.SetVariable(columnName, values[index])\n\t}\n\n\t// Render the actual values for the query results of the rule\n\truleBlockResult, diagnostics := x.renderRule(ctx, rulePlan, rowScope)\n\tif utils.HasError(diagnostics) {\n\t\tx.moduleQueryExecutor.options.MessageChannel.Send(diagnostics)\n\t\treturn nil\n\t}\n\n\tresult := &RuleQueryResult{\n\t\tInstructions:          
x.moduleQueryExecutor.options.Plan.Instruction,\n\t\tModule:                rulePlan.Module,\n\t\tRulePlan:              rulePlan,\n\t\tRuleBlock:             ruleBlockResult,\n\t\tProvider:              registry.NewProvider(storage.ProviderName, storage.ProviderVersion),\n\t\tProviderConfiguration: storage.ProviderConfiguration,\n\t\tSchema:                storage.Schema,\n\t\tRow:                   row,\n\t\tStatus:                Status,\n\t}\n\tx.moduleQueryExecutor.options.RuleQueryResultChannel.Send(result)\n\treturn result\n\t//x.sendMessage(schema.NewDiagnostics().AddInfo(json_util.ToJsonString(ruleBlockResult)))\n\n}\n\nfunc (x *ModuleQueryExecutorWorker) renderRule(ctx context.Context, rulePlan *planner.RulePlan, rowScope *planner.Scope) (*module.RuleBlock, *schema.Diagnostics) {\n\n\tdiagnostics := schema.NewDiagnostics()\n\n\truleBlock := rulePlan.RuleBlock.Copy()\n\n\t// Start rendering the dependent variables\n\t// name\n\tif ruleBlock.Name != \"\" {\n\t\truleName, err := rowScope.RenderingTemplate(rulePlan.Name, rulePlan.Name)\n\t\tif err != nil {\n\t\t\t// TODO Construct error context\n\t\t\treturn nil, diagnostics.AddErrorMsg(\"render rule name error: %s\", err.Error())\n\t\t}\n\t\truleBlock.Name = ruleName\n\t}\n\n\t// labels\n\tif len(ruleBlock.Labels) > 0 {\n\t\tlabels := make(map[string]interface{})\n\t\tfor key, value := range rulePlan.Labels {\n\t\t\tv, ok := value.(string)\n\t\t\tif ok {\n\t\t\t\tnewValue, err := rowScope.RenderingTemplate(v, v)\n\t\t\t\tif err != nil {\n\t\t\t\t\t// TODO Construct error context\n\t\t\t\t\treturn nil, diagnostics.AddErrorMsg(\"render rule labels error: %s\", err.Error())\n\t\t\t\t}\n\t\t\t\tlabels[key] = newValue\n\t\t\t}\n\t\t}\n\t\truleBlock.Labels = labels\n\t}\n\n\t// output\n\tif ruleBlock.Output != \"\" {\n\t\toutput, err := rowScope.RenderingTemplate(rulePlan.Output, rulePlan.Output)\n\t\tif err != nil {\n\t\t\t// TODO Construct error context\n\t\t\treturn nil, diagnostics.AddErrorMsg(\"render output 
labels error: %s\", err.Error())\n\t\t}\n\t\truleBlock.Output = output\n\t}\n\n\t// Rendering of metadata blocks\n\td := x.renderRuleMetadata(ctx, rulePlan, ruleBlock, rowScope)\n\tif diagnostics.AddDiagnostics(d).HasError() {\n\t\treturn nil, diagnostics\n\t}\n\n\treturn ruleBlock, diagnostics\n}\n\n// A block of render policy metadata\nfunc (x *ModuleQueryExecutorWorker) renderRuleMetadata(ctx context.Context, rulePlan *planner.RulePlan, ruleBlock *module.RuleBlock, rowScope *planner.Scope) *schema.Diagnostics {\n\n\tdiagnostics := schema.NewDiagnostics()\n\tvar err error\n\n\tif ruleBlock.MetadataBlock == nil {\n\t\treturn nil\n\t}\n\tmetadata := ruleBlock.MetadataBlock\n\n\t// description\n\tif metadata.Description != \"\" {\n\t\tmetadata.Description, err = rowScope.RenderingTemplate(metadata.Description, metadata.Description)\n\t\tif err != nil {\n\t\t\t// TODO\n\t\t\treturn diagnostics.AddErrorMsg(\"rendering rule description error: %s \", err.Error())\n\t\t}\n\t}\n\n\t// title\n\tif metadata.Title != \"\" {\n\t\tmetadata.Title, err = rowScope.RenderingTemplate(metadata.Title, metadata.Title)\n\t\tif err != nil {\n\t\t\t// TODO\n\t\t\treturn diagnostics.AddErrorMsg(\"rendering rule title error: %s \", err.Error())\n\t\t}\n\t}\n\n\t// Read the text of the fix, if necessary\n\tif metadata.Remediation != \"\" {\n\t\tmarkdownFileFullPath := filepath.Join(rulePlan.Module.ModuleLocalDirectory, metadata.Remediation)\n\t\tfile, err := os.ReadFile(markdownFileFullPath)\n\t\tif err != nil {\n\t\t\treturn nil\n\t\t\t//return diagnostics.AddErrorMsg(\"read file %s error: %s\", markdownFileFullPath, err.Error())\n\t\t}\n\t\tmetadata.Remediation = string(file)\n\t}\n\n\t// tags\n\tif len(metadata.Tags) != 0 {\n\t\tnewTags := make([]string, len(metadata.Tags))\n\t\tfor index, tag := range metadata.Tags {\n\t\t\tnewTag, err := rowScope.RenderingTemplate(tag, tag)\n\t\t\tif err != nil {\n\t\t\t\t// TODO\n\t\t\t\treturn diagnostics.AddErrorMsg(\"rendering tag error: %s\", 
err.Error())\n\t\t\t}\n\t\t\tnewTags[index] = newTag\n\t\t}\n\t\tmetadata.Tags = newTags\n\t}\n\n\t// author\n\tif metadata.Author != \"\" {\n\t\tauthor, err := rowScope.RenderingTemplate(metadata.Author, metadata.Author)\n\t\tif err != nil {\n\t\t\t// TODO\n\t\t\treturn diagnostics.AddErrorMsg(\"render author error: %s\", err.Error())\n\t\t}\n\t\tmetadata.Author = author\n\t}\n\n\t// provider\n\tif metadata.Provider != \"\" {\n\t\tprovider, err := rowScope.RenderingTemplate(metadata.Provider, metadata.Provider)\n\t\tif err != nil {\n\t\t\t// TODO\n\t\t\treturn diagnostics.AddErrorMsg(\"render provider error: %s\", err.Error())\n\t\t}\n\t\tmetadata.Provider = provider\n\t}\n\n\t// severity\n\tif metadata.Severity != \"\" {\n\t\tseverity, err := rowScope.RenderingTemplate(metadata.Severity, metadata.Severity)\n\t\tif err != nil {\n\t\t\t// TODO\n\t\t\treturn diagnostics.AddErrorMsg(\"render severity error: %s\", err.Error())\n\t\t}\n\t\tmetadata.Severity = severity\n\t}\n\n\t// id\n\tif metadata.Id != \"\" {\n\t\tid, err := rowScope.RenderingTemplate(metadata.Id, metadata.Id)\n\t\tif err != nil {\n\t\t\t// TODO\n\t\t\treturn diagnostics.AddErrorMsg(\"render id error: %s\", err.Error())\n\t\t}\n\t\tmetadata.Id = id\n\t}\n\n\treturn diagnostics\n}\n\n// ------------------------------------------------- --------------------------------------------------------------------\n\n//type RuleMetricCounter struct {\n//\truleMetricMap map[string]*RuleMetric\n//}\n//\n//func NewRuleMetricCounter() *RuleMetricCounter {\n//\treturn &RuleMetricCounter{\n//\t\truleMetricMap: make(map[string]*RuleMetric),\n//\t}\n//}\n//\n//func (x *RuleMetricCounter) Submit(ruleMetric *RuleMetric) {\n//\tif ruleMetric == nil {\n//\t\treturn\n//\t}\n//\tlastRule, exists := x.ruleMetricMap[ruleMetric.Rule]\n//\tif !exists {\n//\t\tx.ruleMetricMap[ruleMetric.Rule] = ruleMetric\n//\t\treturn\n//\t} else {\n//\t\tx.ruleMetricMap[ruleMetric.Rule] = ruleMetric.Merge(lastRule)\n//\t}\n//}\n//\n//// 
------------------------------------------------- --------------------------------------------------------------------\n//\n//type RuleMetric struct {\n//\tRule     string\n//\tHitCount int\n//}\n//\n//func (x *RuleMetric) Merge(other *RuleMetric) *RuleMetric {\n//\tif x == nil {\n//\t\treturn other\n//\t} else if other == nil {\n//\t\treturn x\n//\t}\n//\tif x.Rule != other.Rule {\n//\t\treturn nil\n//\t}\n//\treturn &RuleMetric{\n//\t\tRule:     x.Rule,\n//\t\tHitCount: x.HitCount + other.HitCount,\n//\t}\n//}\n\n// ------------------------------------------------- --------------------------------------------------------------------\n\n//// create table name to provider name mapping\n//func (x *ModuleQueryExecutor) buildTableToProviderMap() (map[string]string, *schema.Diagnostics) {\n//\tdiagnostics := schema.NewDiagnostics()\n//\ttableToProviderMap := make(map[string]string, 0)\n//\tfor providerName, providerPlugin := range x.options.ProviderPluginMap {\n//\t\tinformation, err := providerPlugin.Provider().GetProviderInformation(context.Background(), &shard.GetProviderInformationRequest{})\n//\t\tif err != nil {\n//\t\t\treturn nil, diagnostics\n//\t\t}\n//\t\tif diagnostics.AddDiagnostics(information.Diagnostics).HasError() {\n//\t\t\treturn nil, diagnostics\n//\t\t}\n//\t\tfor tableName := range information.Tables {\n//\t\t\ttableToProviderMap[tableName] = providerName\n//\t\t}\n//\t}\n//\treturn tableToProviderMap, diagnostics\n//}\n\n// ------------------------------------------------- --------------------------------------------------------------------\n\nfunc fmtTemplate(temp string, params map[string]interface{}) (string, error) {\n\tt, err := template.New(\"temp\").Parse(temp)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\tb := bytes.Buffer{}\n\terr = t.Execute(&b, params)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\tby, err := io.ReadAll(&b)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\treturn string(by), nil\n}\n\nfunc (x 
*ModuleQueryExecutorWorker) filterTables(ctx context.Context, tableNames []string, ty string, openaiApiKey string, openaiMode string, rulePlan *planner.RulePlan) (tables []string, err error) {\n\tfor i := range tableNames {\n\t\ttable, err := utils.OpenApiClient(ctx, openaiApiKey, openaiMode, ty+\"Table\", rulePlan.Query, tableNames[i])\n\t\tif err != nil {\n\t\t\tfmt.Println(err)\n\t\t}\n\t\ttable = strings.Trim(table, \" \")\n\t\ttables = append(tables, strings.Split(table, \",\")...)\n\t}\n\treturn tables, nil\n}\n\nfunc (x *ModuleQueryExecutorWorker) filterColumns(ctx context.Context, tables []string, providerContext *planner.ProviderContext, ty string, openaiApiKey string, openaiMode string, rulePlan *planner.RulePlan) (map[string][]string, error) {\n\n\tcloumnSql := `\nSELECT table_name, column_name\nFROM information_schema.columns\nWHERE table_name in (%s) AND table_schema = '%s';\n`\n\tfor i := range tables {\n\t\ttables[i] = \"'\" + tables[i] + \"'\"\n\t}\n\tcloumnSql = fmt.Sprintf(cloumnSql, strings.Join(tables, \",\"), providerContext.Schema)\n\tcolumnRes, columnDiagnostics := providerContext.Storage.Query(ctx, cloumnSql)\n\tcolumnMap := make(map[string][]string)\n\tif utils.HasError(columnDiagnostics) {\n\t\tx.sendMessage(schema.NewDiagnostics().AddErrorMsg(\"rule %s exec error: %s\", rulePlan.String(), columnDiagnostics.ToString()))\n\t\treturn columnMap, fmt.Errorf(\"rule %s exec error: %s\", rulePlan.String(), columnDiagnostics.ToString())\n\t}\n\tfor {\n\t\trows, d := columnRes.ReadRows(-1)\n\t\tif rows != nil {\n\t\t\tfor _, row := range rows.SplitRowByRow() {\n\t\t\t\ttable_name, err := row.Get(\"table_name\")\n\t\t\t\tif err != nil {\n\t\t\t\t\tfmt.Println(err)\n\t\t\t\t}\n\t\t\t\tcolumn_name, err := row.Get(\"column_name\")\n\t\t\t\tif err != nil {\n\t\t\t\t\tfmt.Println(err)\n\t\t\t\t}\n\t\t\t\tcolumnMap[table_name.(string)] = append(columnMap[table_name.(string)], column_name.(string))\n\t\t\t\t//x.processRuleRow(ctx, rulePlan, 
providerContext, row)\n\t\t\t}\n\t\t}\n\t\tif utils.HasError(d) {\n\t\t\tx.sendMessage(d)\n\t\t}\n\t\tif rows == nil || rows.RowCount() == 0 {\n\t\t\tbreak\n\t\t}\n\t}\n\n\tfor s := range columnMap {\n\t\tcolumnMap[s] = utils.RemoveRepeatedElement(columnMap[s])\n\t\tif len(columnMap[s]) == 0 {\n\t\t\tcontinue\n\t\t}\n\t\tColumns, err := utils.OpenApiClient(ctx, openaiApiKey, openaiMode, ty+\"Column\", rulePlan.Query, s, strings.Join(columnMap[s], \",\"))\n\t\tif err != nil {\n\t\t\tfmt.Println(err)\n\t\t}\n\t\tif Columns != \"\" {\n\t\t\tColumns = strings.Trim(Columns, \"\\nAnswer:\\n\")\n\t\t\tColumns = strings.Trim(Columns, \".\")\n\t\t\tColumnsArr := strings.Split(Columns, \",\")\n\t\t\tColumnsNeedArr := make([]string, 0)\n\t\t\tfor i := range ColumnsArr {\n\t\t\t\tfor i2 := range columnMap[s] {\n\t\t\t\t\tif ColumnsArr[i] == columnMap[s][i2] {\n\t\t\t\t\t\tColumnsNeedArr = append(ColumnsNeedArr, columnMap[s][i2])\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t\tcolumnMap[s] = ColumnsNeedArr\n\t\t}\n\t}\n\treturn columnMap, nil\n}\n\nfunc (x *ModuleQueryExecutorWorker) getRows(ctx context.Context, columnMap map[string][]string, providerContext *planner.ProviderContext, openaiLimit uint64, rulePlan *planner.RulePlan) (rs []*schema.Row, err error) {\n\tfor table := range columnMap {\n\t\tsql := fmt.Sprintf(\"SELECT %s FROM %s.%s LIMIT %d\", strings.Join(columnMap[table], \",\"), providerContext.Schema, table, openaiLimit)\n\t\tinfoRes, infoDiagnostics := providerContext.Storage.Query(ctx, sql)\n\t\tif utils.HasError(infoDiagnostics) {\n\t\t\tx.sendMessage(schema.NewDiagnostics().AddErrorMsg(\"rule %s exec error: %s\", rulePlan.String(), infoDiagnostics.ToString()))\n\t\t\treturn rs, fmt.Errorf(\"rule %s exec error: %s\", rulePlan.String(), infoDiagnostics.ToString())\n\t\t}\n\t\tfor {\n\t\t\trows, d := infoRes.ReadRows(-1)\n\t\t\tif rows != nil {\n\t\t\t\tfor _, row := range rows.SplitRowByRow() {\n\t\t\t\t\trs = append(rs, row)\n\t\t\t\t}\n\t\t\t}\n\t\t\tif 
utils.HasError(d) {\n\t\t\t\tx.sendMessage(d)\n\t\t\t}\n\t\t\tif rows == nil || rows.RowCount() == 0 {\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t}\n\treturn rs, nil\n}\n\nfunc (x *ModuleQueryExecutorWorker) getIssue(ctx context.Context, rows []*schema.Row, openaiApiKey, openaiMode, ty, provider, tableName string, rulePlan planner.RulePlan, providerContext *planner.ProviderContext) (resultStr string, num int, err error) {\n\tvar resource_id = []string{\n\t\t\"name\", \"arn\", \"id\", \"Not available\", \"security_group_id\", \"account_id\", \"region\", \"account\", \"db_instance_id\", \"user_name\", \"org\", \"customer_id\", \"email\", \"subscription_id\", \"real_name\", \"instance_id\", \"product_id\", \"full_name\", \"cluster_id\", \"function_arn\", \"device_name\", \"role_name\", \"friendly_name\", \"subject\", \"disk_id\", \"user_email\", \"public_ip\", \"member_id\", \"user_arn\", \"load_balancer_arn\", \"username\", \"schema_id\", \"access_key_id\", \"html_url\", \"cluster_arn\", \"group_arn\",\n\t}\n\tfor _, row := range rows {\n\t\tinfo, err := utils.OpenApiClient(ctx, openaiApiKey, openaiMode, ty, provider, tableName, row, rulePlan.Query)\n\t\tif err != nil {\n\t\t\tfmt.Println(err)\n\t\t}\n\n\t\tvar infoBlockResult []module.GptResponseBlock\n\t\terr = json.Unmarshal([]byte(info), &infoBlockResult)\n\t\tif err != nil {\n\t\t\t//fmt.Println(err.Error())\n\t\t\tcontinue\n\t\t}\n\t\tfor i := range infoBlockResult {\n\t\t\tif rulePlan.MetadataBlock == nil {\n\t\t\t\trulePlan.MetadataBlock = &module.RuleMetadataBlock{}\n\t\t\t}\n\t\t\tmetablock := *rulePlan.MetadataBlock\n\n\t\t\tmetablock.Title = infoBlockResult[i].Title\n\t\t\tmetablock.Description = infoBlockResult[i].Description\n\t\t\tmetablock.Remediation = infoBlockResult[i].Remediation\n\t\t\tmetablock.Severity = infoBlockResult[i].Severity\n\t\t\tmetablock.Tags = infoBlockResult[i].Tags\n\t\t\tmetablock.Author = \"Selefra\"\n\t\t\ttempMap := make(map[string]interface{})\n\t\t\tkeys := 
row.GetColumnNames()\n\n\t\t\tfor i2 := range keys {\n\t\t\t\ttempMap[keys[i2]], _ = row.Get(keys[i2])\n\t\t\t}\n\t\t\tresourceKey := utils.FindFirstSameKeyInTwoStringArray(resource_id, keys)\n\t\t\tif resourceKey != \"\" {\n\t\t\t\ttempMap[\"resource\"], _ = row.Get(resourceKey)\n\t\t\t} else {\n\t\t\t\ttempMap[\"resource\"] = infoBlockResult[i].Resource\n\t\t\t}\n\t\t\ttempMap[\"title\"] = infoBlockResult[i].Title\n\t\t\ttempMap[\"description\"] = infoBlockResult[i].Description\n\t\t\ttempMap[\"remediation\"] = infoBlockResult[i].Remediation\n\t\t\ttempMap[\"severity\"] = infoBlockResult[i].Severity\n\t\t\ttempMap[\"tags\"] = infoBlockResult[i].Tags\n\t\t\tout, err := fmtTemplate(rulePlan.Output, tempMap)\n\t\t\tif err != nil {\n\t\t\t\tfmt.Println(err)\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\truleBlockResult := &module.RuleBlock{\n\t\t\t\tName:          rulePlan.Name,\n\t\t\t\tQuery:         rulePlan.Query,\n\t\t\t\tLabels:        rulePlan.Labels,\n\t\t\t\tMetadataBlock: &metablock,\n\t\t\t\tOutput:        out,\n\t\t\t}\n\n\t\t\tresult := &RuleQueryResult{\n\t\t\t\tInstructions:          x.moduleQueryExecutor.options.Plan.Instruction,\n\t\t\t\tModule:                rulePlan.Module,\n\t\t\t\tRulePlan:              &rulePlan,\n\t\t\t\tRuleBlock:             ruleBlockResult,\n\t\t\t\tProvider:              registry.NewProvider(providerContext.ProviderName, providerContext.ProviderVersion),\n\t\t\t\tProviderConfiguration: providerContext.ProviderConfiguration,\n\t\t\t\tSchema:                providerContext.Schema,\n\t\t\t\tRow:                   row,\n\t\t\t}\n\n\t\t\tif result != nil {\n\t\t\t\tnum++\n\t\t\t\tresultStr += x.FmtOutputStr(result.RuleBlock, providerContext)\n\t\t\t}\n\t\t\tx.moduleQueryExecutor.options.RuleQueryResultChannel.Send(result)\n\t\t}\n\t}\n\treturn resultStr, num, nil\n}\n\nfunc extractKey(str string) string {\n\t// 匹配 {{ .key }} 格式的字符串\n\tre := regexp.MustCompile(`{{\\s*\\.(.*?)\\s*}}`)\n\tmatches := re.FindStringSubmatch(str)\n\tif 
len(matches) > 1 {\n\t\treturn matches[1]\n\t}\n\n\t// 如果没有匹配到 {{ .key }} 格式的字符串，则尝试直接提取键\n\tre = regexp.MustCompile(`\\b(\\w+)\\b`)\n\tmatches = re.FindStringSubmatch(str)\n\tif len(matches) > 1 {\n\t\treturn matches[1]\n\t}\n\n\treturn \"\" // 如果没有匹配到任何键，则返回空字符串\n}\n"
  },
  {
    "path": "pkg/modules/executors/module_query_executor_test.go",
    "content": "package executors\n\nimport (\n\t\"context\"\n\t\"github.com/selefra/selefra-provider-sdk/provider/schema\"\n\t\"github.com/selefra/selefra/pkg/message\"\n\t\"github.com/selefra/selefra/pkg/utils\"\n\t\"github.com/stretchr/testify/assert\"\n\t\"testing\"\n)\n\nfunc TestModuleQueryExecutor_Execute(t *testing.T) {\n\n\tprojectWorkspace := \"./test_data/test_query_module\"\n\tdownloadWorkspace := \"./test_download\"\n\n\tmessageChannel := message.NewChannel[*schema.Diagnostics](func(index int, message *schema.Diagnostics) {\n\t\tif utils.IsNotEmpty(message) {\n\t\t\tt.Log(message.ToString())\n\t\t}\n\t})\n\n\tInstructions := make(map[string]interface{})\n\tInstructions[\"query\"] = \"Please help me analyze the vulnerabilities in AWS S3?\"\n\tInstructions[\"openai_api_key\"] = \"xx\"\n\tInstructions[\"openai_mode\"] = \"gpt-3.5\"\n\tInstructions[\"openai_limit\"] = uint64(10)\n\n\td := NewProjectLocalLifeCycleExecutor(&ProjectLocalLifeCycleExecutorOptions{\n\t\tInstruction:                          Instructions,\n\t\tProjectWorkspace:                     projectWorkspace,\n\t\tDownloadWorkspace:                    downloadWorkspace,\n\t\tMessageChannel:                       messageChannel,\n\t\tProjectLifeCycleStep:                 ProjectLifeCycleStepQuery,\n\t\tFetchStep:                            FetchStepFetch,\n\t\tProjectCloudLifeCycleExecutorOptions: nil,\n\t\tDSN:                                  \"host=127.0.0.1 user=postgres password=pass port=5432 dbname=postgres sslmode=disable\",\n\t\tFetchWorkerNum:                       1,\n\t\tQueryWorkerNum:                       1,\n\t}).Execute(context.Background())\n\tmessageChannel.ReceiverWait()\n\tif utils.IsNotEmpty(d) {\n\t\tt.Log(d.ToString())\n\t}\n\tassert.False(t, utils.HasError(d))\n\n}\n"
  },
  {
    "path": "pkg/modules/executors/project_cloud_life_cycle_executor.go",
    "content": "package executors\n\nimport (\n\t\"context\"\n\t\"fmt\"\n\t\"github.com/selefra/selefra-provider-sdk/provider/schema\"\n\t\"github.com/selefra/selefra/cli_ui\"\n\t\"github.com/selefra/selefra/pkg/cli_env\"\n\t\"github.com/selefra/selefra/pkg/cloud_sdk\"\n\tselefraGrpc \"github.com/selefra/selefra/pkg/grpc\"\n\t\"github.com/selefra/selefra/pkg/grpc/pb/issue\"\n\t\"github.com/selefra/selefra/pkg/grpc/pb/log\"\n\t\"github.com/selefra/selefra/pkg/logger\"\n\t\"github.com/selefra/selefra/pkg/message\"\n\t\"github.com/selefra/selefra/pkg/modules/module\"\n\t\"github.com/selefra/selefra/pkg/storage/pgstorage\"\n\t\"github.com/selefra/selefra/pkg/utils\"\n\t\"google.golang.org/protobuf/types/known/timestamppb\"\n\t\"strings\"\n\t\"sync/atomic\"\n)\n\n// ------------------------------------------------- --------------------------------------------------------------------\n\n// ProjectCloudLifeCycleExecutorOptions Options required when creating a project\ntype ProjectCloudLifeCycleExecutorOptions struct {\n\n\t// The address of the Cloud cluster to which you are connecting\n\tCloudServerHost string\n\n\t// The current project is loaded as a module\n\tModule *module.Module\n\n\t// Send messages to the outside world\n\tMessageChannel *message.Channel[*schema.Diagnostics]\n\n\t// Whether to enable console prompts\n\tEnableConsoleTips bool\n\n\t// Whether to log in\n\tIsNeedLogin bool\n}\n\n// ------------------------------------------------- --------------------------------------------------------------------\n\ntype ProjectCloudLifeCycleExecutor struct {\n\n\t// Options when creating the project\n\toptions *ProjectCloudLifeCycleExecutorOptions\n\n\t// the client for connect to cloud\n\tcloudClient *cloud_sdk.CloudClient\n\n\t// for upload log\n\tlogClient         log.LogClient\n\tlogStreamUploader *selefraGrpc.StreamUploader[log.Log_UploadLogStreamClient, int, *log.UploadLogStream_Request, *log.UploadLogStream_Response]\n\n\t// for upload 
issue\n\tissueStreamUploader *selefraGrpc.StreamUploader[issue.Issue_UploadIssueStreamClient, int, *issue.UploadIssueStream_Request, *issue.UploadIssueStream_Response]\n\n\t// index generator\n\tlogIdGenerator   atomic.Int64\n\tissueIdGenerator atomic.Int64\n\n\t// task current stage\n\tstage log.StageType\n}\n\nfunc NewProjectCloudLifeCycleExecutor(options *ProjectCloudLifeCycleExecutorOptions) *ProjectCloudLifeCycleExecutor {\n\treturn &ProjectCloudLifeCycleExecutor{\n\t\toptions: options,\n\t}\n}\n\nfunc (x *ProjectCloudLifeCycleExecutor) getServerHost() string {\n\tif x.options.CloudServerHost != \"\" {\n\t\tlogger.InfoF(\"ProjectCloudLifeCycleExecutor get getServerHost from options\")\n\t\treturn x.options.CloudServerHost\n\t}\n\treturn cli_env.GetServerHost()\n}\n\nfunc (x *ProjectCloudLifeCycleExecutor) InitCloudClient(ctx context.Context) bool {\n\n\t// 1. create cloud client\n\tcloudServerHost := x.getServerHost()\n\t//x.options.MessageChannel.Send(schema.NewDiagnostics().AddInfo(\"Login to selefra cloud %s\", cloudServerHost))\n\tcloudClient, d := cloud_sdk.NewCloudClient(cloudServerHost)\n\t//x.options.MessageChannel.Send(d)\n\tif utils.HasError(d) {\n\t\treturn false\n\t}\n\tx.cloudClient = cloudClient\n\n\t// 2. 
find local cloud token & use it to login to the cloud\n\tif !x.options.IsNeedLogin {\n\t\tlogger.InfoF(\"do not need to login\")\n\t\treturn true\n\t}\n\tcredentials, _ := cloudClient.GetCredentials()\n\tif credentials != nil && !x.loginByCredentials(ctx, credentials) {\n\t\treturn false\n\t}\n\n\treturn true\n}\n\n// Login against credentials\nfunc (x *ProjectCloudLifeCycleExecutor) loginByCredentials(ctx context.Context, credentials *cloud_sdk.CloudCredentials) bool {\n\n\tif x.cloudClient == nil {\n\t\tlogger.ErrorF(\"cloudClient is nil, can not loginByCredentials\")\n\t\treturn false\n\t}\n\n\t// try login\n\tlogin, diagnostics := x.cloudClient.Login(credentials.Token)\n\tx.options.MessageChannel.Send(diagnostics)\n\n\t// login failed\n\tif utils.HasError(diagnostics) {\n\t\tif x.options.EnableConsoleTips {\n\t\t\t// login success\n\t\t\tcli_ui.ShowLoginFailed(x.options.CloudServerHost)\n\t\t}\n\t\treturn false\n\t}\n\n\t// login success\n\tif x.options.EnableConsoleTips {\n\t\tcli_ui.ShowLoginSuccess(x.options.CloudServerHost, login)\n\t}\n\n\t// check relative project\n\tif x.options.Module.SelefraBlock == nil ||\n\t\tx.options.Module.SelefraBlock.CloudBlock == nil ||\n\t\tx.options.Module.SelefraBlock.CloudBlock.Project == \"\" {\n\t\terrorMsg := fmt.Sprintf(\"Failed to connect to the cloud, you must specify the project name %s in module\", x.options.Module.BuildFullName())\n\t\tx.options.MessageChannel.Send(schema.NewDiagnostics().AddErrorMsg(errorMsg))\n\t\treturn false\n\t}\n\n\t// so, we can get project name now\n\tprojectName := x.options.Module.SelefraBlock.CloudBlock.Project\n\tx.options.MessageChannel.Send(schema.NewDiagnostics().AddInfo(fmt.Sprintf(\"Try relative module %s to project %s \", x.options.Module.BuildFullName(), projectName)))\n\n\t// try to relative project\n\tproject, d := x.cloudClient.CreateProject(projectName)\n\tx.options.MessageChannel.Send(d)\n\tif utils.HasError(d) {\n\t\treturn 
false\n\t}\n\tx.options.MessageChannel.Send(schema.NewDiagnostics().AddInfo(\"Successfully connected to cloud, associated module %s to project %s\", x.options.Module.BuildFullName(), project.Name))\n\n\t// create task\n\ttask, d := x.cloudClient.CreateTask(project.Name)\n\tx.options.MessageChannel.Send(d)\n\tif utils.HasError(d) {\n\t\tmsg := fmt.Sprintf(\"Failed to create a task for the project %s\", project.Name)\n\t\tx.options.MessageChannel.Send(schema.NewDiagnostics().AddErrorMsg(msg))\n\t\treturn false\n\t}\n\n\tmsg := fmt.Sprintf(\"Succeeded in creating a task %s for project %s\", task.TaskId, project.Name)\n\tx.options.MessageChannel.Send(schema.NewDiagnostics().AddInfo(msg))\n\n\tx.options.MessageChannel.Send(schema.NewDiagnostics().AddInfo(\"Begin init log & issue uploader...\"))\n\tif !x.initLogUploader(x.cloudClient) {\n\t\treturn false\n\t}\n\tif !x.initIssueUploader(x.cloudClient) {\n\t\treturn false\n\t}\n\tx.options.MessageChannel.Send(schema.NewDiagnostics().AddInfo(\"Init log uploader & issue done\"))\n\n\t// change task status to begin\n\tx.options.MessageChannel.Send(schema.NewDiagnostics().AddInfo(\"Change task status to INITIALIZING\"))\n\n\t_ = x.UploadLog(ctx, schema.NewDiagnostics().AddInfo(\"Begin run task %s INITIALIZING stage\", task.TaskId))\n\treturn true\n}\n\n// init issue uploader for send issue to cloud\nfunc (x *ProjectCloudLifeCycleExecutor) initIssueUploader(client *cloud_sdk.CloudClient) bool {\n\tissueStreamUploaderMessageChannel := x.options.MessageChannel.MakeChildChannel()\n\tissueStreamUploader, diagnostics := client.NewIssueStreamUploader(issueStreamUploaderMessageChannel)\n\tx.options.MessageChannel.Send(diagnostics)\n\tif utils.HasError(diagnostics) {\n\t\tissueStreamUploaderMessageChannel.SenderWaitAndClose()\n\t\treturn false\n\t}\n\tissueStreamUploader.RunUploaderWorker()\n\tx.issueStreamUploader = issueStreamUploader\n\treturn true\n}\n\n// init log uploader for send log to loud\nfunc (x 
*ProjectCloudLifeCycleExecutor) initLogUploader(client *cloud_sdk.CloudClient) bool {\n\tlogStreamUploaderMessageChannel := x.options.MessageChannel.MakeChildChannel()\n\tlogClient, logStreamUploader, diagnostics := client.NewLogStreamUploader(logStreamUploaderMessageChannel)\n\tx.options.MessageChannel.Send(diagnostics)\n\tif utils.HasError(diagnostics) {\n\t\tlogStreamUploaderMessageChannel.SenderWaitAndClose()\n\t\treturn false\n\t}\n\tlogStreamUploader.RunUploaderWorker()\n\tx.logClient = logClient\n\tx.logStreamUploader = logStreamUploader\n\treturn true\n}\n\n// ------------------------------------------------- --------------------------------------------------------------------\n// UploadIssue add issue to send cloud queue\nfunc (x *ProjectCloudLifeCycleExecutor) UploadIssue(ctx context.Context, r *RuleQueryResult) {\n\t//var t = \"default\"\n\t//if r.Instructions != nil && r.Instructions[\"output\"] != \"\" {\n\t//\tif s, ok := r.Instructions[\"output\"].(string); ok {\n\t//\t\tt = strings.ToLower(s)\n\t//\t}\n\t//}\n\t//switch t {\n\t//case \"json\":\n\t//\tm := make(map[string]interface{})\n\t//\tm[\"schema\"] = r.Schema\n\t//\tm[\"policy\"] = r.RuleBlock.Query\n\t//\tm[\"labels\"] = r.RuleBlock.Labels\n\t//\tm[\"metadata\"] = r.RuleBlock.MetadataBlock\n\t//\tm[\"output\"] = r.RuleBlock.Output\n\t//\tjsonBytes, err := json.MarshalIndent(m, \"\", \"  \")\n\t//\tif err != nil {\n\t//\t\tfmt.Println(\"JSON marshal error:\", err)\n\t//\t\treturn\n\t//\t}\n\t//\tx.UploadLog(ctx, schema.NewDiagnostics().AddInfo(string(jsonBytes)))\n\t//default:\n\t//\tlabelsStr := \"\"\n\t//\tfor _, label := range r.RuleBlock.Labels {\n\t//\t\tlabelsStr += fmt.Sprintf(\"%s \", label)\n\t//\t}\n\t//\tlogStr := utils.GenerateString(\"\\t\"+r.RuleBlock.Output, \" \", labelsStr+\"\\n\")\n\t//\tx.UploadLog(ctx, schema.NewDiagnostics().AddInfo(logStr))\n\t//}\n\t// send to cloud\n\tif x.issueStreamUploader == nil {\n\t\tlogger.ErrorF(\"issueStreamUploader is nil, ignore issue 
upload\")\n\t\treturn\n\t}\n\trequest := x.convertRuleQueryResultToIssueUploadRequest(r)\n\tx.issueStreamUploader.Submit(ctx, int(request.Index), request)\n}\n\n// Convert the query results of the rules into a format uploaded to the Cloud\nfunc (x *ProjectCloudLifeCycleExecutor) convertRuleQueryResultToIssueUploadRequest(r *RuleQueryResult) *issue.UploadIssueStream_Request {\n\tlabels := make(map[string]string)\n\n\tfor s := range r.RulePlan.Labels {\n\t\tif r.RuleBlock.Labels[s] == nil {\n\t\t\tlabels[s] = utils.Strava(r.RulePlan.Labels[s])\n\t\t\tcontinue\n\t\t}\n\t\tlabels[s] = utils.Strava(r.RuleBlock.Labels[s])\n\t}\n\n\t// rule\n\trule := &issue.UploadIssueStream_Rule{\n\t\tName:   r.RuleBlock.Name,\n\t\tQuery:  r.RuleBlock.Query,\n\t\tLabels: labels,\n\t\tOutput: r.Row.String(),\n\t\tStatus: r.Status,\n\t}\n\tif r.RuleBlock.MetadataBlock != nil {\n\t\trule.Metadata = &issue.UploadIssueStream_Metadata{\n\t\t\tId:          r.RuleBlock.MetadataBlock.Id,\n\t\t\tAuthor:      r.RuleBlock.MetadataBlock.Author,\n\t\t\tDescription: r.RuleBlock.MetadataBlock.Description,\n\t\t\tProvider:    r.RuleBlock.MetadataBlock.Provider,\n\t\t\tRemediation: r.RuleBlock.MetadataBlock.Remediation,\n\t\t\tSeverity:    x.ruleSeverity(r.RuleBlock.MetadataBlock.Severity),\n\t\t\tTags:        r.RuleBlock.MetadataBlock.Tags,\n\t\t\tTitle:       r.RuleBlock.MetadataBlock.Title,\n\t\t}\n\t}\n\n\t// provider\n\truleProvider := &issue.UploadIssueStream_Provider{\n\t\tProvider: r.Provider.Name,\n\t\tVersion:  r.Provider.Version,\n\t}\n\tif r.ProviderConfiguration != nil {\n\t\truleProvider.Name = r.ProviderConfiguration.Name\n\t} else {\n\t\truleProvider.Name = \"NOT_CONFIGURATION\"\n\t}\n\n\t// module\n\truleModule := &issue.UploadIssueStream_Module{\n\t\tName:             r.Module.BuildFullName(),\n\t\tSource:           r.Module.Source,\n\t\tDependenciesPath: r.Module.DependenciesPath,\n\t}\n\n\tschema := pgstorage.GetSchemaKey(ruleProvider.Provider, ruleProvider.Version, 
r.ProviderConfiguration)\n\tlowSchema := strings.ToLower(schema)\n\t// context\n\truleContext := &issue.UploadIssueStream_Context{\n\t\tSrcTableNames: r.RulePlan.BindingTables,\n\t\tSchema:        lowSchema,\n\t}\n\n\tindex := x.issueIdGenerator.Add(1)\n\treturn &issue.UploadIssueStream_Request{\n\t\tIndex:    int32(index),\n\t\tRule:     rule,\n\t\tProvider: ruleProvider,\n\t\tModule:   ruleModule,\n\t\tContext:  ruleContext,\n\t}\n}\n\n// Convert the original level to the enumerated value accepted by the cloud\nfunc (x *ProjectCloudLifeCycleExecutor) ruleSeverity(severity string) issue.UploadIssueStream_Severity {\n\tswitch strings.ToUpper(severity) {\n\tcase \"INFORMATIONAL\":\n\t\treturn issue.UploadIssueStream_INFORMATIONAL\n\tcase \"LOW\":\n\t\treturn issue.UploadIssueStream_LOW\n\tcase \"MEDIUM\":\n\t\treturn issue.UploadIssueStream_MEDIUM\n\tcase \"HIGH\":\n\t\treturn issue.UploadIssueStream_HIGH\n\tcase \"CRITICAL\":\n\t\treturn issue.UploadIssueStream_CRITICAL\n\tcase \"UNKNOWN\":\n\t\treturn issue.UploadIssueStream_UNKNOWN\n\tdefault:\n\t\treturn issue.UploadIssueStream_UNKNOWN\n\t}\n}\n\n// ------------------------------------------------ ---------------------------------------------------------------------\n\n// UploadLog add log to send cloud waitting queue\nfunc (x *ProjectCloudLifeCycleExecutor) UploadLog(ctx context.Context, diagnostics *schema.Diagnostics) bool {\n\n\tif utils.IsEmpty(diagnostics) {\n\t\treturn false\n\t}\n\n\t// show in console & log file\n\tx.options.MessageChannel.Send(diagnostics)\n\n\t// send to cloud\n\tif x.logStreamUploader == nil {\n\t\tlogger.ErrorF(\"logStreamUploader is nil, ignore upload log\")\n\t\treturn utils.HasError(diagnostics)\n\t}\n\tfor _, d := range diagnostics.GetDiagnosticSlice() {\n\t\tid := x.logIdGenerator.Add(1)\n\t\tisSubmitSuccess, d := x.logStreamUploader.Submit(ctx, int(id), &log.UploadLogStream_Request{\n\t\t\tIndex: uint64(id),\n\t\t\tStage: x.stage,\n\t\t\tMsg:   
x.Filter(d.Content()),\n\t\t\tLevel: x.toGrpcLevel(d.Level()),\n\t\t\tTime:  timestamppb.Now(),\n\t\t})\n\t\tx.options.MessageChannel.Send(d)\n\t\tif !isSubmitSuccess {\n\t\t\tlogger.ErrorF(\"submit log index %d to uploader failed\", id)\n\t\t} else {\n\t\t\tlogger.InfoF(\"submit log index %d to uploader success\", id)\n\t\t}\n\t}\n\treturn utils.HasError(diagnostics)\n}\n\nfunc (x *ProjectCloudLifeCycleExecutor) toGrpcLevel(level schema.DiagnosticLevel) log.Level {\n\tswitch level {\n\tcase schema.DiagnosisLevelTrace:\n\t\treturn log.Level_LEVEL_DEBUG\n\tcase schema.DiagnosisLevelDebug:\n\t\treturn log.Level_LEVEL_DEBUG\n\tcase schema.DiagnosisLevelInfo:\n\t\treturn log.Level_LEVEL_INFO\n\tcase schema.DiagnosisLevelWarn:\n\t\treturn log.Level_LEVEL_WARN\n\tcase schema.DiagnosisLevelError:\n\t\treturn log.Level_LEVEL_ERROR\n\tcase schema.DiagnosisLevelFatal:\n\t\treturn log.Level_LEVEL_FATAL\n\tdefault:\n\t\treturn log.Level_LEVEL_INFO\n\t}\n}\n\n// ------------------------------------------------ ---------------------------------------------------------------------\n\n// ShutdownAndWait close send queue and wait uploader done\nfunc (x *ProjectCloudLifeCycleExecutor) ShutdownAndWait(ctx context.Context) {\n\n\t// close issue first\n\tif x.issueStreamUploader != nil {\n\n\t\tlogger.InfoF(\"issueStreamUploader ShutdownAndWait begin...\")\n\t\tx.issueStreamUploader.ShutdownAndWait(ctx)\n\t\tlogger.InfoF(\"issueStreamUploader ShutdownAndWait done\")\n\n\t\tlogger.InfoF(\"issueStreamUploader MessageChannel ReceiverWait begin\")\n\t\tx.issueStreamUploader.GetOptions().MessageChannel.ReceiverWait()\n\t\tlogger.InfoF(\"issueStreamUploader MessageChannel ReceiverWait done\")\n\t}\n\n\t// close log second\n\tif x.logStreamUploader != nil {\n\n\t\tlogger.InfoF(\"logStreamUploader ShutdownAndWait begin...\")\n\t\tx.logStreamUploader.ShutdownAndWait(ctx)\n\t\tlogger.InfoF(\"logStreamUploader ShutdownAndWait end\")\n\n\t\tlogger.InfoF(\"logStreamUploader MessageChannel 
ReceiverWait begin\")\n\t\tx.logStreamUploader.GetOptions().MessageChannel.ReceiverWait()\n\t\tlogger.InfoF(\"logStreamUploader MessageChannel ReceiverWait done\")\n\t}\n\n\t// close message\n\tlogger.InfoF(\"ProjectCloudLifeCycleExecutor MessageChannel SenderWaitAndClose begin\")\n\tx.options.MessageChannel.SenderWaitAndClose()\n\tlogger.InfoF(\"ProjectCloudLifeCycleExecutor MessageChannel SenderWaitAndClose end\")\n\n}\n\nfunc (x *ProjectCloudLifeCycleExecutor) ChangeLogStage(stage log.StageType) {\n\t// change self first\n\tx.stage = stage\n}\n\n// ReportTaskStatus Modify the current state of the task\nfunc (x *ProjectCloudLifeCycleExecutor) ReportTaskStatus(stage log.StageType, status log.Status) {\n\n\tif x.logClient == nil {\n\t\tlogger.ErrorF(\"can not change task log status, not login\")\n\t\treturn\n\t}\n\tlogger.InfoF(\"begin change task log status, stage = %d, status = %d\", stage, status)\n\tlogStatus, err := x.logClient.UploadLogStatus(x.cloudClient.BuildMetaContext(), &log.UploadLogStatus_Request{\n\t\tStage:  stage,\n\t\tStatus: status,\n\t\tTime:   timestamppb.Now(),\n\t})\n\tif err != nil {\n\t\tlogger.ErrorF(\"change task log status error: %s, stage = %d, status = %d\", err.Error(), stage, status)\n\t\treturn\n\t}\n\tif logStatus.Diagnosis != nil && logStatus.Diagnosis.Code != 0 {\n\t\tlogger.ErrorF(\"change task log status response error, code = %d, message = %s\", logStatus.Diagnosis.Code, logStatus.Diagnosis.Msg)\n\t} else {\n\t\tlogger.InfoF(\"change task log status success, stage = %d, status = %d\", stage, status)\n\t}\n}\n\nfunc (x *ProjectCloudLifeCycleExecutor) Filter(s string) string {\n\ts = strings.ReplaceAll(s, \"\\u001B[31m\", \"\")\n\ts = strings.ReplaceAll(s, \"\\u001B[34m\", \"\")\n\ts = strings.ReplaceAll(s, \"\\u001B[0m\", \"\")\n\treturn s\n}\n\n// ------------------------------------------------- --------------------------------------------------------------------\n"
  },
  {
    "path": "pkg/modules/executors/project_local_life_cycle_executor.go",
    "content": "package executors\n\nimport (\n\t\"context\"\n\t\"github.com/selefra/selefra-provider-sdk/env\"\n\t\"github.com/selefra/selefra-provider-sdk/provider/schema\"\n\t\"github.com/selefra/selefra-provider-sdk/storage/database_storage/postgresql_storage\"\n\t\"github.com/selefra/selefra-provider-sdk/storage_factory\"\n\t\"github.com/selefra/selefra/pkg/grpc/pb/log\"\n\t\"github.com/selefra/selefra/pkg/logger\"\n\t\"github.com/selefra/selefra/pkg/message\"\n\t\"github.com/selefra/selefra/pkg/modules/module\"\n\t\"github.com/selefra/selefra/pkg/modules/module_loader\"\n\t\"github.com/selefra/selefra/pkg/modules/planner\"\n\t\"github.com/selefra/selefra/pkg/providers/local_providers_manager\"\n\t\"github.com/selefra/selefra/pkg/storage/pgstorage\"\n\t\"github.com/selefra/selefra/pkg/utils\"\n\t\"os\"\n\t\"strings\"\n)\n\n// ------------------------------------------------- --------------------------------------------------------------------\n\n// ProjectLifeCycleStep You can control the execution of a project until it ends at a certain point in the lifecycle\ntype ProjectLifeCycleStep int\n\nconst (\n\n\t// ProjectLifeCycleStepQuery At what point in the project's life cycle, The order is reversed\n\t// Proceed to the module query step\n\tProjectLifeCycleStepQuery ProjectLifeCycleStep = iota\n\n\t// ProjectLifeCycleStepFetch Go to the pull data step\n\tProjectLifeCycleStepFetch\n\n\t// ProjectLifeCycleStepInstall Proceed to the installation step\n\tProjectLifeCycleStepInstall\n\n\t// ProjectLifeCycleStepModuleCheck Module validity check\n\tProjectLifeCycleStepModuleCheck\n\n\t// ProjectLifeCycleStepCloudInit Initialization phase\n\tProjectLifeCycleStepCloudInit\n\n\t// ProjectLifeCycleStepLoadModule Just load the module of the project and do nothing else\n\tProjectLifeCycleStepLoadModule\n)\n\n// ------------------------------------------------ ---------------------------------------------------------------------\n\n// ProjectLocalLifeCycleExecutorOptions The 
local life cycle of the project\ntype ProjectLocalLifeCycleExecutorOptions struct {\n\t// Gpt query string\n\tInstruction map[string]interface{}\n\n\t// project path\n\tProjectWorkspace string\n\n\t// download things put where\n\tDownloadWorkspace string\n\n\t// The channel through which messages are received externally\n\tMessageChannel *message.Channel[*schema.Diagnostics]\n\n\t// Used to control where the project goes\n\tProjectLifeCycleStep ProjectLifeCycleStep\n\n\t// Used to control which step is executed when pulling, the pull has its own separate lifecycle step partition\n\tFetchStep FetchStep\n\n\t// if set this options, then enable cloud project\n\tProjectCloudLifeCycleExecutorOptions *ProjectCloudLifeCycleExecutorOptions\n\n\t// The database to which data is to be written, May be copied by a higher priority setting\n\tDSN string\n\n\t// The number of concurrences during the fetch phase\n\tFetchWorkerNum uint64\n\n\t// The number of concurrent queries executed\n\tQueryWorkerNum uint64\n}\n\n// ------------------------------------------------ ---------------------------------------------------------------------\n\n// ProjectLifeCycleExecutorName The life cycle of the project\nconst ProjectLifeCycleExecutorName = \"project-local-life-cycle-executor\"\n\n// ProjectLocalLifeCycleExecutor Used to fully run the entire project lifecycle\ntype ProjectLocalLifeCycleExecutor struct {\n\n\t// Some options required for the local life cycle\n\toptions *ProjectLocalLifeCycleExecutorOptions\n\n\t// project module path\n\trootModule *module.Module\n\n\t// for sync to cloud, If you log in, it has a real effect. 
If you do not log in, it has no real effect\n\tcloudExecutor *ProjectCloudLifeCycleExecutor\n}\n\nvar _ Executor = &ProjectLocalLifeCycleExecutor{}\n\n// NewProjectLocalLifeCycleExecutor Create a project executor\nfunc NewProjectLocalLifeCycleExecutor(options *ProjectLocalLifeCycleExecutorOptions) *ProjectLocalLifeCycleExecutor {\n\treturn &ProjectLocalLifeCycleExecutor{\n\t\toptions: options,\n\t}\n}\n\n// Name of project\nfunc (x *ProjectLocalLifeCycleExecutor) Name() string {\n\treturn ProjectLifeCycleExecutorName\n}\n\n// Execute Actually execute the project\nfunc (x *ProjectLocalLifeCycleExecutor) Execute(ctx context.Context) *schema.Diagnostics {\n\tdefer func() {\n\n\t\t// close cloud\n\t\tif x.cloudExecutor != nil {\n\t\t\tx.cloudExecutor.ShutdownAndWait(ctx)\n\t\t}\n\n\t\t// cloud self\n\t\tx.options.MessageChannel.SenderWaitAndClose()\n\n\t}()\n\n\t// load module & check\n\tif !x.loadModule(ctx) {\n\t\treturn nil\n\t}\n\n\t// init cloud\n\tif x.options.ProjectLifeCycleStep > ProjectLifeCycleStepCloudInit {\n\t\treturn nil\n\t}\n\t_ = x.initCloudClient(ctx)\n\t//if !ok {\n\t//\t_ = x.cloudExecutor.UploadLog(ctx, schema.NewDiagnostics().AddErrorMsg(\"Selefra Cloud init failed, exit.\"))\n\t//\treturn nil\n\t//}\n\t//_ = x.cloudExecutor.UploadLog(ctx, schema.NewDiagnostics().AddInfo(\"Selefra Cloud init success\"))\n\n\t// fix dsn\n\tif !x.fixDsn(ctx) {\n\t\treturn nil\n\t}\n\n\t// validate module is ok\n\tif x.options.ProjectLifeCycleStep > ProjectLifeCycleStepModuleCheck {\n\t\treturn nil\n\t}\n\tvalidatorContext := module.NewValidatorContext()\n\td := x.rootModule.Check(x.rootModule, validatorContext)\n\tif x.cloudExecutor.UploadLog(ctx, d) {\n\t\treturn nil\n\t}\n\n\t// install provider\n\tif x.options.ProjectLifeCycleStep > ProjectLifeCycleStepInstall {\n\t\treturn nil\n\t}\n\tprovidersInstallPlan, providerLocalManager, b := x.install(ctx)\n\tif !b {\n\t\tx.cloudExecutor.ReportTaskStatus(log.StageType_STAGE_TYPE_INITIALIZING, 
log.Status_STATUS_FAILED)\n\t\treturn nil\n\t}\n\n\t// check update\n\tx.options.MessageChannel.Send(schema.NewDiagnostics().AddInfo(\"\\nChecking Selefra provider updates...\\n\"))\n\tfor _, plan := range providersInstallPlan {\n\t\tx.options.MessageChannel.Send(schema.NewDiagnostics().AddInfo(\"\\t- %s all ready updated!\", plan.String()))\n\t}\n\tx.options.MessageChannel.Send(schema.NewDiagnostics().AddInfo(\"\\nSelefra has been finished update providers!\"))\n\n\tx.options.MessageChannel.Send(schema.NewDiagnostics().AddInfo(\"\\nTesting Selefra operation environment...\\n\"))\n\tx.options.MessageChannel.Send(schema.NewDiagnostics().AddInfo(\"\\t- Client verification completed\"))\n\tx.options.MessageChannel.Send(schema.NewDiagnostics().AddInfo(\"\\t- Providers verification completed\"))\n\tx.options.MessageChannel.Send(schema.NewDiagnostics().AddInfo(\"\\t- Profile verification completed\"))\n\tx.options.MessageChannel.Send(schema.NewDiagnostics().AddInfo(\"\\nComplete the Selefra runtime environment test!\"))\n\n\tx.cloudExecutor.ReportTaskStatus(log.StageType_STAGE_TYPE_INITIALIZING, log.Status_STATUS_SUCCESS)\n\tx.cloudExecutor.ChangeLogStage(log.StageType_STAGE_TYPE_PULL_INFRASTRUCTURE)\n\n\t// fetch data\n\tif x.options.ProjectLifeCycleStep > ProjectLifeCycleStepFetch {\n\t\treturn nil\n\t}\n\tfetchExecutor, fetchPlans, b := x.fetch(ctx, providersInstallPlan, providerLocalManager)\n\tif !b {\n\t\tx.cloudExecutor.ReportTaskStatus(log.StageType_STAGE_TYPE_PULL_INFRASTRUCTURE, log.Status_STATUS_FAILED)\n\t\treturn nil\n\t}\n\t// A value of 0 indicates that none of the providers has been successfully pulled, so there is no need to start subsequent pull tasks\n\tif len(fetchExecutor.GetProviderInformationMap()) == 0 {\n\t\tx.cloudExecutor.UploadLog(ctx, schema.NewDiagnostics().AddErrorMsg(\"Fetch failed, can not get provider information\"))\n\t\tx.cloudExecutor.ReportTaskStatus(log.StageType_STAGE_TYPE_PULL_INFRASTRUCTURE, log.Status_STATUS_FAILED)\n\t\treturn 
nil\n\t}\n\tx.cloudExecutor.ReportTaskStatus(log.StageType_STAGE_TYPE_PULL_INFRASTRUCTURE, log.Status_STATUS_SUCCESS)\n\tx.cloudExecutor.ChangeLogStage(log.StageType_STAGE_TYPE_INFRASTRUCTURE_ANALYSIS)\n\n\tpubOpt := postgresql_storage.NewPostgresqlStorageOptions(x.options.DSN)\n\tpubOpt.SearchPath = \"public\"\n\tpubStorage, d := storage_factory.NewStorage(ctx, storage_factory.StorageTypePostgresql, pubOpt)\n\tif d != nil && d.HasError() {\n\t\tx.options.MessageChannel.Send(d)\n\t\treturn nil\n\t}\n\tfor i := range fetchPlans {\n\t\tlowSchema := strings.ToLower(fetchPlans[i].FetchToDatabaseSchema)\n\t\tdia := pubStorage.SetKey(ctx, fetchPlans[i].ProviderConfigurationBlock.Name, lowSchema)\n\t\tif dia != nil && dia.HasError() {\n\t\t\tx.options.MessageChannel.Send(dia)\n\t\t\treturn nil\n\t\t}\n\t}\n\n\t// exec query\n\tif x.options.ProjectLifeCycleStep > ProjectLifeCycleStepQuery {\n\t\treturn nil\n\t}\n\tif !x.query(ctx, fetchExecutor, fetchPlans) {\n\t\tx.cloudExecutor.ReportTaskStatus(log.StageType_STAGE_TYPE_INFRASTRUCTURE_ANALYSIS, log.Status_STATUS_FAILED)\n\t\treturn nil\n\t}\n\tx.cloudExecutor.ReportTaskStatus(log.StageType_STAGE_TYPE_INFRASTRUCTURE_ANALYSIS, log.Status_STATUS_SUCCESS)\n\n\treturn nil\n}\n\nfunc (x *ProjectLocalLifeCycleExecutor) fixDsn(ctx context.Context) bool {\n\n\t// 1. first take from local module\n\tif x.rootModule != nil && x.rootModule.SelefraBlock != nil && x.rootModule.SelefraBlock.ConnectionBlock != nil {\n\t\tlogger.InfoF(\"fix dsn from selefra block\")\n\t\tx.options.DSN = x.rootModule.SelefraBlock.ConnectionBlock.BuildDSN()\n\t\treturn true\n\t}\n\n\t// 2. 
if is login, take from\n\tif x.cloudExecutor != nil && x.cloudExecutor.cloudClient != nil && x.cloudExecutor.cloudClient.IsLoggedIn() {\n\t\tdsn, diagnostics := x.cloudExecutor.cloudClient.FetchOrgDSN()\n\t\tx.options.MessageChannel.Send(diagnostics)\n\t\tif utils.HasError(diagnostics) {\n\t\t\treturn false\n\t\t}\n\t\tif dsn != \"\" {\n\t\t\tlogger.InfoF(\"fix dsn from cloud\")\n\t\t\tx.options.DSN = dsn\n\t\t\treturn true\n\t\t}\n\t}\n\n\t// 3. from options\n\tif x.options.DSN != \"\" {\n\t\tlogger.InfoF(\"fix dsn from options\")\n\t\treturn true\n\t}\n\n\t// 4. from env\n\tif os.Getenv(env.DatabaseDsn) != \"\" {\n\t\tlogger.InfoF(\"fix dsn from env\")\n\t\tx.options.DSN = os.Getenv(env.DatabaseDsn)\n\t\treturn true\n\t}\n\n\t// 5. start default postgresql instance\n\tdsn := pgstorage.DefaultPostgreSQL(x.options.DownloadWorkspace, x.options.MessageChannel.MakeChildChannel())\n\tif dsn != \"\" {\n\t\tx.options.DSN = dsn\n\t\treturn true\n\t}\n\n\treturn false\n}\n\n// Load the module to be apply\nfunc (x *ProjectLocalLifeCycleExecutor) loadModule(ctx context.Context) bool {\n\tmoduleLoaderOptions := &module_loader.LocalDirectoryModuleLoaderOptions{\n\t\tInstruction: x.options.Instruction,\n\t\tModuleLoaderOptions: &module_loader.ModuleLoaderOptions{\n\t\t\tSource:            x.options.ProjectWorkspace,\n\t\t\tVersion:           \"\",\n\t\t\tDownloadDirectory: x.options.DownloadWorkspace,\n\t\t\t// TODO\n\t\t\tProgressTracker:  nil,\n\t\t\tMessageChannel:   x.options.MessageChannel.MakeChildChannel(),\n\t\t\tDependenciesTree: []string{x.options.ProjectWorkspace},\n\t\t},\n\t\tModuleDirectory: x.options.ProjectWorkspace,\n\t}\n\n\tloader, err := module_loader.NewLocalDirectoryModuleLoader(moduleLoaderOptions)\n\tif err != nil {\n\t\tmoduleLoaderOptions.MessageChannel.SenderWaitAndClose()\n\t\tx.options.MessageChannel.Send(schema.NewDiagnostics().AddErrorMsg(\"create local directory module loader from %s error: %s\", x.options.ProjectWorkspace, 
err.Error()))\n\t\treturn false\n\t}\n\n\trootModule, b := loader.Load(ctx)\n\tif !b {\n\t\tx.options.MessageChannel.Send(schema.NewDiagnostics().AddErrorMsg(\"local directory module loader load  %s failed.\", x.options.ProjectWorkspace))\n\t\treturn false\n\t}\n\n\tx.rootModule = rootModule\n\treturn true\n}\n\n// ------------------------------------------------- --------------------------------------------------------------------\n\n// install need providers\nfunc (x *ProjectLocalLifeCycleExecutor) install(ctx context.Context) (planner.ProvidersInstallPlan, *local_providers_manager.LocalProvidersManager, bool) {\n\n\t// Make an installation plan\n\tprovidersInstallPlan, diagnostics := planner.MakeProviderInstallPlan(ctx, x.rootModule)\n\tif x.cloudExecutor.UploadLog(ctx, diagnostics) {\n\t\treturn nil, nil, false\n\t}\n\tif len(providersInstallPlan) == 0 {\n\t\t_ = x.cloudExecutor.UploadLog(ctx, schema.NewDiagnostics().AddErrorMsg(\"no providers\"))\n\t\treturn nil, nil, false\n\t}\n\n\t// Installation-dependent dependency\n\tinstallMessageChannel := message.NewChannel[*schema.Diagnostics](func(index int, message *schema.Diagnostics) {\n\t\tif utils.IsNotEmpty(message) {\n\t\t\t_ = x.cloudExecutor.UploadLog(ctx, message)\n\t\t}\n\t})\n\texecutor, diagnostics := NewProviderInstallExecutor(&ProviderInstallExecutorOptions{\n\t\tPlans:             providersInstallPlan,\n\t\tMessageChannel:    installMessageChannel,\n\t\tDownloadWorkspace: x.options.DownloadWorkspace,\n\t\t// TODO\n\t\tProgressTracker: nil,\n\t})\n\tif x.cloudExecutor.UploadLog(ctx, diagnostics) {\n\t\tinstallMessageChannel.SenderWaitAndClose()\n\t\treturn nil, nil, false\n\t}\n\td := executor.Execute(context.Background())\n\tinstallMessageChannel.ReceiverWait()\n\tif x.cloudExecutor.UploadLog(ctx, d) {\n\t\treturn nil, nil, false\n\t}\n\treturn providersInstallPlan, executor.GetLocalProviderManager(), true\n}\n\n// ------------------------------------------------- 
--------------------------------------------------------------------\n\n// Start pulling data\nfunc (x *ProjectLocalLifeCycleExecutor) fetch(ctx context.Context, providersInstallPlan planner.ProvidersInstallPlan, localProviderManager *local_providers_manager.LocalProvidersManager) (*ProviderFetchExecutor, planner.ProvidersFetchPlan, bool) {\n\tx.options.MessageChannel.Send(schema.NewDiagnostics().AddInfo(\"\\nSelefra will start infrastructure data collection...\\n\"))\n\t// Develop a data pull plan\n\tproviderFetchPlans, d := planner.NewProviderFetchPlanner(&planner.ProviderFetchPlannerOptions{\n\t\tDSN:                          x.options.DSN,\n\t\tModule:                       x.rootModule,\n\t\tProviderVersionVoteWinnerMap: providersInstallPlan.ToMap(),\n\t\tMessageChannel:               x.options.MessageChannel.MakeChildChannel(),\n\t}).MakePlan(ctx)\n\tif x.cloudExecutor.UploadLog(ctx, d) {\n\t\treturn nil, nil, false\n\t}\n\n\tfetchMessageChannel := message.NewChannel[*schema.Diagnostics](func(index int, message *schema.Diagnostics) {\n\t\tif utils.IsNotEmpty(message) {\n\t\t\t_ = x.cloudExecutor.UploadLog(ctx, message)\n\t\t}\n\t})\n\tfetchExecutor := NewProviderFetchExecutor(&ProviderFetchExecutorOptions{\n\t\tLocalProviderManager: localProviderManager,\n\t\tPlans:                providerFetchPlans,\n\t\tMessageChannel:       fetchMessageChannel,\n\t\tWorkerNum:            x.options.FetchWorkerNum,\n\t\tWorkspace:            x.options.ProjectWorkspace,\n\t\tDSN:                  x.options.DSN,\n\t\tFetchStepTo:          x.options.FetchStep,\n\t})\n\td = fetchExecutor.Execute(context.Background())\n\tfetchMessageChannel.ReceiverWait()\n\tif x.cloudExecutor.UploadLog(ctx, d) {\n\t\treturn nil, nil, false\n\t}\n\tx.options.MessageChannel.Send(schema.NewDiagnostics().AddInfo(\"Complete Selefra infrastructure data collection!\\n\"))\n\treturn fetchExecutor, providerFetchPlans, true\n}\n\n// ------------------------------------------------- 
--------------------------------------------------------------------\n\n// Start querying the policy and output the query results to the console and upload them to the cloud\nfunc (x *ProjectLocalLifeCycleExecutor) query(ctx context.Context, fetchExecutor *ProviderFetchExecutor, providerFetchPlans planner.ProvidersFetchPlan) bool {\n\tplan, d := planner.MakeModuleQueryPlan(ctx, &planner.ModulePlannerOptions{\n\t\tInstruction:        x.options.Instruction,\n\t\tModule:             x.rootModule,\n\t\tTableToProviderMap: fetchExecutor.GetTableToProviderMap(),\n\t})\n\tif x.cloudExecutor.UploadLog(ctx, d) {\n\t\treturn false\n\t}\n\tqueryMessageChannel := message.NewChannel[*schema.Diagnostics](func(index int, message *schema.Diagnostics) {\n\t\t_ = x.cloudExecutor.UploadLog(ctx, message)\n\t})\n\tresultQueryResultChannel := message.NewChannel[*RuleQueryResult](func(index int, message *RuleQueryResult) {\n\t\tx.cloudExecutor.UploadIssue(ctx, message)\n\t})\n\tcontextMap, d := providerFetchPlans.BuildProviderContextMap(ctx, x.options.DSN)\n\tif x.cloudExecutor.UploadLog(ctx, d) {\n\t\treturn false\n\t}\n\tqueryExecutor := NewModuleQueryExecutor(&ModuleQueryExecutorOptions{\n\t\tPlan:                   plan,\n\t\tDownloadWorkspace:      x.options.DownloadWorkspace,\n\t\tMessageChannel:         queryMessageChannel,\n\t\tRuleQueryResultChannel: resultQueryResultChannel,\n\t\tProviderInformationMap: fetchExecutor.GetProviderInformationMap(),\n\t\tProviderExpandMap:      contextMap,\n\t\tWorkerNum:              x.options.QueryWorkerNum,\n\t\t// TODO\n\t\tProgressTracker: nil,\n\t})\n\td = queryExecutor.Execute(ctx)\n\tresultQueryResultChannel.ReceiverWait()\n\tqueryMessageChannel.ReceiverWait()\n\treturn !x.cloudExecutor.UploadLog(ctx, d)\n}\n\nfunc (x *ProjectLocalLifeCycleExecutor) initCloudClient(ctx context.Context) bool {\n\n\t// Projects on the cloud share the same module as local projects\n\tif x.options.ProjectCloudLifeCycleExecutorOptions == nil 
{\n\t\tx.options.ProjectCloudLifeCycleExecutorOptions = &ProjectCloudLifeCycleExecutorOptions{\n\t\t\tIsNeedLogin:       false,\n\t\t\tEnableConsoleTips: true,\n\t\t}\n\t}\n\n\tif x.options.ProjectCloudLifeCycleExecutorOptions.Module == nil {\n\t\tx.options.ProjectCloudLifeCycleExecutorOptions.Module = x.rootModule\n\t}\n\n\t// The message queue is connected\n\tif x.options.ProjectCloudLifeCycleExecutorOptions.MessageChannel == nil {\n\t\tx.options.ProjectCloudLifeCycleExecutorOptions.MessageChannel = x.options.MessageChannel.MakeChildChannel()\n\t}\n\n\t// if module set cloud host, use it first\n\tif x.rootModule != nil && x.rootModule.SelefraBlock != nil && x.rootModule.SelefraBlock.CloudBlock != nil && x.rootModule.SelefraBlock.CloudBlock.HostName != \"\" {\n\t\tx.options.ProjectCloudLifeCycleExecutorOptions.CloudServerHost = x.rootModule.SelefraBlock.CloudBlock.HostName\n\t}\n\n\tx.cloudExecutor = NewProjectCloudLifeCycleExecutor(x.options.ProjectCloudLifeCycleExecutorOptions)\n\treturn x.cloudExecutor.InitCloudClient(ctx)\n}\n\n// ------------------------------------------------ ---------------------------------------------------------------------\n"
  },
  {
    "path": "pkg/modules/executors/provider_fetch_executor.go",
    "content": "package executors\n\nimport (\n\t\"context\"\n\t\"encoding/json\"\n\t\"errors\"\n\t\"fmt\"\n\t\"github.com/selefra/selefra-provider-sdk/grpc/shard\"\n\t\"github.com/selefra/selefra-provider-sdk/provider/schema\"\n\t\"github.com/selefra/selefra-provider-sdk/storage\"\n\t\"github.com/selefra/selefra-provider-sdk/storage/database_storage/postgresql_storage\"\n\t\"github.com/selefra/selefra-provider-sdk/storage_factory\"\n\t\"github.com/selefra/selefra-utils/pkg/id_util\"\n\t\"github.com/selefra/selefra-utils/pkg/pointer\"\n\t\"github.com/selefra/selefra/cli_ui\"\n\t\"github.com/selefra/selefra/pkg/logger\"\n\t\"github.com/selefra/selefra/pkg/message\"\n\t\"github.com/selefra/selefra/pkg/modules/module\"\n\t\"github.com/selefra/selefra/pkg/modules/planner\"\n\t\"github.com/selefra/selefra/pkg/plugin\"\n\t\"github.com/selefra/selefra/pkg/providers/local_providers_manager\"\n\t\"github.com/selefra/selefra/pkg/storage/pgstorage\"\n\t\"github.com/selefra/selefra/pkg/utils\"\n\t\"io\"\n\t\"path/filepath\"\n\t\"sync\"\n\t\"time\"\n)\n\n// ------------------------------------------------ ---------------------------------------------------------------------\n\n// FetchStep The pull is broken down into small steps, and you can control where to stop\ntype FetchStep int\n\nconst (\n\n\t// FetchStepFetch Notice that the order is reversed\n\t// The default level is the data after fetch\n\tFetchStepFetch FetchStep = iota\n\n\t// FetchStepCreateAllTable Go to create all tables\n\tFetchStepCreateAllTable FetchStep = iota\n\n\t// FetchStepDropAllTable Go to delete all tables\n\tFetchStepDropAllTable FetchStep = iota\n\n\t// FetchStepGetInformation Go to get the Provider information\n\tFetchStepGetInformation FetchStep = iota\n\n\t// FetchStepGetInit Perform Provider initialization\n\tFetchStepGetInit FetchStep = iota\n\n\t// FetchStepGetStart Just start the Provider up and quit\n\tFetchStepGetStart FetchStep = iota\n)\n\n// 
------------------------------------------------- --------------------------------------------------------------------\n\n// ProviderFetchExecutorOptions Various parameter options when pulling data\ntype ProviderFetchExecutorOptions struct {\n\n\t// Used to find the Provider and start the instance\n\tLocalProviderManager *local_providers_manager.LocalProvidersManager\n\n\t// The pull plan to execute\n\tPlans []*planner.ProviderFetchPlan\n\n\t// Receive message feedback in real time\n\tMessageChannel *message.Channel[*schema.Diagnostics]\n\n\t// Number of providers that are concurrently pulled\n\tWorkerNum uint64\n\n\t// Working directory\n\tWorkspace string\n\n\t// Connect to database\n\tDSN string\n\n\t// At which stage to exit\n\tFetchStepTo FetchStep\n}\n\n// ------------------------------------------------- --------------------------------------------------------------------\n\nconst FetchExecutorName = \"provider-fetch-executor\"\n\n// ProviderFetchExecutor An actuator for pulling data\ntype ProviderFetchExecutor struct {\n\toptions *ProviderFetchExecutorOptions\n\n\t// After the Provider is started, information about the Provider is collected\n\tproviderInformationMap map[string]*shard.GetProviderInformationResponse\n}\n\nvar _ Executor = &ProviderFetchExecutor{}\n\nfunc NewProviderFetchExecutor(options *ProviderFetchExecutorOptions) *ProviderFetchExecutor {\n\treturn &ProviderFetchExecutor{\n\t\toptions: options,\n\t}\n}\n\nfunc (x *ProviderFetchExecutor) GetProviderInformationMap() map[string]*shard.GetProviderInformationResponse {\n\treturn x.providerInformationMap\n}\n\nfunc (x *ProviderFetchExecutor) GetTableToProviderMap() map[string]string {\n\ttableToProviderMap := make(map[string]string)\n\tfor providerName, providerInformation := range x.providerInformationMap {\n\t\tfor _, table := range providerInformation.Tables {\n\t\t\tflatTableToProviderMap(providerName, table, tableToProviderMap)\n\t\t}\n\t}\n\treturn tableToProviderMap\n}\n\n// Generate a 
mapping of a single table to the provider\nfunc flatTableToProviderMap(providerName string, table *schema.Table, m map[string]string) {\n\tm[table.TableName] = providerName\n\n\tfor _, subTable := range table.SubTables {\n\t\tflatTableToProviderMap(providerName, subTable, m)\n\t}\n}\n\nfunc (x *ProviderFetchExecutor) Name() string {\n\treturn FetchExecutorName\n}\n\nfunc (x *ProviderFetchExecutor) Execute(ctx context.Context) *schema.Diagnostics {\n\n\tdefer func() {\n\t\tlogger.InfoF(\"fetch MessageChannel.SenderWaitAndClose begin\")\n\t\tx.options.MessageChannel.SenderWaitAndClose()\n\t\tlogger.InfoF(\"fetch MessageChannel.SenderWaitAndClose end\")\n\t}()\n\n\t// TODO Scheduling algorithm, Minimize waiting\n\t//x.options.MessageChannel.Send(schema.NewDiagnostics().AddInfo(\"Make fetch queue begin...\"))\n\tfetchPlanChannel := make(chan *planner.ProviderFetchPlan, len(x.options.Plans))\n\tfor _, plan := range x.options.Plans {\n\t\tfetchPlanChannel <- plan\n\t}\n\tclose(fetchPlanChannel)\n\t//x.options.MessageChannel.Send(schema.NewDiagnostics().AddInfo(\"Make fetch queue done...\"))\n\n\t// The concurrent pull starts\n\tproviderInformationChannel := make(chan *shard.GetProviderInformationResponse, len(x.options.Plans))\n\t//x.options.MessageChannel.Send(schema.NewDiagnostics().AddInfo(\"Run fetch worker...\"))\n\twg := sync.WaitGroup{}\n\tfor i := uint64(0); i < x.options.WorkerNum; i++ {\n\t\twg.Add(1)\n\t\tNewProviderFetchExecutorWorker(x, fetchPlanChannel, providerInformationChannel, &wg).Run()\n\t}\n\t//x.options.MessageChannel.Send(schema.NewDiagnostics().AddInfo(\"Start fetch worker done, wait queue consumer done.\"))\n\twg.Wait()\n\t//x.options.MessageChannel.Send(schema.NewDiagnostics().AddInfo(\"Fetch queue done\"))\n\n\t// Sort the provider information\n\tclose(providerInformationChannel)\n\tproviderInformationMap := make(map[string]*shard.GetProviderInformationResponse)\n\tfor response := range providerInformationChannel 
{\n\t\tproviderInformationMap[response.Name] = response\n\t}\n\tx.providerInformationMap = providerInformationMap\n\n\treturn nil\n}\n\n// ------------------------------------------------- --------------------------------------------------------------------\n\n// ProviderFetchExecutorWorker A working coroutine used to perform a pull task\ntype ProviderFetchExecutorWorker struct {\n\n\t// Is the task in which actuator is executed\n\texecutor *ProviderFetchExecutor\n\n\t// Task queue\n\tplanChannel chan *planner.ProviderFetchPlan\n\n\t// Exit signal\n\twg *sync.WaitGroup\n\n\t// Collect information about the started providers\n\tproviderInformationCollector chan *shard.GetProviderInformationResponse\n}\n\nfunc NewProviderFetchExecutorWorker(executor *ProviderFetchExecutor, planChannel chan *planner.ProviderFetchPlan, providerInformationCollector chan *shard.GetProviderInformationResponse, wg *sync.WaitGroup) *ProviderFetchExecutorWorker {\n\treturn &ProviderFetchExecutorWorker{\n\t\texecutor:                     executor,\n\t\tplanChannel:                  planChannel,\n\t\twg:                           wg,\n\t\tproviderInformationCollector: providerInformationCollector,\n\t}\n}\n\nfunc (x *ProviderFetchExecutorWorker) Run() {\n\tgo func() {\n\t\tdefer func() {\n\t\t\tx.wg.Done()\n\t\t}()\n\t\tfor plan := range x.planChannel {\n\t\t\t// The drop-down time limit for a single Provider is a month. 
If it is insufficient, adjust it again\n\t\t\tctx, cancelFunc := context.WithTimeout(context.Background(), time.Hour*24*30)\n\t\t\tx.executePlan(ctx, plan)\n\t\t\tcancelFunc()\n\t\t}\n\t}()\n}\n\n// Execute a provider fetch task plan\nfunc (x *ProviderFetchExecutorWorker) executePlan(ctx context.Context, plan *planner.ProviderFetchPlan) {\n\n\tdiagnostics := schema.NewDiagnostics()\n\n\tx.sendMessage(x.addProviderNameForMessage(plan, schema.NewDiagnostics().AddInfo(\"Begin fetch provider %s\", plan.String())))\n\n\t// Find the local path of the provider\n\tlocalProvider := &local_providers_manager.LocalProvider{\n\t\tProvider: plan.Provider,\n\t}\n\tinstalled, d := x.executor.options.LocalProviderManager.IsProviderInstalled(ctx, localProvider)\n\tif diagnostics.AddDiagnostics(d).HasError() {\n\t\tx.sendMessage(x.addProviderNameForMessage(plan, diagnostics))\n\t\treturn\n\t}\n\tif !installed {\n\t\tx.sendMessage(x.addProviderNameForMessage(plan, diagnostics.AddErrorMsg(\"Provider %s not installed, can not exec fetch for it\", plan.String())))\n\t\treturn\n\t}\n\n\t// Find the local installation location of the provider\n\tlocalProviderMeta, d := x.executor.options.LocalProviderManager.Get(ctx, localProvider)\n\tif diagnostics.AddDiagnostics(d).HasError() {\n\t\tx.sendMessage(x.addProviderNameForMessage(plan, diagnostics))\n\t\treturn\n\t}\n\n\t// Start provider\n\tplug, err := plugin.NewManagedPlugin(localProviderMeta.ExecutableFilePath, plan.Name, plan.Version, \"\", nil)\n\tif err != nil {\n\t\tx.sendMessage(x.addProviderNameForMessage(plan, schema.NewDiagnostics().AddErrorMsg(\"Start provider %s at %s failed: %s\", plan.String(), localProviderMeta.ExecutableFilePath, err.Error())))\n\t\treturn\n\t}\n\t// Close the provider at the end of the method execution\n\tdefer func() {\n\t\tplug.Close()\n\t\t//x.sendMessage(schema.NewDiagnostics().AddInfo(\"Stop provider %s at %s \", plan.String(), 
localProviderMeta.ExecutableFilePath))\n\t}()\n\n\tx.sendMessage(x.addProviderNameForMessage(plan, schema.NewDiagnostics().AddInfo(\"Start provider %s success\", plan.String())))\n\n\t// init\n\tif x.executor.options.FetchStepTo > FetchStepGetInit {\n\t\t// TODO log\n\t\treturn\n\t}\n\n\t// Database connection option\n\tstorageOpt := postgresql_storage.NewPostgresqlStorageOptions(x.executor.options.DSN)\n\tpgstorage.WithSearchPath(plan.FetchToDatabaseSchema)(storageOpt)\n\topt, err := json.Marshal(storageOpt)\n\tif err != nil {\n\t\tx.sendMessage(x.addProviderNameForMessage(plan, schema.NewDiagnostics().AddErrorMsg(\"Json marshal postgresql options error: %s\", err.Error())))\n\t\treturn\n\t}\n\n\t// Get the lock first\n\tdatabaseStorage, d := storage_factory.NewStorage(ctx, storage_factory.StorageTypePostgresql, storageOpt)\n\tx.sendMessage(x.addProviderNameForMessage(plan, d))\n\tif utils.HasError(d) {\n\t\treturn\n\t}\n\tdefer func() {\n\t\tdatabaseStorage.Close()\n\t}()\n\townerId := utils.BuildLockOwnerId()\n\ttryTimes := 0\n\tfor {\n\n\t\tx.sendMessage(x.addProviderNameForMessage(plan, schema.NewDiagnostics().AddInfo(\"Provider %s, schema %s, owner %s, fetch data, try get fetch lock...\", plan.String(), plan.FetchToDatabaseSchema, ownerId)))\n\n\t\ttryTimes++\n\t\terr := databaseStorage.Lock(ctx, pgstorage.LockId, ownerId)\n\t\tif err != nil {\n\t\t\tx.sendMessage(x.addProviderNameForMessage(plan, schema.NewDiagnostics().AddErrorMsg(\"Provider %s, schema %s, owner %s, fetch data, get fetch lock error: %s, will sleep & retry, tryTimes = %d\", plan.String(), plan.FetchToDatabaseSchema, ownerId, err.Error(), tryTimes)))\n\t\t} else {\n\t\t\tx.sendMessage(x.addProviderNameForMessage(plan, schema.NewDiagnostics().AddInfo(\"Provider %s, schema %s, owner %s, fetch data, get fetch lock success\", plan.String(), plan.FetchToDatabaseSchema, ownerId)))\n\t\t\tbreak\n\t\t}\n\t\ttime.Sleep(time.Second * 10)\n\t}\n\tdefer func() {\n\t\tfor tryTimes := 0; tryTimes < 10; 
tryTimes++ {\n\t\t\terr := databaseStorage.UnLock(ctx, pgstorage.LockId, ownerId)\n\t\t\tif err != nil {\n\t\t\t\tif errors.Is(err, postgresql_storage.ErrLockNotFound) {\n\t\t\t\t\tx.sendMessage(x.addProviderNameForMessage(plan, schema.NewDiagnostics().AddInfo(\"Provider %s, schema %s, owner %s, fetch data, release fetch lock success\", plan.String(), plan.FetchToDatabaseSchema, ownerId)))\n\t\t\t\t} else {\n\t\t\t\t\tx.sendMessage(x.addProviderNameForMessage(plan, schema.NewDiagnostics().AddErrorMsg(\"Provider %s, schema %s, owner %s, fetch data, release fetch lock error: %s, will sleep & retry, tryTimes = %d\", plan.String(), plan.FetchToDatabaseSchema, ownerId, err.Error(), tryTimes)))\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\tx.sendMessage(x.addProviderNameForMessage(plan, schema.NewDiagnostics().AddInfo(\"Provider %s, schema %s, owner %s, fetch data, release fetch lock success\", plan.String(), plan.FetchToDatabaseSchema, ownerId)))\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t}()\n\n\t// TODO Default values for processing parameters\n\t// Initialize the provider\n\tpluginProvider := plug.Provider()\n\tvar providerYamlConfiguration string\n\tif plan.ProviderConfigurationBlock == nil {\n\t\tproviderYamlConfiguration = module.GetDefaultProviderConfigYamlConfiguration(plan.Name, plan.Version)\n\t} else {\n\t\tproviderYamlConfiguration = plan.GetProvidersConfigYamlString()\n\t}\n\n\tworkspace, _ := filepath.Abs(x.executor.options.Workspace)\n\tproviderInitResponse, err := pluginProvider.Init(ctx, &shard.ProviderInitRequest{\n\t\tWorkspace: pointer.ToStringPointer(workspace),\n\t\tStorage: &shard.Storage{\n\t\t\tType:           0,\n\t\t\tStorageOptions: opt,\n\t\t},\n\t\tIsInstallInit:  pointer.FalsePointer(),\n\t\tProviderConfig: pointer.ToStringPointerOrNilIfEmpty(providerYamlConfiguration),\n\t})\n\tif err != nil {\n\t\tx.sendMessage(x.addProviderNameForMessage(plan, schema.NewDiagnostics().AddErrorMsg(\"Start provider failed: %s\", err.Error())))\n\t\treturn\n\t}\n\t// TODO 
There is a problem with process interruption here\n\tif utils.IsNotEmpty(providerInitResponse.Diagnostics) {\n\t\tx.sendMessage(x.addProviderNameForMessage(plan, providerInitResponse.Diagnostics))\n\t\tif utils.HasError(providerInitResponse.Diagnostics) {\n\t\t\treturn\n\t\t}\n\t}\n\tx.sendMessage(x.addProviderNameForMessage(plan, schema.NewDiagnostics().AddInfo(\"Provider %s init success\", plan.String())))\n\n\t// get information\n\tif x.executor.options.FetchStepTo > FetchStepGetInformation {\n\t\treturn\n\t}\n\n\t// Get information about the started provider\n\tinformation, err := pluginProvider.GetProviderInformation(ctx, &shard.GetProviderInformationRequest{})\n\tif err != nil {\n\t\tx.sendMessage(x.addProviderNameForMessage(plan, schema.NewDiagnostics().AddErrorMsg(\"Provider %s, schema %s, get provider information failed: %s\", plan.String(), plan.FetchToDatabaseSchema, err.Error())))\n\t\treturn\n\t}\n\tx.providerInformationCollector <- information\n\tx.sendMessage(x.addProviderNameForMessage(plan, schema.NewDiagnostics().AddInfo(\"Get provider %s information success\", plan.String())))\n\n\tif x.executor.options.FetchStepTo > FetchStepDropAllTable {\n\t\treturn\n\t}\n\n\tcli_ui.Infof(\"%s %s, pull infrastructure data:\\n\", plan.ProviderConfigurationBlock.Provider, plan.Provider.String())\n\t// Check whether the cache can be removed\n\tcache, needFetchTableSet := x.tryHitCache(ctx, databaseStorage, plan, information)\n\tif cache {\n\t\tx.sendMessage(x.addProviderNameForMessage(plan, schema.NewDiagnostics().AddInfo(\"Provider %s pull data hit cache\", plan.String())))\n\t\tcli_ui.Infof(\"Hit Selefra %s Provider cache! 
The default data cache time is %s.\\n\\n\", plan.String(), plan.ProviderConfigurationBlock.Cache)\n\t\treturn\n\t}\n\n\t// Delete the table before provider\n\tdropRes, err := pluginProvider.DropTableAll(ctx, &shard.ProviderDropTableAllRequest{})\n\tif err != nil {\n\t\tx.sendMessage(x.addProviderNameForMessage(plan, schema.NewDiagnostics().AddErrorMsg(\"Provider %s, schema %s, drop all table failed: %s\", plan.String(), plan.FetchToDatabaseSchema, err.Error())))\n\t\treturn\n\t}\n\tx.sendMessage(x.addProviderNameForMessage(plan, dropRes.Diagnostics))\n\tif utils.HasError(dropRes.Diagnostics) {\n\t\treturn\n\t}\n\tx.sendMessage(x.addProviderNameForMessage(plan, schema.NewDiagnostics().AddInfo(\"Provider %s drop database schema clean success\", plan.String())))\n\n\tif x.executor.options.FetchStepTo > FetchStepCreateAllTable {\n\t\treturn\n\t}\n\n\t// create all tables\n\tcreateRes, err := pluginProvider.CreateAllTables(ctx, &shard.ProviderCreateAllTablesRequest{})\n\tif err != nil {\n\t\tcli_ui.Errorln(err.Error())\n\t\tx.sendMessage(x.addProviderNameForMessage(plan, schema.NewDiagnostics().AddErrorMsg(\"Provider %s, schema %s, create all table failed: %s\", plan.String(), plan.FetchToDatabaseSchema, err.Error())))\n\t\treturn\n\t}\n\tif createRes.Diagnostics != nil {\n\t\tx.sendMessage(x.addProviderNameForMessage(plan, createRes.Diagnostics))\n\t\tif utils.HasError(createRes.Diagnostics) {\n\t\t\treturn\n\t\t}\n\t}\n\tx.sendMessage(x.addProviderNameForMessage(plan, schema.NewDiagnostics().AddInfo(\"Provider %s create tables success\", plan.String())))\n\n\tif x.executor.options.FetchStepTo > FetchStepFetch {\n\t\treturn\n\t}\n\tx.sendMessage(x.addProviderNameForMessage(plan, schema.NewDiagnostics().AddInfo(\"Provider %s begin fetch...\", plan.String())))\n\n\t// being pull data\n\tneedFetchTableNameSlice := make([]string, 0)\n\tfor tableName := range needFetchTableSet {\n\t\tneedFetchTableNameSlice = append(needFetchTableNameSlice, tableName)\n\t}\n\trecv, err := 
pluginProvider.PullTables(ctx, &shard.PullTablesRequest{\n\t\tTables:        needFetchTableNameSlice,\n\t\tMaxGoroutines: plan.GetMaxGoroutines(),\n\t\tTimeout:       0,\n\t})\n\tif err != nil {\n\t\tx.sendMessage(x.addProviderNameForMessage(plan, schema.NewDiagnostics().AddErrorMsg(\"Provider %s, schema %s, pull table failed: %s\", plan.String(), plan.FetchToDatabaseSchema, err.Error())))\n\t\treturn\n\t}\n\t//progbar := progress.DefaultProgress()\n\t//progbar.Add(decl.Name+\"@\"+decl.Version, -1)\n\t//success := 0\n\t//errorsN := 0\n\t//var total int64\n\t//for {\n\t//\tres, err := recv.Recv()\n\t//\tif err != nil {\n\t//\t\tif errors.Is(err, io.EOF) {\n\t//\t\t\tprogbar.Current(decl.Name+\"@\"+decl.Version, total, \"Done\")\n\t//\t\t\tprogbar.Done(decl.Name + \"@\" + decl.Version)\n\t//\t\t\tbreak\n\t//\t\t}\n\t//\t\treturn err\n\t//\t}\n\t//\tprogbar.SetTotal(decl.Name+\"@\"+decl.Version, int64(res.TableCount))\n\t//\tprogbar.Current(decl.Name+\"@\"+decl.Version, int64(len(res.FinishedTables)), res.Table)\n\t//\ttotal = int64(res.TableCount)\n\t//\tif res.Diagnostics != nil {\n\t//\t\tif res.Diagnostics.HasError() {\n\t//\t\t\tcli_ui.SaveLogToDiagnostic(res.Diagnostics.GetDiagnosticSlice())\n\t//\t\t}\n\t//\t}\n\t//\tsuccess = len(res.FinishedTables)\n\t//\terrorsN = 0\n\t//}\n\t//progbar.ReceiverWait(decl.Name + \"@\" + decl.Version)\n\t//if errorsN > 0 {\n\t//\tcli_ui.Errorf(\"\\nPull complete! Total Resources pulled:%d        Errors: %d\\n\", success, errorsN)\n\t//\treturn nil\n\t//}\n\t//cli_ui.Infof(\"\\nPull complete! 
Total Resources pulled:%d        Errors: %d\\n\", success, errorsN)\n\t//return nil\n\n\tsuccess := 0\n\terrorsN := 0\n\tvar total int64\n\trecordCount := 0\n\tfor {\n\t\tres, err := recv.Recv()\n\t\tif err != nil {\n\t\t\tif errors.Is(err, io.EOF) {\n\t\t\t\tcli_ui.Infof(\"Provider %s resource fetch %d/%d \\n\\n\", plan.String(), success, total)\n\t\t\t\tbreak\n\t\t\t}\n\t\t\tx.sendMessage(x.addProviderNameForMessage(plan, schema.NewDiagnostics().AddErrorMsg(err.Error())))\n\t\t\treturn\n\t\t}\n\t\ttotal = int64(res.TableCount)\n\t\tif res.Diagnostics != nil {\n\t\t\tx.sendMessage(x.addProviderNameForMessage(plan, res.Diagnostics))\n\t\t}\n\n\t\t// count record pull\n\t\tif utils.NotHasError(res.Diagnostics) {\n\t\t\trecordCount++\n\t\t}\n\n\t\tsuccess = len(res.FinishedTables)\n\t\terrorsN = 0\n\t\tcli_ui.Infof(\"Provider %s resource fetch %d/%d, finished task count %d ...\\r\", plan.String(), success, total, recordCount)\n\t\t//x.sendMessage(x.addProviderNameForMessage(plan, schema.NewDiagnostics().AddInfo(\"Provider %s resource fetch %d/%d, finished task count %d ...\", plan.String(), success, total, recordCount)))\n\t}\n\t_ = success\n\t_ = total\n\tx.sendMessage(x.addProviderNameForMessage(plan, schema.NewDiagnostics().AddInfo(\"Provider %s fetch %d/%d, record count %d ...\", plan.String(), success, total, recordCount)))\n\t//progbar.ReceiverWait(decl.Name + \"@\" + decl.Version)\n\tif errorsN > 0 {\n\t\t//cli_ui.Errorf(\"\\nPull complete! Total Resources pulled:%d        Errors: %d\\n\", success, errorsN)\n\t\t//return nil\n\t\treturn\n\t}\n\t//cli_ui.Infof(\"\\nPull complete! 
Total Resources pulled:%d        Errors: %d\\n\", success, errorsN)\n\t//return nil\n\tx.sendMessage(x.addProviderNameForMessage(plan, schema.NewDiagnostics().AddInfo(\"Provider %s fetch done\", plan.String())))\n\n\t// save table pull time\n\td = x.refreshPullTableTime(ctx, databaseStorage, plan, needFetchTableSet)\n\tif utils.IsNotEmpty(d) {\n\t\tx.executor.options.MessageChannel.Send(d)\n\t}\n\n\treturn\n}\n\nfunc (x *ProviderFetchExecutorWorker) addProviderNameForMessage(plan *planner.ProviderFetchPlan, d *schema.Diagnostics) *schema.Diagnostics {\n\tif d == nil {\n\t\treturn nil\n\t}\n\tdiagnostics := schema.NewDiagnostics()\n\tfor _, item := range d.GetDiagnosticSlice() {\n\t\tdiagnostics.AddDiagnostic(schema.NewDiagnostic(item.Level(), fmt.Sprintf(\"Provider %s say: %s\", plan.String(), item.Content())))\n\t}\n\treturn diagnostics\n}\n\nfunc (x *ProviderFetchExecutorWorker) sendMessage(message *schema.Diagnostics) {\n\tx.executor.options.MessageChannel.Send(message)\n}\n\n// ------------------------------------------------- --------------------------------------------------------------------\n\n// An attempt is made to hit the cache of the data pull, and if the cache can be hit, the previous data is used instead of a repeat pull\nfunc (x *ProviderFetchExecutorWorker) tryHitCache(ctx context.Context, databaseStorage storage.Storage, plan *planner.ProviderFetchPlan, providerInformation *shard.GetProviderInformationResponse) (bool, map[string]struct{}) {\n\n\t// Step 01. 
Calculate all the root tables that need to be pulled\n\ttooRootTableMap := x.makeToRootTableMap(providerInformation)\n\tneedFetchTableNameSet := map[string]struct{}{}\n\t//  If resource is specified, only the given resource is pulled\n\tif plan.ProviderConfigurationBlock != nil && len(plan.ProviderConfigurationBlock.Resources) != 0 {\n\t\tfor _, tableName := range plan.ProviderConfigurationBlock.Resources {\n\t\t\tneedFetchTableNameSet[tooRootTableMap[tableName]] = struct{}{}\n\t\t}\n\t} else {\n\t\t// Otherwise, all resources of this provider are pulled by default\n\t\tfor _, table := range providerInformation.Tables {\n\t\t\tneedFetchTableNameSet[table.TableName] = struct{}{}\n\t\t}\n\t}\n\n\t//  If caching is not enabled, return directly\n\tif !x.isEnableFetchCache(ctx, databaseStorage, plan) {\n\t\treturn false, needFetchTableNameSet\n\t}\n\n\tcache, diagnostics := x.computeAllNeedPullTableCanHitCache(ctx, databaseStorage, plan, needFetchTableNameSet)\n\tx.executor.options.MessageChannel.Send(diagnostics)\n\treturn cache, needFetchTableNameSet\n}\n\nfunc (x *ProviderFetchExecutorWorker) isEnableFetchCache(ctx context.Context, storage storage.Storage, plan *planner.ProviderFetchPlan) bool {\n\tif plan == nil || plan.ProviderConfigurationBlock == nil || plan.ProviderConfigurationBlock.Cache == \"\" {\n\t\treturn false\n\t}\n\treturn true\n}\n\n// Calculate all the tables that need to be pulled\nfunc (x *ProviderFetchExecutorWorker) computeAllNeedPullTableCanHitCache(ctx context.Context, storage storage.Storage, plan *planner.ProviderFetchPlan, needFetchTableNameSet map[string]struct{}) (bool, *schema.Diagnostics) {\n\n\tdiagnostics := schema.NewDiagnostics()\n\n\t// Step 02. 
Resolve whether it is expired\n\tduration, err := module.ParseDuration(plan.ProviderConfigurationBlock.Cache)\n\tif err != nil {\n\t\treturn false, schema.NewDiagnostics().AddErrorMsg(\"Parse cache duration failed: %s\", err.Error())\n\t}\n\tdatabaseTime, err := storage.GetTime(ctx)\n\tif err != nil {\n\t\treturn false, schema.NewDiagnostics().AddErrorMsg(\"Get database time failed: %s\", err.Error())\n\t}\n\n\t// The expiration time of the cache in the table\n\n\t// Step 03.\n\tpullTaskId := \"\"\n\tfor tableName := range needFetchTableNameSet {\n\t\tinformation, d := pgstorage.ReadTableCacheInformation(ctx, storage, tableName)\n\t\tif utils.HasError(d) {\n\t\t\tlogger.ErrorF(\"read table cache information error: %s\", d.String())\n\t\t\treturn false, d\n\t\t}\n\t\tif information == nil {\n\t\t\tlogger.ErrorF(\"read table cache information nil\")\n\t\t\treturn false, x.addProviderNameForMessage(plan, diagnostics.AddInfo(\"Table %s did not find cache information, still need pull table\", tableName))\n\t\t}\n\n\t\t// It has to be from the same batch\n\t\tif pullTaskId == \"\" {\n\t\t\tpullTaskId = information.LastPullId\n\t\t} else if pullTaskId != information.LastPullId {\n\t\t\treturn false, x.addProviderNameForMessage(plan, diagnostics.AddInfo(\"Table %s is not in the same period as the previous data pull, so the cache cannot be hit, still need pull table\", tableName))\n\t\t}\n\n\t\tif information.LastPullTime.Add(duration).Before(databaseTime) {\n\t\t\treturn false, x.addProviderNameForMessage(plan, diagnostics.AddInfo(\"Table %s pulls data that is out of date, still need pull table, last pull time %s, database now time %s, cache %s\",\n\t\t\t\ttableName, information.LastPullTime.String(), databaseTime.String(), duration.String()))\n\t\t}\n\n\t\t// ok, this table can hit cache\n\n\t}\n\n\t// ok, all table can hit cache\n\treturn true, nil\n}\n\n// Expand the forest of all tables of the provider into a mapping table from the current table name to the root table 
name\nfunc (x *ProviderFetchExecutorWorker) makeToRootTableMap(providerInformation *shard.GetProviderInformationResponse) map[string]string {\n\ttableRootMap := make(map[string]string, 0)\n\tfor rootTableName, rootTable := range providerInformation.Tables {\n\t\tfor _, tableName := range x.flatTable(rootTable) {\n\t\t\ttableRootMap[tableName] = rootTableName\n\t\t}\n\t}\n\treturn tableRootMap\n}\n\nfunc (x *ProviderFetchExecutorWorker) flatTable(table *schema.Table) []string {\n\tif table == nil {\n\t\treturn nil\n\t}\n\ttableNameSlice := []string{table.TableName}\n\tfor _, subTables := range table.SubTables {\n\t\ttableNameSlice = append(tableNameSlice, x.flatTable(subTables)...)\n\t}\n\treturn tableNameSlice\n}\n\nfunc (x *ProviderFetchExecutorWorker) refreshPullTableTime(ctx context.Context, databaseStorage storage.Storage, plan *planner.ProviderFetchPlan, needFetchTableNameSet map[string]struct{}) *schema.Diagnostics {\n\tdiagnostics := schema.NewDiagnostics()\n\tpullId := id_util.RandomId()\n\tstorageTime, err := databaseStorage.GetTime(ctx)\n\tif err != nil {\n\t\treturn diagnostics.AddErrorMsg(\"Get storage time error: %s\", err.Error())\n\t}\n\tfor tableName := range needFetchTableNameSet {\n\t\tinformation := &pgstorage.TableCacheInformation{\n\t\t\tTableName:    tableName,\n\t\t\tLastPullId:   pullId,\n\t\t\tLastPullTime: storageTime,\n\t\t}\n\t\td := pgstorage.SaveTableCacheInformation(ctx, databaseStorage, information)\n\t\tif diagnostics.AddDiagnostics(d).HasError() {\n\t\t\treturn diagnostics\n\t\t}\n\t}\n\treturn diagnostics\n}\n\n// ------------------------------------------------- --------------------------------------------------------------------\n"
  },
  {
    "path": "pkg/modules/executors/provider_fetch_executor_test.go",
    "content": "package executors\n\nimport (\n\t\"context\"\n\t\"github.com/selefra/selefra-provider-sdk/env\"\n\t\"github.com/selefra/selefra-provider-sdk/provider/schema\"\n\t\"github.com/selefra/selefra/pkg/message\"\n\t\"github.com/selefra/selefra/pkg/utils\"\n\t\"github.com/stretchr/testify/assert\"\n\t\"testing\"\n)\n\nfunc TestProviderFetchExecutor_Execute(t *testing.T) {\n\n\tprojectWorkspace := \"./test_data/test_fetch_module\"\n\tdownloadWorkspace := \"./test_download\"\n\n\tmessageChannel := message.NewChannel[*schema.Diagnostics](func(index int, message *schema.Diagnostics) {\n\t\tif utils.IsNotEmpty(message) {\n\t\t\tt.Log(message.ToString())\n\t\t}\n\t})\n\td := NewProjectLocalLifeCycleExecutor(&ProjectLocalLifeCycleExecutorOptions{\n\t\tProjectWorkspace:                     projectWorkspace,\n\t\tDownloadWorkspace:                    downloadWorkspace,\n\t\tMessageChannel:                       messageChannel,\n\t\tProjectLifeCycleStep:                 ProjectLifeCycleStepFetch,\n\t\tFetchStep:                            FetchStepFetch,\n\t\tProjectCloudLifeCycleExecutorOptions: nil,\n\t\tDSN:                                  env.GetDatabaseDsn(),\n\t\tFetchWorkerNum:                       1,\n\t\tQueryWorkerNum:                       1,\n\t}).Execute(context.Background())\n\tif utils.IsNotEmpty(d) {\n\t\tt.Log(d.ToString())\n\t}\n\tassert.False(t, utils.HasError(d))\n\n}\n\nfunc TestProviderFetchExecutorWorker_computeNeedFetchTables(t *testing.T) {\n\n\tprojectWorkspace := \"./test_data/test_fetch_module_with_cache\"\n\tdownloadWorkspace := \"./test_download\"\n\n\tmessageChannel := message.NewChannel[*schema.Diagnostics](func(index int, message *schema.Diagnostics) {\n\t\tif utils.IsNotEmpty(message) {\n\t\t\tt.Log(message.ToString())\n\t\t}\n\t})\n\td := NewProjectLocalLifeCycleExecutor(&ProjectLocalLifeCycleExecutorOptions{\n\t\tProjectWorkspace:                     projectWorkspace,\n\t\tDownloadWorkspace:                    
downloadWorkspace,\n\t\tMessageChannel:                       messageChannel,\n\t\tProjectLifeCycleStep:                 ProjectLifeCycleStepFetch,\n\t\tFetchStep:                            FetchStepFetch,\n\t\tProjectCloudLifeCycleExecutorOptions: nil,\n\t\tDSN:                                  env.GetDatabaseDsn(),\n\t\tFetchWorkerNum:                       1,\n\t\tQueryWorkerNum:                       1,\n\t}).Execute(context.Background())\n\tif utils.IsNotEmpty(d) {\n\t\tt.Log(d.ToString())\n\t}\n\tassert.False(t, utils.HasError(d))\n\n}\n"
  },
  {
    "path": "pkg/modules/executors/provider_install_executor.go",
    "content": "package executors\n\nimport (\n\t\"context\"\n\t\"github.com/hashicorp/go-getter\"\n\t\"github.com/selefra/selefra-provider-sdk/provider/schema\"\n\t\"github.com/selefra/selefra/pkg/message\"\n\t\"github.com/selefra/selefra/pkg/modules/planner\"\n\t\"github.com/selefra/selefra/pkg/providers/local_providers_manager\"\n\t\"github.com/selefra/selefra/pkg/utils\"\n)\n\n// ------------------------------------------------- --------------------------------------------------------------------\n\n// ProviderInstallExecutorOptions Install the provider's actuator\ntype ProviderInstallExecutorOptions struct {\n\n\t// The installation plan to execute\n\tPlans []*planner.ProviderInstallPlan\n\n\t// The path to install to\n\tDownloadWorkspace string\n\n\t// Receive real-time message feedback\n\tMessageChannel *message.Channel[*schema.Diagnostics]\n\n\t// Tracking installation progress\n\tProgressTracker getter.ProgressTracker\n}\n\n// ------------------------------------------------- --------------------------------------------------------------------\n\nconst ProviderInstallExecutorName = \"provider-install-executor\"\n\ntype ProviderInstallExecutor struct {\n\toptions *ProviderInstallExecutorOptions\n\n\tlocalProviderManager *local_providers_manager.LocalProvidersManager\n}\n\nvar _ Executor = &ProviderInstallExecutor{}\n\nfunc NewProviderInstallExecutor(options *ProviderInstallExecutorOptions) (*ProviderInstallExecutor, *schema.Diagnostics) {\n\tdiagnostics := schema.NewDiagnostics()\n\n\tmanager, err := local_providers_manager.NewLocalProvidersManager(options.DownloadWorkspace)\n\tif err != nil {\n\t\treturn nil, diagnostics.AddErrorMsg(err.Error())\n\t}\n\n\treturn &ProviderInstallExecutor{\n\t\toptions:              options,\n\t\tlocalProviderManager: manager,\n\t}, diagnostics\n}\n\n// GetLocalProviderManager This way we can reuse the local provider manager\nfunc (x *ProviderInstallExecutor) GetLocalProviderManager() 
*local_providers_manager.LocalProvidersManager {\n\treturn x.localProviderManager\n}\n\nfunc (x *ProviderInstallExecutor) Name() string {\n\treturn ProviderInstallExecutorName\n}\n\nfunc (x *ProviderInstallExecutor) Execute(ctx context.Context) *schema.Diagnostics {\n\n\tdefer func() {\n\t\tx.options.MessageChannel.SenderWaitAndClose()\n\t}()\n\n\tdiagnostics := schema.NewDiagnostics()\n\n\tx.options.MessageChannel.Send(schema.NewDiagnostics().AddInfo(\"Initializing provider plugins...\\n\"))\n\tvar plans []*planner.ProviderInstallPlan\n\tfor _, plan := range x.options.Plans {\n\t\tdiagnostics.AddDiagnostics(x.executePlan(ctx, plan))\n\t\tplans = append(plans, plan)\n\t}\n\tx.options.MessageChannel.Send(schema.NewDiagnostics().AddInfo(\"\\nSelefra has been successfully installed providers!\"))\n\treturn diagnostics\n}\n\nfunc (x *ProviderInstallExecutor) executePlan(ctx context.Context, plan *planner.ProviderInstallPlan) *schema.Diagnostics {\n\trequiredProvider := &local_providers_manager.LocalProvider{\n\t\tProvider: plan.Provider,\n\t}\n\tinstalled, diagnostics := x.localProviderManager.IsProviderInstalled(ctx, requiredProvider)\n\tif utils.HasError(diagnostics) {\n\t\treturn diagnostics\n\t}\n\tif installed {\n\t\tx.options.MessageChannel.Send(schema.NewDiagnostics().AddInfo(\"\\t- %s all ready updated!\", plan.String()))\n\t\treturn nil\n\t}\n\n\t//x.options.MessageChannel.Send(schema.NewDiagnostics().AddInfo(\"\\t- %s downloading...\", plan.String()))\n\n\tx.localProviderManager.InstallProvider(ctx, &local_providers_manager.InstallProvidersOptions{\n\t\tRequiredProvider: requiredProvider,\n\t\tMessageChannel:   x.options.MessageChannel.MakeChildChannel(),\n\t\tProgressTracker:  x.options.ProgressTracker,\n\t})\n\n\t// TODO init\n\n\tx.options.MessageChannel.Send(schema.NewDiagnostics().AddInfo(\"\\r\\t- %s all ready updated!\", plan.String()))\n\n\treturn nil\n}\n\n// ------------------------------------------------- 
--------------------------------------------------------------------\n"
  },
  {
    "path": "pkg/modules/executors/provider_install_executor_test.go",
    "content": "package executors\n\nimport (\n\t\"context\"\n\t\"github.com/selefra/selefra-provider-sdk/provider/schema\"\n\t\"github.com/selefra/selefra/pkg/message\"\n\t\"github.com/selefra/selefra/pkg/modules/planner\"\n\t\"github.com/selefra/selefra/pkg/utils\"\n\t\"github.com/selefra/selefra/pkg/version\"\n\t\"github.com/stretchr/testify/assert\"\n\t\"testing\"\n)\n\nfunc TestProviderInstallExecutor_Execute(t *testing.T) {\n\n\tproviderInstallPlans := []*planner.ProviderInstallPlan{\n\t\tplanner.NewProviderInstallPlan(\"alicloud\", \"v0.0.1\"),\n\t\tplanner.NewProviderInstallPlan(\"alicloud\", \"v0.0.2\"),\n\t\tplanner.NewProviderInstallPlan(\"alicloud\", \"v0.0.3\"),\n\t\tplanner.NewProviderInstallPlan(\"alicloud\", version.VersionLatest),\n\t\tplanner.NewProviderInstallPlan(\"gcp\", version.VersionLatest),\n\t}\n\n\tmessageChannel := message.NewChannel[*schema.Diagnostics](func(index int, message *schema.Diagnostics) {\n\t\tif utils.IsNotEmpty(message) {\n\t\t\tt.Log(message.ToString())\n\t\t}\n\t})\n\texecutor, diagnostics := NewProviderInstallExecutor(&ProviderInstallExecutorOptions{\n\t\tPlans:             providerInstallPlans,\n\t\tMessageChannel:    messageChannel,\n\t\tDownloadWorkspace: \"./test_download\",\n\t})\n\tassert.False(t, utils.HasError(diagnostics))\n\tif utils.IsNotEmpty(diagnostics) {\n\t\tt.Log(diagnostics.ToString())\n\t}\n\td := executor.Execute(context.Background())\n\tassert.False(t, utils.HasError(d))\n\tif utils.IsNotEmpty(d) {\n\t\tt.Log(d.ToString())\n\t}\n\tmessageChannel.ReceiverWait()\n}\n"
  },
  {
    "path": "pkg/modules/executors/test_data/test_fetch_module/modules.yaml",
    "content": ""
  },
  {
    "path": "pkg/modules/executors/test_data/test_fetch_module_with_cache/modules.yaml",
    "content": "selefra:\n  cloud:\n    project: example_project\n    organization: example_org\n    hostname: app.selefra.io\n  #  connection:\n  #    type: postgres\n  #    username: postgres\n  #    password: pass\n  #    host: localhost\n  #    port: 5432\n  #    database: postgres\n  #    sslmode: disable\n  name: example_project\n  cli_version: v0.0.1\n  providers:\n    - name: mock\n      source: mock\n      version: latest\n\nproviders:\n  - name: mock\n    cache: 1h\n    provider: mock\n    max_attempts: 10\n    max_backoff: 30\n    foo-count: 2\n    bar-count: 2\n    sleep-seconds: 1\n"
  },
  {
    "path": "pkg/modules/executors/test_data/test_query_module/modules.yaml",
    "content": "#rules:\n#  - name: bucket_versioning_is_disabled\n#    query: \"Please help me analyze the vulnerabilities in AWS S3?\"\n#    output: \"S3 bucket versioning is disabled, arn: {{.arn}}\"\n\nselefra:\n  openai_api_key: openaikey\n  openai_mode: gpt-3.5\n  openai_limit: 10\n  cloud:\n    project: example_project\n    organization: example_org\n  name: example_project\n  cli_version: v0.0.1\n  providers:\n    - name: aws\n      source: aws\n      version: latest\n\nproviders:\n  - name: aws\n    cache: 7d\n    provider: aws\n    max_goroutines: 100\n"
  },
  {
    "path": "pkg/modules/local_modules_manager/manager.go",
    "content": "package local_modules_manager\n\n// LocalModuleManager Manage the cache of locally downloaded modules\ntype LocalModuleManager struct {\n\tselefraHomeWorkspace string\n\tprojectWorkspace     string\n\tdownloadWorkspace    string\n}\n\nfunc NewLocalModuleManager() *LocalModuleManager {\n\treturn &LocalModuleManager{}\n}\n"
  },
  {
    "path": "pkg/modules/local_modules_manager/manager_get.go",
    "content": "package local_modules_manager\n\nimport \"github.com/selefra/selefra/pkg/registry\"\n\nfunc (x *LocalModuleManager) Get(module *registry.Module) {\n\n}\n"
  },
  {
    "path": "pkg/modules/local_modules_manager/manager_list.go",
    "content": "package local_modules_manager\n\nfunc (x *LocalModuleManager) List() {\n\n}\n"
  },
  {
    "path": "pkg/modules/local_modules_manager/manager_search.go",
    "content": "package local_modules_manager\n\nfunc (x *LocalModuleManager) Search() {\n\n}\n"
  },
  {
    "path": "pkg/modules/local_modules_manager/manager_tidy.go",
    "content": "package local_modules_manager\n\nfunc (x *LocalModuleManager) Tidy() {\n\n}"
  },
  {
    "path": "pkg/modules/local_modules_manager/manager_update.go",
    "content": "package local_modules_manager\n\nfunc (x *LocalModuleManager) Update() {\n\n}\n"
  },
  {
    "path": "pkg/modules/module/block.go",
    "content": "package module\n\nimport (\n\t\"github.com/selefra/selefra-provider-sdk/provider/schema\"\n)\n\n// ------------------------------------------------- --------------------------------------------------------------------\n\n// Block each block should implement this interface\ntype Block interface {\n\n\t// Validator block should be able to check that it's configuration is correct\n\tValidator\n\n\t// Locatable every block should be addressable, so you have to be able to figure out where the text is in that block\n\t// yaml to module parser fills in the location of the Block, so you can get the original location and content of the Block when you need it\n\t// The location information should not change and should be fixed once parsed\n\tLocatable\n\n\t// IsEmpty Determines whether the block is empty\n\tIsEmpty() bool\n}\n\n// ------------------------------------------------- --------------------------------------------------------------------\n\n// Validator A validator that supports checking\ntype Validator interface {\n\n\t// Check whether the node configuration is correct\n\tCheck(module *Module, validatorContext *ValidatorContext) *schema.Diagnostics\n}\n\n// ------------------------------------------------- --------------------------------------------------------------------\n\n// MergableBlock Used to indicate that a block is merge\ntype MergableBlock[T Block] interface {\n\n\t// Merge Used to merge two identical blocks\n\tMerge(other T) (T, *schema.Diagnostics)\n}\n\n// ------------------------------------------------- --------------------------------------------------------------------\n\n// HaveRuntime Some blocks may have a runtime to handle more complex logic\ntype HaveRuntime[T any] interface {\n\n\t// Runtime Returns the runtime corresponding to the block\n\tRuntime() T\n}\n\n// ------------------------------------------------- --------------------------------------------------------------------\n"
  },
  {
    "path": "pkg/modules/module/errors.go",
    "content": "package module\n\nimport (\n\t\"fmt\"\n\t\"github.com/fatih/color\"\n\t\"os\"\n\t\"strconv\"\n\t\"strings\"\n)\n\nvar (\n\tErrNotSupport = \"not support operation\"\n)\n\nconst DocSiteUrl = \"http://selefra.io/docs\"\n\n// RenderErrorTemplate Output Example:\n//\n// error[E827890]: syntax error, do not support modules[1].output\n//\n//\t -->  test_data\\test.yaml:83:7 ( modules[1].output )\n//\t| 78   - name: example_module\n//\t| 79     uses: ./rules/\n//\t| 80     input:\n//\t| 81       name: selefra\n//\t| 82     output:\n//\t| 83       - \"This is a test output message, resource region is {{.region}}.\"\n//\t|          ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n//\t| 84\n//\t| 85 variables:\n//\t| 86   - key: test\n//\t| 87     default:\nfunc RenderErrorTemplate(errorType string, location *NodeLocation) string {\n\ts := strings.Builder{}\n\n\ts.WriteString(fmt.Sprintf(\"%s: %s \\n\", color.RedString(\"error[E827890]\"), errorType))\n\tif location == nil {\n\t\treturn s.String()\n\t}\n\ts.WriteString(fmt.Sprintf(\"%s %s:%d:%d ( %s ) \\n\", color.BlueString(\" --> \"), location.Path, location.Begin.Line, location.Begin.Column, location.YamlSelector))\n\n\tfile, err := os.ReadFile(location.Path)\n\tif err != nil {\n\t\t// TODO\n\t\treturn err.Error()\n\t}\n\tsplit := strings.Split(string(file), \"\\n\")\n\t// The number of characters used for lines depends on the actual number of lines in the file\n\tlineWidth := strconv.Itoa(len(strconv.Itoa(len(split))))\n\tfor lineIndex, lineString := range split {\n\t\t// There can be a newline problem on Windows platforms\n\t\tlineString = strings.TrimRight(lineString, \"\\r\")\n\t\trealLineIndex := lineIndex + 1\n\t\t// Go ahead and back a few more lines\n\t\tcutoff := 5\n\t\tif realLineIndex >= location.Begin.Line && realLineIndex <= location.End.Line {\n\t\t\tbegin := 0\n\t\t\tend := len(lineString) + 1\n\t\t\tif realLineIndex == location.Begin.Line {\n\t\t\t\tbegin = 
location.Begin.Column - 1\n\t\t\t}\n\t\t\tif realLineIndex == location.End.Line {\n\t\t\t\tend = location.End.Column - 1\n\t\t\t}\n\n\t\t\t//s.WriteString(fmt.Sprintf(\"| %\"+lineWidth+\"d: \", realLineIndex))\n\t\t\ts.WriteString(fmt.Sprintf(\"| %-\"+lineWidth+\"d \", realLineIndex))\n\t\t\ts.WriteString(lineString)\n\t\t\ts.WriteString(\"\\n\")\n\n\t\t\t// Error underlining\n\t\t\tunderline := withUnderline(lineString, begin, end)\n\t\t\tif underline != \"\" {\n\t\t\t\ts.WriteString(fmt.Sprintf(\"| %\"+lineWidth+\"s \", \" \"))\n\t\t\t\ts.WriteString(color.RedString(underline))\n\t\t\t\ts.WriteString(\"\\n\")\n\t\t\t}\n\n\t\t} else if (realLineIndex >= location.Begin.Line-cutoff && realLineIndex < location.Begin.Line) || (realLineIndex > location.End.Line && realLineIndex <= location.End.Line+cutoff) {\n\t\t\t//s.WriteString(fmt.Sprintf(\"| %\"+lineWidth+\"d: \", realLineIndex))\n\t\t\ts.WriteString(fmt.Sprintf(\"| %-\"+lineWidth+\"d \", realLineIndex))\n\t\t\ts.WriteString(lineString)\n\t\t\ts.WriteString(\"\\n\")\n\t\t}\n\t}\n\ts.WriteString(\"--> See our docs: \" + DocSiteUrl + \"\\n\")\n\n\treturn s.String()\n}\n\n// Underline the lines in red\nfunc withUnderline(line string, begin, end int) string {\n\tunderline := make([]string, 0)\n\tfor index, _ := range line {\n\t\tif index >= begin && index <= end {\n\t\t\tunderline = append(underline, color.RedString(\"^\"))\n\t\t} else {\n\t\t\tunderline = append(underline, color.RedString(\" \"))\n\t\t}\n\t}\n\tif len(underline) == 0 {\n\t\treturn \"\"\n\t}\n\treturn strings.Join(underline, \"\")\n}\n"
  },
  {
    "path": "pkg/modules/module/locatable.go",
    "content": "package module\n\nimport (\n\t\"github.com/golang-infrastructure/go-trie\"\n\t\"gopkg.in/yaml.v3\"\n\t\"os\"\n\t\"strings\"\n\t\"unicode/utf8\"\n)\n\n// ------------------------------------------------- --------------------------------------------------------------------\n\n// Locatable Used to find the file and location of each block. All blocks should implement this interface\ntype Locatable interface {\n\n\t// GetNodeLocation Gets the location of the block\n\tGetNodeLocation(selector string) *NodeLocation\n\n\t// SetNodeLocation Set the location of the node\n\tSetNodeLocation(selector string, nodeLocation *NodeLocation) error\n}\n\n// NodeLocation A piece of location information used to represent a block\ntype NodeLocation struct {\n\n\t// The path from the root node of yaml to the current node\n\tYamlSelector string\n\n\t// path for find file, It is usually stored in a file system, which is the location of a file\n\tPath string\n\n\t// Represents a continuous piece of text in a file, with a starting position and an ending position\n\tBegin, End *Position\n}\n\nfunc BuildLocationFromYamlNode(yamlFilePath string, yamlSelector string, node *yaml.Node) *NodeLocation {\n\tendNode := rightLeafNode(node)\n\tif endNode == nil {\n\t\treturn nil\n\t}\n\treturn &NodeLocation{\n\t\tPath:         yamlFilePath,\n\t\tYamlSelector: yamlSelector,\n\t\tBegin:        NewPosition(node.Line, node.Column),\n\t\tEnd:          NewPosition(endNode.Line, endNode.Column+utf8.RuneCountInString(endNode.Value)),\n\t}\n}\n\n// Gets the end point of a node\nfunc rightLeafNode(node *yaml.Node) *yaml.Node {\n\tif node == nil || node.Kind == yaml.ScalarNode || len(node.Content) == 0 {\n\t\treturn node\n\t}\n\treturn rightLeafNode(node.Content[len(node.Content)-1])\n}\n\n// ReadSourceString Read the source string content based on location information\nfunc (x *NodeLocation) ReadSourceString() string {\n\tif x == nil {\n\t\treturn \"\"\n\t}\n\tfile, err := os.ReadFile(x.Path)\n\tif 
err != nil {\n\t\treturn err.Error()\n\t}\n\tsplit := strings.Split(string(file), \"\\n\")\n\tbuff := strings.Builder{}\n\tinCollection := false\nloop:\n\tfor lineIndex, lineString := range split {\n\t\tfor columnIndex, columnCharacter := range lineString {\n\t\t\tif (lineIndex+1) >= x.Begin.Line && (columnIndex+1) >= x.Begin.Column {\n\t\t\t\tinCollection = true\n\t\t\t}\n\t\t\tif inCollection {\n\t\t\t\tbuff.WriteRune(columnCharacter)\n\t\t\t}\n\t\t\tif (lineIndex+1) >= x.End.Line && (columnIndex+1) >= x.End.Column {\n\t\t\t\tinCollection = false\n\t\t\t\tbreak loop\n\t\t\t}\n\t\t}\n\t\tif inCollection {\n\t\t\tbuff.WriteRune('\\n')\n\t\t}\n\t}\n\treturn buff.String()\n}\n\n// ------------------------------------------------- --------------------------------------------------------------------\n\n// Position Represents a point in a file\ntype Position struct {\n\n\t// which line\n\tLine int\n\n\t// which column\n\tColumn int\n}\n\nfunc NewPosition(line, column int) *Position {\n\treturn &Position{\n\t\tLine:   line,\n\t\tColumn: column,\n\t}\n}\n\n// ------------------------------------------------- --------------------------------------------------------------------\n\ntype LocatableImpl struct {\n\tyamlSelectorTrie *trie.Trie[*NodeLocation]\n}\n\nvar _ Locatable = &LocatableImpl{}\n\nfunc NewLocatableImpl() *LocatableImpl {\n\treturn &LocatableImpl{\n\t\t// TODO Improve the efficiency of the tree\n\t\tyamlSelectorTrie: trie.New[*NodeLocation](trie.DefaultPathSplitFunc),\n\t}\n}\n\nconst (\n\tNodeLocationSelfKey   = \"._key\"\n\tNodeLocationSelfValue = \"._value\"\n)\n\nfunc (x *LocatableImpl) GetNodeLocation(relativeSelector string) *NodeLocation {\n\n\t// Example(with ._key or ._value):\n\t// foo._key\n\t// foo._value\n\tselectorPathLocation, err := x.yamlSelectorTrie.Query(relativeSelector)\n\tif err == nil {\n\t\treturn selectorPathLocation\n\t}\n\n\t// Example(without ._key or ._value):\n\t// foo\n\t// bar\n\tkeyLocation, keyErr := 
x.yamlSelectorTrie.Query(relativeSelector + NodeLocationSelfKey)\n\tvalueLocation, valueErr := x.yamlSelectorTrie.Query(relativeSelector + NodeLocationSelfValue)\n\tif keyErr != nil && valueErr != nil {\n\t\treturn nil\n\t}\n\treturn MergeKeyValueLocation(keyLocation, valueLocation)\n}\n\nfunc MergeKeyValueLocation(keyLocation, valueLocation *NodeLocation) *NodeLocation {\n\tif keyLocation == nil {\n\t\treturn valueLocation\n\t} else if valueLocation == nil {\n\t\treturn keyLocation\n\t}\n\n\treturn &NodeLocation{\n\t\tYamlSelector: keyLocation.YamlSelector,\n\t\tPath:         keyLocation.Path,\n\t\tBegin:        keyLocation.Begin,\n\t\tEnd:          valueLocation.End,\n\t}\n}\n\n// foo.bar.key --> foo.bar\n// foo.bar[1] --> foo.bar\nfunc baseYamlSelector(yamlSelector string) string {\n\tif len(yamlSelector) == 0 {\n\t\treturn \"\"\n\t}\n\n\t// Look for boundary characters\n\tvar delimiterCharacter byte\n\tswitch yamlSelector[len(yamlSelector)-1] {\n\tcase ']':\n\t\tdelimiterCharacter = '['\n\tdefault:\n\t\tdelimiterCharacter = '.'\n\t}\n\n\tfor index := len(yamlSelector) - 2; index >= 0; index-- {\n\t\tif yamlSelector[index] == delimiterCharacter {\n\t\t\treturn yamlSelector[0:index]\n\t\t}\n\t}\n\n\treturn \"\"\n}\n\nfunc (x *LocatableImpl) SetNodeLocation(relativeSelector string, nodeLocation *NodeLocation) error {\n\treturn x.yamlSelectorTrie.Add(relativeSelector, nodeLocation)\n}\n\n// ------------------------------------------------- --------------------------------------------------------------------\n"
  },
  {
    "path": "pkg/modules/module/locatable_test.go",
    "content": "package module\n\nimport (\n\t\"github.com/stretchr/testify/assert\"\n\t\"testing\"\n)\n\nfunc Test_baseYamlSelector(t *testing.T) {\n\ts := baseYamlSelector(\"foo.bar._key\")\n\tassert.Equal(t, \"foo.bar\", s)\n\n\ts = baseYamlSelector(\"foo.bar[1]\")\n\tassert.Equal(t, \"foo.bar\", s)\n\n\ts = baseYamlSelector(\"f\")\n\tassert.Equal(t, \"\", s)\n}\n"
  },
  {
    "path": "pkg/modules/module/module.go",
    "content": "package module\n\nimport (\n\t\"context\"\n\t\"fmt\"\n\t\"github.com/selefra/selefra-provider-sdk/provider/schema\"\n\t\"github.com/selefra/selefra-utils/pkg/reflect_util\"\n\t\"reflect\"\n)\n\n// ------------------------------------------------- --------------------------------------------------------------------\n\n// Module Represents information about a module\ntype Module struct {\n\n\t// Several root-level blocks of a module\n\tSelefraBlock   *SelefraBlock\n\tModulesBlock   ModulesBlock\n\tProvidersBlock ProvidersBlock\n\tRulesBlock     RulesBlock\n\tVariablesBlock VariablesBlock\n\n\t// Parent of the current module\n\tParentModule *Module\n\n\t// What are the submodules of the current module, [subModuleName, *subModule]\n\t// Keep the order of references\n\tSubModules []*Module\n\n\t// The source of the module, in fact, is the string written inside use\n\t// The source of the root module is the current path\n\tSource string\n\t// Local path of the module\n\tModuleLocalDirectory string\n\n\t// How is the dependency from the top-level module to the current module, in fact, all the way to use the concatenation\n\tDependenciesPath []string\n}\n\nfunc NewModule() *Module {\n\treturn &Module{}\n}\n\n// ------------------------------------------------- --------------------------------------------------------------------\n\n// BuildFullName The full path name of the module, which can be understood at a glance\nfunc (x *Module) BuildFullName() string {\n\tif x.Source == \"\" {\n\t\treturn x.ModuleLocalDirectory\n\t} else {\n\t\treturn fmt.Sprintf(\"%s @ %s\", x.Source, x.ModuleLocalDirectory)\n\t}\n}\n\n// ------------------------------------------------- --------------------------------------------------------------------\n\n// HasRequiredProviderName check whether the required provider name is available\nfunc (x *Module) HasRequiredProviderName(requiredProviderName string) bool {\n\tfor _, requiredProvider := range 
x.SelefraBlock.RequireProvidersBlock {\n\t\tif requiredProvider.Name == requiredProviderName {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}\n\n// ListRequiredProvidersName List the names of all required providers\nfunc (x *Module) ListRequiredProvidersName() []string {\n\trequiredProviderNameSlice := make([]string, len(x.SelefraBlock.RequireProvidersBlock))\n\tfor index, requiredProvider := range x.SelefraBlock.RequireProvidersBlock {\n\t\trequiredProviderNameSlice[index] = requiredProvider.Name\n\t}\n\treturn requiredProviderNameSlice\n}\n\n// ------------------------------------------------- --------------------------------------------------------------------\n\n// Merge the two modules into a new module\nfunc (x *Module) Merge(other *Module) (*Module, *schema.Diagnostics) {\n\n\tdiagnostics := schema.NewDiagnostics()\n\n\t// Only independent, unrelated modules can be merged, as if they were different configuration files in the same path\n\tif x.ParentModule != nil || len(x.SubModules) != 0 || other.ParentModule != nil || len(other.SubModules) != 0 {\n\t\treturn nil, diagnostics.AddErrorMsg(\"can not merge module it have parent module or submodules\")\n\t}\n\n\tmergedModule := NewModule()\n\t// The blocks at the root level are merged one by one\n\tmergedModule.SelefraBlock = MergeBlockWithDiagnostics(x.SelefraBlock, other.SelefraBlock, diagnostics)\n\tmergedModule.ModulesBlock = MergeBlockWithDiagnostics(x.ModulesBlock, other.ModulesBlock, diagnostics)\n\tmergedModule.ProvidersBlock = MergeBlockWithDiagnostics(x.ProvidersBlock, other.ProvidersBlock, diagnostics)\n\tmergedModule.RulesBlock = MergeBlockWithDiagnostics(x.RulesBlock, other.RulesBlock, diagnostics)\n\tmergedModule.VariablesBlock = MergeBlockWithDiagnostics(x.VariablesBlock, other.VariablesBlock, diagnostics)\n\n\treturn mergedModule, diagnostics\n}\n\n// MergeBlockWithDiagnostics Merge two blocks\nfunc MergeBlockWithDiagnostics[T Block](blockA, blockB T, diagnostics *schema.Diagnostics) T 
{\n\tvar zero T\n\tif !reflect_util.IsNil(blockA) && !reflect_util.IsNil(blockB) {\n\t\treflectValueA := reflect.ValueOf(blockA)\n\t\tif reflectValueA.CanInterface() {\n\t\t\tmergableBlockA, ok := reflectValueA.Interface().(MergableBlock[T])\n\t\t\tif !ok {\n\t\t\t\t// TODO error message\n\t\t\t\tdiagnostics.AddErrorMsg(\"can not convert block to MergableBlock\")\n\t\t\t\treturn zero\n\t\t\t}\n\t\t\tmerge, d := mergableBlockA.Merge(blockB)\n\t\t\tdiagnostics.AddDiagnostics(d)\n\t\t\tif d == nil || !d.HasError() {\n\t\t\t\treturn merge\n\t\t\t} else {\n\t\t\t\treturn zero\n\t\t\t}\n\t\t} else {\n\t\t\t// TODO build human-readable error message\n\t\t\tdiagnostics.AddErrorMsg(\"can not convert block to MergableBlock\")\n\t\t\treturn zero\n\t\t}\n\t} else if reflect_util.IsNil(blockA) {\n\t\treturn blockB\n\t} else {\n\t\treturn blockA\n\t}\n}\n\n// ------------------------------------------------- --------------------------------------------------------------------\n\nfunc (x *Module) Check(module *Module, validatorContext *ValidatorContext) *schema.Diagnostics {\n\n\tdiagnostics := schema.NewDiagnostics()\n\n\tif x.ModulesBlock != nil {\n\t\tdiagnostics.AddDiagnostics(x.ModulesBlock.Check(x, validatorContext))\n\t}\n\n\tif x.ProvidersBlock != nil {\n\t\tdiagnostics.AddDiagnostics(x.ProvidersBlock.Check(x, validatorContext))\n\t}\n\n\tif x.SelefraBlock != nil {\n\t\tdiagnostics.AddDiagnostics(x.SelefraBlock.Check(x, validatorContext))\n\t} else {\n\t\terrorTips := fmt.Sprintf(\"module %s must contain a selefra block\", x.BuildFullName())\n\t\tdiagnostics.AddErrorMsg(errorTips)\n\t}\n\n\tif x.RulesBlock != nil {\n\t\tdiagnostics.AddDiagnostics(x.RulesBlock.Check(x, validatorContext))\n\t}\n\n\tif x.VariablesBlock != nil {\n\t\tdiagnostics.AddDiagnostics(x.VariablesBlock.Check(x, validatorContext))\n\t}\n\n\t// check submodules\n\tfor _, subModule := range x.SubModules {\n\t\tdiagnostics.AddDiagnostics(subModule.Check(subModule, validatorContext))\n\t}\n\n\treturn diagnostics\n}\n\n// ------------------------------------------------- --------------------------------------------------------------------\n\ntype TraversalContext struct {\n\tParentTraversalContext *TraversalContext\n\n\tParentModule *Module\n\tModule       *Module\n}\n\nfunc (x *Module) Traversal(ctx context.Context, traversalFunc func(ctx context.Context, traversalContext *TraversalContext) bool) {\n\tx.internalTraversal(ctx, &TraversalContext{ParentTraversalContext: nil, ParentModule: nil, Module: x}, traversalFunc)\n}\n\nfunc (x *Module) internalTraversal(ctx context.Context, traversalContext *TraversalContext, traversalFunc func(ctx context.Context, traversalContext *TraversalContext) bool) {\n\n\tif !traversalFunc(ctx, traversalContext) {\n\t\treturn\n\t}\n\n\tfor _, subModule := range traversalContext.Module.SubModules {\n\t\tx.internalTraversal(ctx, &TraversalContext{ParentTraversalContext: traversalContext, ParentModule: traversalContext.Module, Module: subModule}, traversalFunc)\n\t}\n\n}\n\n// ------------------------------------------------ ---------------------------------------------------------------------\n"
  },
  {
    "path": "pkg/modules/module/modules_block.go",
    "content": "package module\n\nimport (\n\t\"fmt\"\n\t\"github.com/selefra/selefra-provider-sdk/provider/schema\"\n)\n\n// ------------------------------------------------- --------------------------------------------------------------------\n\ntype ModulesBlock []*ModuleBlock\n\nvar _ Block = (*ModulesBlock)(nil)\nvar _ MergableBlock[ModulesBlock] = (*ModulesBlock)(nil)\n\nfunc (x ModulesBlock) Merge(other ModulesBlock) (ModulesBlock, *schema.Diagnostics) {\n\n\tdiagnostics := schema.NewDiagnostics()\n\n\tmoduleNameSet := make(map[string]struct{})\n\tmergedModules := make(ModulesBlock, 0)\n\n\t// merge myself\n\tfor _, moduleBlock := range x {\n\t\tif _, exists := moduleNameSet[moduleBlock.Name]; exists {\n\t\t\terrorTips := fmt.Sprintf(\"Module with the same name is not allowed in the same module. The module name %s is the duplication\", moduleBlock.Name)\n\t\t\treport := RenderErrorTemplate(errorTips, moduleBlock.GetNodeLocation(\"\"))\n\t\t\tdiagnostics.AddErrorMsg(report)\n\t\t\tcontinue\n\t\t}\n\t\tmergedModules = append(mergedModules, moduleBlock)\n\t\tmoduleNameSet[moduleBlock.Name] = struct{}{}\n\t}\n\n\t// merge other\n\tfor _, moduleBlock := range other {\n\t\tif _, exists := moduleNameSet[moduleBlock.Name]; exists {\n\t\t\terrorTips := fmt.Sprintf(\"Module with the same name is not allowed in the same module. 
The module name %s is the duplication\", moduleBlock.Name)\n\t\t\treport := RenderErrorTemplate(errorTips, moduleBlock.GetNodeLocation(\"\"))\n\t\t\tdiagnostics.AddErrorMsg(report)\n\t\t\tcontinue\n\t\t}\n\t\tmergedModules = append(mergedModules, moduleBlock)\n\t\tmoduleNameSet[moduleBlock.Name] = struct{}{}\n\t}\n\n\treturn mergedModules, diagnostics\n}\n\nfunc (x ModulesBlock) Check(module *Module, validatorContext *ValidatorContext) *schema.Diagnostics {\n\tdiagnostics := schema.NewDiagnostics()\n\tfor _, moduleBlock := range x {\n\t\tdiagnostics.AddDiagnostics(moduleBlock.Check(module, validatorContext))\n\t}\n\treturn diagnostics\n}\n\nfunc (x ModulesBlock) IsEmpty() bool {\n\treturn len(x) == 0\n}\n\nfunc (x ModulesBlock) GetNodeLocation(selector string) *NodeLocation {\n\tpanic(\"not supported\")\n}\n\nfunc (x ModulesBlock) SetNodeLocation(selector string, nodeLocation *NodeLocation) error {\n\tpanic(\"not supported\")\n}\n\nfunc (x ModulesBlock) ModulesInputMap() map[string]*ModuleBlock {\n\tmodulesInputMap := make(map[string]*ModuleBlock)\n\tfor _, subModuleBlock := range x {\n\t\tmodulesInputMap[subModuleBlock.Uses] = subModuleBlock\n\t}\n\treturn modulesInputMap\n}\n\n// ------------------------------------------------- --------------------------------------------------------------------\n\ntype Filter struct {\n\tName     string `yaml:\"name\" json:\"name\"`\n\tSeverity string `yaml:\"severity\" json:\"severity\"`\n\tProvider string `yaml:\"provider\" json:\"provider\"`\n}\n\n// ModuleBlock Used to represent a common element in the modules array\ntype ModuleBlock struct {\n\n\t// Module name\n\tName string `yaml:\"name\" json:\"name\"`\n\n\t// What other modules are referenced by this module\n\tUses string `yaml:\"uses\" json:\"uses\"`\n\n\t// The module supports specifying some filters\n\tFilter []Filter `yaml:\"filter\" json:\"filter\"`\n\n\t// The module supports specifying some variables\n\tInput map[string]any `yaml:\"input\" 
json:\"input\"`\n\n\t*LocatableImpl `yaml:\"-\"`\n}\n\nvar _ Block = &ModuleBlock{}\n\nfunc NewModuleBlock() *ModuleBlock {\n\treturn &ModuleBlock{\n\t\tLocatableImpl: NewLocatableImpl(),\n\t}\n}\n\nfunc (x *ModuleBlock) Check(module *Module, validatorContext *ValidatorContext) *schema.Diagnostics {\n\n\tdiagnostics := schema.NewDiagnostics()\n\n\tif x.Name == \"\" {\n\t\terrorTips := fmt.Sprintf(\"Module name must not be empty\")\n\t\treport := RenderErrorTemplate(errorTips, x.GetNodeLocation(\"name\"))\n\t\tdiagnostics.AddErrorMsg(report)\n\t}\n\n\tif len(x.Uses) == 0 {\n\t\terrorTips := fmt.Sprintf(\"Module uses must not be empty\")\n\t\treport := RenderErrorTemplate(errorTips, x.GetNodeLocation(\"uses\"))\n\t\tdiagnostics.AddErrorMsg(report)\n\t}\n\n\tif len(x.Input) != 0 {\n\t\tdiagnostics.AddDiagnostics(x.checkInput(module, validatorContext))\n\t}\n\n\treturn diagnostics\n}\n\nfunc (x *ModuleBlock) checkInput(module *Module, validatorContext *ValidatorContext) *schema.Diagnostics {\n\t// nothing to do now\n\treturn nil\n}\n\nfunc (x *ModuleBlock) IsEmpty() bool {\n\tif x == nil {\n\t\treturn true\n\t}\n\treturn x.Name == \"\" && len(x.Uses) == 0 && len(x.Input) == 0\n}\n\n// ------------------------------------------------- --------------------------------------------------------------------\n"
  },
  {
    "path": "pkg/modules/module/providers_block.go",
    "content": "package module\n\nimport (\n\t\"fmt\"\n\t\"github.com/selefra/selefra-provider-sdk/provider/schema\"\n\t\"github.com/selefra/selefra-utils/pkg/pointer\"\n\t\"gopkg.in/yaml.v3\"\n\t\"strconv\"\n)\n\n// ------------------------------------------------- --------------------------------------------------------------------\n\n// ProvidersBlock The root level providers block\ntype ProvidersBlock []*ProviderBlock\n\nvar _ MergableBlock[ProvidersBlock] = (*ProvidersBlock)(nil)\nvar _ Block = (*ProvidersBlock)(nil)\n\nfunc (x ProvidersBlock) ToProviderNameMap() map[string]*ProviderBlock {\n\tm := make(map[string]*ProviderBlock)\n\tfor _, p := range x {\n\t\tm[p.Provider] = p\n\t}\n\treturn m\n}\n\nfunc (x ProvidersBlock) Merge(other ProvidersBlock) (ProvidersBlock, *schema.Diagnostics) {\n\n\tdiagnostics := schema.NewDiagnostics()\n\n\tnameSet := make(map[string]struct{}, 0)\n\tmergedProviders := make([]*ProviderBlock, 0)\n\n\t// merge self\n\tfor _, providerBlock := range x {\n\t\tif _, exists := nameSet[providerBlock.Name]; exists {\n\t\t\terrorTips := fmt.Sprintf(\"Provider with the same name is not allowed in the same module. The provider name %s is duplication\", providerBlock.Name)\n\t\t\treport := RenderErrorTemplate(errorTips, providerBlock.GetNodeLocation(\"\"))\n\t\t\tdiagnostics.AddErrorMsg(report)\n\t\t\tcontinue\n\t\t}\n\t\tmergedProviders = append(mergedProviders, providerBlock)\n\t}\n\n\t// merge other\n\tfor _, providerBlock := range other {\n\t\tif _, exists := nameSet[providerBlock.Name]; exists {\n\t\t\terrorTips := fmt.Sprintf(\"Provider with the same name is not allowed in the same module. 
The provider name %s is duplication\", providerBlock.Name)\n\t\t\treport := RenderErrorTemplate(errorTips, providerBlock.GetNodeLocation(\"\"))\n\t\t\tdiagnostics.AddErrorMsg(report)\n\t\t\tcontinue\n\t\t}\n\t\tmergedProviders = append(mergedProviders, providerBlock)\n\t}\n\n\treturn mergedProviders, diagnostics\n}\n\nfunc (x ProvidersBlock) Check(module *Module, validatorContext *ValidatorContext) *schema.Diagnostics {\n\tdiagnostics := schema.NewDiagnostics()\n\n\tproviderNameSet := make(map[string]struct{}, 0)\n\tfor _, providerBlock := range x {\n\t\tif _, exists := providerNameSet[providerBlock.Name]; exists {\n\t\t\terrorTips := fmt.Sprintf(\"Provider with the same name is not allowed in the same module. The provider name %s is duplication\", providerBlock.Name)\n\t\t\treport := RenderErrorTemplate(errorTips, providerBlock.GetNodeLocation(\"\"))\n\t\t\tdiagnostics.AddErrorMsg(report)\n\t\t\tcontinue\n\t\t}\n\t\tdiagnostics.AddDiagnostics(providerBlock.Check(module, validatorContext))\n\t\tproviderNameSet[providerBlock.Name] = struct{}{}\n\t}\n\treturn diagnostics\n}\n\nfunc (x ProvidersBlock) IsEmpty() bool {\n\treturn len(x) == 0\n}\n\nfunc (x ProvidersBlock) GetNodeLocation(selector string) *NodeLocation {\n\t//TODO implement me\n\tpanic(\"implement me\")\n}\n\nfunc (x ProvidersBlock) SetNodeLocation(selector string, nodeLocation *NodeLocation) error {\n\t//TODO implement me\n\tpanic(\"implement me\")\n}\n\n// ------------------------------------------------- --------------------------------------------------------------------\n\n// ProviderBlock An element in the providers block array at the root level\ntype ProviderBlock struct {\n\n\t// Name of the current block\n\tName string\n\n\t// How long is the cache\n\tCache string\n\n\t// Which of the selefra.providers is associated with\n\tProvider string\n\n\t// What is the maximum concurrency when pulling data\n\tMaxGoroutines *uint64\n\n\t// What resources need to be pulled? 
If you do not write, the default is to pull all resources\n\tResources []string\n\n\t// What are the self-defined configurations of the provider? These should be passed to the provider through\n\tProvidersConfigYamlString string\n\n\t*LocatableImpl `yaml:\"-\"`\n}\n\nvar _ yaml.Marshaler = (*ProviderBlock)(nil)\nvar _ Block = (*ProviderBlock)(nil)\n\nfunc NewProviderBlock() *ProviderBlock {\n\treturn &ProviderBlock{\n\t\tLocatableImpl: NewLocatableImpl(),\n\t}\n}\n\nfunc (x *ProviderBlock) MarshalYAML() (interface{}, error) {\n\tconfigurationMappingNode := &yaml.Node{\n\t\tKind: yaml.MappingNode,\n\t}\n\n\t// name\n\tconfigurationMappingNode.Content = append(configurationMappingNode.Content, &yaml.Node{Kind: yaml.ScalarNode, Value: \"name\"})\n\tconfigurationMappingNode.Content = append(configurationMappingNode.Content, &yaml.Node{Kind: yaml.ScalarNode, Value: x.Name})\n\n\t// cache\n\tconfigurationMappingNode.Content = append(configurationMappingNode.Content, &yaml.Node{Kind: yaml.ScalarNode, Value: \"cache\"})\n\tconfigurationMappingNode.Content = append(configurationMappingNode.Content, &yaml.Node{Kind: yaml.ScalarNode, Value: x.Cache})\n\n\t// provider\n\tconfigurationMappingNode.Content = append(configurationMappingNode.Content, &yaml.Node{Kind: yaml.ScalarNode, Value: \"provider\"})\n\tconfigurationMappingNode.Content = append(configurationMappingNode.Content, &yaml.Node{Kind: yaml.ScalarNode, Value: x.Provider})\n\n\t// max_goroutines\n\tif x.MaxGoroutines != nil {\n\t\tconfigurationMappingNode.Content = append(configurationMappingNode.Content, &yaml.Node{Kind: yaml.ScalarNode, Value: \"max_goroutines\"})\n\t\tconfigurationMappingNode.Content = append(configurationMappingNode.Content, &yaml.Node{Kind: yaml.ScalarNode, Value: strconv.Itoa(int(*x.MaxGoroutines))})\n\t}\n\n\t// resources\n\tif len(x.Resources) != 0 {\n\t\tconfigurationMappingNode.Content = append(configurationMappingNode.Content, &yaml.Node{Kind: yaml.ScalarNode, Value: 
\"resources\"})\n\t\tresourcesNode := &yaml.Node{Kind: yaml.SequenceNode}\n\t\tfor _, resourceName := range x.Resources {\n\t\t\tresourcesNode.Content = append(resourcesNode.Content, &yaml.Node{Kind: yaml.ScalarNode, Value: resourceName})\n\t\t}\n\t\tconfigurationMappingNode.Content = append(configurationMappingNode.Content, resourcesNode)\n\t}\n\n\tif x.ProvidersConfigYamlString != \"\" {\n\t\t//fmt.Println(\"is not empty! \" + x.ProvidersConfigYamlString)\n\t\tvar customProviderConfiguration yaml.Node\n\t\terr := yaml.Unmarshal([]byte(x.ProvidersConfigYamlString), &customProviderConfiguration)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\t// for debug\n\t\t//fmt.Println(fmt.Sprintf(\"Content length: %d\", len(customProviderConfiguration.Content)))\n\t\t//fmt.Println(\"HeadComment： \" + customProviderConfiguration.HeadComment)\n\t\t//fmt.Println(\"FootComment： \" + customProviderConfiguration.FootComment)\n\t\t//fmt.Println(\"LineComment： \" + customProviderConfiguration.LineComment)\n\n\t\tif len(customProviderConfiguration.Content) > 0 {\n\t\t\t//fmt.Println(fmt.Sprintf(\"Content customProviderConfiguration.Content[0].Content length: %d\", len(customProviderConfiguration.Content[0].Content)))\n\t\t\tfor _, node := range customProviderConfiguration.Content[0].Content {\n\t\t\t\tconfigurationMappingNode.Content = append(configurationMappingNode.Content, node)\n\t\t\t}\n\t\t} else if len(configurationMappingNode.Content) != 0 {\n\t\t\t// In the case of all comments, the default configuration item is added after the current item as a comment\n\t\t\tconfigurationMappingNode.Content[len(configurationMappingNode.Content)-1].FootComment = x.ProvidersConfigYamlString\n\t\t}\n\t}\n\n\t//fmt.Println(fmt.Sprintf(\"Content length: %d\", len(configurationMappingNode.Content)))\n\n\treturn configurationMappingNode, nil\n}\n\n// GetDefaultProviderConfigYamlConfiguration If the provider is not configured, this is the default configuration\nfunc 
GetDefaultProviderConfigYamlConfiguration(providerName, providerVersion string) string {\n\tblock := ProviderBlock{\n\t\tName:          \"default-\" + providerName,\n\t\tCache:         \"1d\",\n\t\tProvider:      providerName,\n\t\tMaxGoroutines: pointer.ToUInt64Pointer(50),\n\t}\n\tout, _ := yaml.Marshal(block)\n\treturn string(out)\n}\n\nfunc (x *ProviderBlock) Check(module *Module, validatorContext *ValidatorContext) *schema.Diagnostics {\n\n\tdiagnostics := schema.NewDiagnostics()\n\n\tif x.Name == \"\" {\n\t\terrorTips := fmt.Sprintf(\"Provider configuration name must not be empty\")\n\t\t// TODO maybe nil\n\t\treport := RenderErrorTemplate(errorTips, x.GetNodeLocation(\"name\"))\n\t\tdiagnostics.AddErrorMsg(report)\n\t} else if !CheckIdentity(x.Name) {\n\t\terrorTips := fmt.Sprintf(\"Provider configuration name \" + CheckIdentityErrorMsg)\n\t\t// TODO maybe nil\n\t\treport := RenderErrorTemplate(errorTips, x.GetNodeLocation(\"name\"))\n\t\tdiagnostics.AddErrorMsg(report)\n\t}\n\n\tif x.Provider == \"\" || !module.HasRequiredProviderName(x.Provider) {\n\t\terrorTips := fmt.Sprintf(\"Provider name %s not found in selefra.providers\", x.Provider)\n\t\t// TODO maybe nil\n\t\treport := RenderErrorTemplate(errorTips, x.GetNodeLocation(\"provider\"))\n\t\tdiagnostics.AddErrorMsg(report)\n\t}\n\n\tif x.MaxGoroutines != nil {\n\t\tif *x.MaxGoroutines > 3000 {\n\t\t\terrorTips := fmt.Sprintf(\"Provider %s max_goroutines is too big\", x.Name)\n\t\t\t// TODO maybe nil\n\t\t\treport := RenderErrorTemplate(errorTips, x.GetNodeLocation(\"max_goroutines\"))\n\t\t\tdiagnostics.AddWarn(report)\n\t\t} else if *x.MaxGoroutines == 0 {\n\t\t\terrorTips := fmt.Sprintf(\"Provider %s max_goroutines must greater than 0 \", x.Name)\n\t\t\t// TODO maybe nil\n\t\t\treport := RenderErrorTemplate(errorTips, x.GetNodeLocation(\"max_goroutines\"))\n\t\t\tdiagnostics.AddErrorMsg(report)\n\t\t}\n\t}\n\n\t// The cache may not be filled, but if it is, it must be valid and parsable\n\tif x.Cache 
!= \"\" {\n\t\t// TODO\n\t\t_, err := ParseDuration(x.Cache)\n\t\tif err != nil {\n\t\t\terrorTips := fmt.Sprintf(\"Provider %s cache parse failed: %s \", x.Name, err.Error())\n\t\t\t// TODO maybe nil\n\t\t\treport := RenderErrorTemplate(errorTips, x.GetNodeLocation(\"cache\"))\n\t\t\tdiagnostics.AddErrorMsg(report)\n\t\t}\n\t}\n\n\treturn diagnostics\n}\n\nfunc (x *ProviderBlock) IsEmpty() bool {\n\treturn x.Name == \"\" &&\n\t\tx.Cache == \"\" &&\n\t\tx.Provider == \"\" &&\n\t\tx.MaxGoroutines == nil &&\n\t\tlen(x.Resources) == 0 &&\n\t\tx.ProvidersConfigYamlString == \"\"\n}\n\n// ------------------------------------------------- --------------------------------------------------------------------\n"
  },
  {
    "path": "pkg/modules/module/providers_block_test.go",
    "content": "package module\n\nimport (\n\t\"github.com/stretchr/testify/assert\"\n\t\"gopkg.in/yaml.v3\"\n\t\"testing\"\n)\n\nfunc TestProviderBlock_MarshalYAML(t *testing.T) {\n\tblock := NewProviderBlock()\n\n\tblock.Name = \"foo\"\n\tblock.Cache = \"1d\"\n\tblock.Provider = \"aws\"\n\tblock.ProvidersConfigYamlString = `    #  Optional, Repeated. Add an accounts block for every account you want to assume-role into and fetch data from.\n    accounts:\n      #     Optional. User identification\n      - account_name: <UNIQUE ACCOUNT IDENTIFIER>\n        #    Optional. Named profile in config or credential file from where Selefra should grab credentials\n        shared_config_profile: < PROFILE_NAME >\n        #    Optional. Location of shared configuration files\n        shared_config_files:\n          - <FILE_PATH>\n        #   Optional. Location of shared credentials files\n        shared_credentials_files:\n          - <FILE_PATH>\n        #    Optional. Role ARN we want to assume when accessing this account\n        role_arn: < YOUR_ROLE_ARN >\n        #    Optional. Named role session to grab specific operation under the assumed role\n        role_session_name: <SESSION_NAME>\n        #    Optional. Any outside of the org account id that has additional control\n        external_id: <ID>\n        #    Optional. Designated region of servers\n        default_region: <REGION_CODE>\n        #    Optional. by default assumes all regions\n        regions:\n          - us-east-1\n          - us-west-2\n    #    The maximum number of times that a request will be retried for failures. Defaults to 10 retry attempts.\n    max_attempts: 10\n    #    The maximum back off delay between attempts. The backoff delays exponentially with a jitter based on the number of attempts. Defaults to 30 seconds.\n    max_backoff: 30`\n\n\tout, err := yaml.Marshal(block)\n\tassert.Nil(t, err)\n\tt.Log(string(out))\n}\n"
  },
  {
    "path": "pkg/modules/module/rules_block.go",
    "content": "package module\n\nimport (\n\t\"fmt\"\n\t\"github.com/selefra/selefra-provider-sdk/provider/schema\"\n\t\"strings\"\n)\n\n// ------------------------------------------------- --------------------------------------------------------------------\n\ntype RulesBlock []*RuleBlock\n\nvar _ MergableBlock[RulesBlock] = (*RulesBlock)(nil)\nvar _ Block = (*RulesBlock)(nil)\n\nfunc (x RulesBlock) Merge(other RulesBlock) (RulesBlock, *schema.Diagnostics) {\n\n\tdiagnostics := schema.NewDiagnostics()\n\n\tmergedRules := make(RulesBlock, 0)\n\truleNameSet := make(map[string]struct{}, 0)\n\n\t// merge self\n\tfor _, ruleBlock := range x {\n\t\tif _, exists := ruleNameSet[ruleBlock.Name]; exists {\n\t\t\terrorTips := fmt.Sprintf(\"Rule with the same name is not allowed in the same module. The rule name %s is duplication\", ruleBlock.Name)\n\t\t\treport := RenderErrorTemplate(errorTips, ruleBlock.GetNodeLocation(\"\"))\n\t\t\tdiagnostics.AddErrorMsg(report)\n\t\t\tcontinue\n\t\t}\n\t\truleNameSet[ruleBlock.Name] = struct{}{}\n\t\tmergedRules = append(mergedRules, ruleBlock)\n\t}\n\n\t// merge other\n\tfor _, ruleBlock := range other {\n\t\tif _, exists := ruleNameSet[ruleBlock.Name]; exists {\n\t\t\terrorTips := fmt.Sprintf(\"Rule with the same name is not allowed in the same module. 
The rule name %s is duplication\", ruleBlock.Name)\n\t\t\treport := RenderErrorTemplate(errorTips, ruleBlock.GetNodeLocation(\"\"))\n\t\t\tdiagnostics.AddErrorMsg(report)\n\t\t\tcontinue\n\t\t}\n\t\truleNameSet[ruleBlock.Name] = struct{}{}\n\t\tmergedRules = append(mergedRules, ruleBlock)\n\t}\n\n\treturn mergedRules, diagnostics\n}\n\nfunc (x RulesBlock) Check(module *Module, validatorContext *ValidatorContext) *schema.Diagnostics {\n\tdiagnostics := schema.NewDiagnostics()\n\n\t// Each block should be able to pass inspection\n\tfor _, ruleBlock := range x {\n\t\tdiagnostics.AddDiagnostics(ruleBlock.Check(module, validatorContext))\n\t}\n\n\treturn diagnostics\n}\n\nfunc (x RulesBlock) IsEmpty() bool {\n\treturn len(x) == 0\n}\n\nfunc (x RulesBlock) GetNodeLocation(selector string) *NodeLocation {\n\tpanic(ErrNotSupport)\n}\n\nfunc (x RulesBlock) SetNodeLocation(selector string, nodeLocation *NodeLocation) error {\n\tpanic(ErrNotSupport)\n}\n\n// ------------------------------------------------- --------------------------------------------------------------------\n\ntype GptResponseBlock struct {\n\tTitle       string   `json:\"title\"`\n\tDescription string   `json:\"description\"`\n\tRemediation string   `json:\"remediation\"`\n\tSeverity    string   `json:\"severity\"`\n\tTags        []string `json:\"tags\"`\n\tResource    string   `json:\"resource\"`\n}\n\n// RuleBlock Represents a rule block\ntype RuleBlock struct {\n\n\t// Name of policy\n\tName string `yaml:\"name\" json:\"name\"`\n\n\t// Query statement corresponding to the policy\n\tQuery string `yaml:\"query\" json:\"query\"`\n\n\t// Some custom tags\n\tLabels map[string]interface{} `yaml:\"labels\" json:\"labels\"`\n\n\t// Metadata for the policy\n\tMetadataBlock *RuleMetadataBlock `json:\"metadata\" yaml:\"metadata\"`\n\n\t// Policy output\n\tOutput string `yaml:\"output\" json:\"output\"`\n\n\tMainTable string `yaml:\"main_table\" json:\"main_table\"`\n\n\t*LocatableImpl `yaml:\"-\"`\n}\n\nvar _ Block 
= &RuleBlock{}\nvar _ Validator = &RuleBlock{}\n\nfunc NewRuleBlock() *RuleBlock {\n\treturn &RuleBlock{\n\t\tLocatableImpl: NewLocatableImpl(),\n\t}\n}\n\nfunc (x *RuleBlock) Check(module *Module, validatorContext *ValidatorContext) *schema.Diagnostics {\n\n\tdiagnostics := schema.NewDiagnostics()\n\n\t// name\n\tif x.Name == \"\" {\n\t\terrorTips := fmt.Sprintf(\"Rule name must not be empty\")\n\t\treport := RenderErrorTemplate(errorTips, x.GetNodeLocation(\"name\"))\n\t\tdiagnostics.AddErrorMsg(report)\n\t}\n\n\t// query\n\tif x.Query == \"\" {\n\t\terrorTips := fmt.Sprintf(\"Rule query must not be empty\")\n\t\treport := RenderErrorTemplate(errorTips, x.GetNodeLocation(\"query\"))\n\t\tdiagnostics.AddErrorMsg(report)\n\t}\n\n\t// output\n\tif x.Output == \"\" {\n\t\terrorTips := fmt.Sprintf(\"Rule output must not be empty\")\n\t\treport := RenderErrorTemplate(errorTips, x.GetNodeLocation(\"output\"))\n\t\tdiagnostics.AddErrorMsg(report)\n\t}\n\tif x.MetadataBlock == nil {\n\t\terrorTips := fmt.Sprintf(\"Rule metadata must not be empty\")\n\t\treport := RenderErrorTemplate(errorTips, x.GetNodeLocation(\"metadata\"))\n\t\tdiagnostics.AddErrorMsg(report)\n\t}\n\n\tif x.MetadataBlock != nil {\n\t\tdiagnostics.AddDiagnostics(x.MetadataBlock.Check(module, validatorContext))\n\t}\n\n\treturn diagnostics\n}\n\nfunc (x *RuleBlock) IsEmpty() bool {\n\treturn x.Name == \"\" &&\n\t\tlen(x.Labels) == 0 &&\n\t\tx.Query == \"\" &&\n\t\t(x.MetadataBlock == nil || x.MetadataBlock.IsEmpty()) &&\n\t\tx.Output == \"\"\n}\n\nfunc (x *RuleBlock) Copy() *RuleBlock {\n\truleBlock := &RuleBlock{\n\t\tName:          x.Name,\n\t\tQuery:         x.Query,\n\t\tLabels:        x.Labels,\n\t\tOutput:        x.Output,\n\t\tMainTable:     x.MainTable,\n\t\tLocatableImpl: x.LocatableImpl,\n\t}\n\tif x.MetadataBlock != nil {\n\t\truleBlock.MetadataBlock = x.MetadataBlock.Copy(NewRuleMetadataBlockRuntime(ruleBlock))\n\t}\n\treturn ruleBlock\n}\n\n// 
------------------------------------------------- --------------------------------------------------------------------\n\n// RuleMetadataBlock Represents metadata information for a block\ntype RuleMetadataBlock struct {\n\n\t// A globally unique policy ID\n\tId string `yaml:\"id\" json:\"id\"`\n\n\t// The severity of the problem\n\tSeverity string `yaml:\"severity\" json:\"severity\"`\n\n\t// The Provider to which it is associated\n\tProvider string `yaml:\"provider\" json:\"provider\"`\n\n\t// Some custom tags\n\tTags []string `yaml:\"tags\" json:\"tags\"`\n\n\t// Who is the author of the strategy\n\tAuthor string `yaml:\"author\" json:\"author\"`\n\n\t// The fix must be a local file relative path that points to a Markdown file\n\tRemediation string `yaml:\"remediation\" json:\"remediation\"`\n\n\t// Bug title\n\tTitle string `yaml:\"title\" json:\"title\"`\n\n\t// Some description of the Bug\n\tDescription string `yaml:\"description\" json:\"description\"`\n\n\tMainTable string `yaml:\"main_table\" json:\"main_table\"`\n\n\t*LocatableImpl `yaml:\"-\"`\n\truntime        *RuleMetadataBlockRuntime\n}\n\nvar _ Block = &RuleMetadataBlock{}\nvar _ Validator = &RuleMetadataBlock{}\n\nfunc NewRuleMetadataBlock(rule *RuleBlock) *RuleMetadataBlock {\n\tx := &RuleMetadataBlock{\n\t\tLocatableImpl: NewLocatableImpl(),\n\t}\n\tx.runtime = NewRuleMetadataBlockRuntime(rule)\n\treturn x\n}\n\nfunc (x *RuleMetadataBlock) Check(module *Module, validatorContext *ValidatorContext) *schema.Diagnostics {\n\n\tdiagnostics := schema.NewDiagnostics()\n\t// The rule id must be globally unique if it specifies\n\tif x.Id != \"\" && x.runtime != nil {\n\t\tif _, exists := validatorContext.GetRuleBlockById(x.Id); exists {\n\t\t\terrorTips := fmt.Sprintf(\"Rule metadata id must not duplication: %s\", x.Id)\n\t\t\treport := RenderErrorTemplate(errorTips, x.GetNodeLocation(\"id\"+NodeLocationSelfValue))\n\t\t\tdiagnostics.AddErrorMsg(report)\n\t\t} else 
{\n\t\t\tvalidatorContext.AddRuleBlock(x.runtime.rule)\n\t\t}\n\t}\n\n\tif x.Remediation != \"\" && x.runtime != nil {\n\t\tif strings.Contains(x.Remediation, \"..\") {\n\t\t\terrorTips := fmt.Sprintf(\"Rule %s metadata remediation file path can not contains ..\", x.runtime.rule.Name)\n\t\t\treport := RenderErrorTemplate(errorTips, x.GetNodeLocation(\"remediation\"+NodeLocationSelfValue))\n\t\t\tdiagnostics.AddErrorMsg(report)\n\t\t}\n\t\t//else {\n\t\t//\tremediationFileExists := filepath.Join(utils.AbsPath(module.ModuleLocalDirectory), x.Remediation)\n\t\t//\tif !utils.Exists(remediationFileExists) {\n\t\t//\t\terrorTips := fmt.Sprintf(\"Rule %s metadata remediation file do not exists or it is not file\", x.runtime.rule.Name)\n\t\t//\t\treport := RenderErrorTemplate(errorTips, x.GetNodeLocation(\"remediation\"+NodeLocationSelfValue))\n\t\t//\t\tdiagnostics.AddErrorMsg(report)\n\t\t//\t}\n\t\t//}\n\t}\n\n\treturn diagnostics\n}\n\nfunc (x *RuleMetadataBlock) IsEmpty() bool {\n\treturn x.Id == \"\" &&\n\t\tx.Severity == \"\" &&\n\t\tx.Provider == \"\" &&\n\t\tlen(x.Tags) == 0 &&\n\t\tx.Author == \"\" &&\n\t\tx.Remediation == \"\" &&\n\t\tx.Title == \"\" &&\n\t\tx.Description == \"\"\n}\n\nfunc (x *RuleMetadataBlock) Copy(runtime *RuleMetadataBlockRuntime) *RuleMetadataBlock {\n\treturn &RuleMetadataBlock{\n\t\tId:            x.Id,\n\t\tSeverity:      x.Severity,\n\t\tProvider:      x.Provider,\n\t\tTags:          x.Tags,\n\t\tAuthor:        x.Author,\n\t\tTitle:         x.Title,\n\t\tDescription:   x.Description,\n\t\tRemediation:   x.Remediation,\n\t\tLocatableImpl: x.LocatableImpl,\n\t\truntime:       runtime,\n\t}\n}\n\n// ------------------------------------------------- --------------------------------------------------------------------\n\ntype RuleMetadataBlockRuntime struct {\n\trule *RuleBlock\n}\n\nfunc NewRuleMetadataBlockRuntime(rule *RuleBlock) *RuleMetadataBlockRuntime {\n\treturn &RuleMetadataBlockRuntime{\n\t\trule: rule,\n\t}\n}\n\n// 
------------------------------------------------- --------------------------------------------------------------------\n"
  },
  {
    "path": "pkg/modules/module/selefra_block.go",
    "content": "package module\n\nimport (\n\t\"fmt\"\n\t\"github.com/selefra/selefra-provider-sdk/provider/schema\"\n\t\"github.com/selefra/selefra/pkg/utils\"\n\t\"os\"\n)\n\n// ------------------------------------------------- --------------------------------------------------------------------\n\n// SelefraBlock One of the root-level blocks\ntype SelefraBlock struct {\n\n\t// Name of project\n\tName string `yaml:\"name,omitempty\" mapstructure:\"name,omitempty\"`\n\n\t// selefra CloudBlock-related configuration\n\tCloudBlock *CloudBlock `yaml:\"cloud,omitempty\" mapstructure:\"cloud,omitempty\"`\n\n\tOpenaiApiKey string `yaml:\"openai_api_key,omitempty\" mapstructure:\"openai_api_key,omitempty\"`\n\tOpenaiMode   string `yaml:\"openai_mode,omitempty\" mapstructure:\"openai_mode,omitempty\"`\n\tOpenaiLimit  uint64 `yaml:\"openai_limit,omitempty\" mapstructure:\"openai_limit,omitempty\"`\n\n\t// The version of the cli used by the project\n\tCliVersion string `yaml:\"cli_version,omitempty\" mapstructure:\"cli_version,omitempty\"`\n\n\t// Global log level. 
This level is used when the provider does not specify a log level\n\tLogLevel string `yaml:\"log_level,omitempty\" mapstructure:\"log_level,omitempty\"`\n\n\t//What are the providers required for operation\n\tRequireProvidersBlock RequireProvidersBlock `yaml:\"providers,omitempty\" mapstructure:\"providers,omitempty\"`\n\n\t// The configuration required to connect to the database\n\tConnectionBlock *ConnectionBlock `yaml:\"connection,omitempty\" mapstructure:\"connection,omitempty\"`\n\n\t*LocatableImpl `yaml:\"-\"`\n}\n\nvar _ Block = &SelefraBlock{}\nvar _ MergableBlock[*SelefraBlock] = &SelefraBlock{}\n\nfunc NewSelefraBlock() *SelefraBlock {\n\treturn &SelefraBlock{\n\t\tLocatableImpl: NewLocatableImpl(),\n\t}\n}\n\nfunc (x *SelefraBlock) GetOpenaiApiKey() string {\n\tif x.OpenaiApiKey != \"\" {\n\t\treturn x.OpenaiApiKey\n\t}\n\treturn os.Getenv(\"OPENAI_API_KEY\")\n}\n\nfunc (x *SelefraBlock) GetOpenaiMode() string {\n\tif x.OpenaiMode != \"\" {\n\t\treturn x.OpenaiMode\n\t}\n\tif os.Getenv(\"OPENAI_MODE\") != \"\" {\n\t\treturn os.Getenv(\"OPENAI_MODE\")\n\t}\n\treturn \"gpt-3.5\"\n}\n\nfunc (x *SelefraBlock) GetOpenaiLimit() uint64 {\n\tif x.OpenaiLimit != 0 {\n\t\treturn x.OpenaiLimit\n\t}\n\tif os.Getenv(\"OPENAI_LIMIT\") != \"\" {\n\t\tlimit := os.Getenv(\"OPENAI_LIMIT\")\n\t\treturn utils.StringToUint64(limit)\n\t}\n\treturn 10\n}\n\nfunc (x *SelefraBlock) Merge(other *SelefraBlock) (*SelefraBlock, *schema.Diagnostics) {\n\n\tdiagnostics := schema.NewDiagnostics()\n\tmergedSelefraBlock := &SelefraBlock{}\n\n\t// CloudBlock\n\tif x.CloudBlock != nil && other.CloudBlock != nil {\n\t\terrorTips := fmt.Sprintf(\"selefra cloud block can not duplicated\")\n\t\treport := RenderErrorTemplate(errorTips, x.CloudBlock.GetNodeLocation(\"\"))\n\t\tdiagnostics.AddErrorMsg(report)\n\t} else if x.CloudBlock != nil {\n\t\tmergedSelefraBlock.CloudBlock = x.CloudBlock\n\t} else {\n\t\tmergedSelefraBlock.CloudBlock = other.CloudBlock\n\t}\n\n\t// Name\n\tif x.Name != \"\" 
&& other.Name != \"\" {\n\t\terrorTips := fmt.Sprintf(\"selefra name block can not duplicated\")\n\t\treport := RenderErrorTemplate(errorTips, x.GetNodeLocation(\"name\"))\n\t\tdiagnostics.AddErrorMsg(report)\n\t} else if x.Name != \"\" {\n\t\tmergedSelefraBlock.Name = x.Name\n\t} else {\n\t\tmergedSelefraBlock.Name = other.Name\n\t}\n\n\t// CliVersion\n\tif x.CliVersion != \"\" && other.CliVersion != \"\" {\n\t\terrorTips := fmt.Sprintf(\"selefra cli_version block can not duplicated\")\n\t\treport := RenderErrorTemplate(errorTips, x.GetNodeLocation(\"cli_version\"))\n\t\tdiagnostics.AddErrorMsg(report)\n\t} else if x.CliVersion != \"\" {\n\t\tmergedSelefraBlock.CliVersion = x.CliVersion\n\t} else {\n\t\tmergedSelefraBlock.CliVersion = other.CliVersion\n\t}\n\n\t// LogLevel\n\tif x.LogLevel != \"\" && other.LogLevel != \"\" {\n\t\terrorTips := fmt.Sprintf(\"selefra log_level block can not duplicated\")\n\t\treport := RenderErrorTemplate(errorTips, x.GetNodeLocation(\"log_level\"))\n\t\tdiagnostics.AddErrorMsg(report)\n\t} else if x.LogLevel != \"\" {\n\t\tmergedSelefraBlock.LogLevel = x.LogLevel\n\t} else {\n\t\tmergedSelefraBlock.LogLevel = other.LogLevel\n\t}\n\n\t// only RequireProvidersBlock can merge\n\tif x.RequireProvidersBlock != nil && other.RequireProvidersBlock != nil {\n\t\tmerge, d := x.RequireProvidersBlock.Merge(other.RequireProvidersBlock)\n\t\tdiagnostics.AddDiagnostics(d)\n\t\tif utils.NotHasError(d) {\n\t\t\tmergedSelefraBlock.RequireProvidersBlock = merge\n\t\t}\n\t} else if x.RequireProvidersBlock != nil {\n\t\tmergedSelefraBlock.RequireProvidersBlock = x.RequireProvidersBlock\n\t} else {\n\t\tmergedSelefraBlock.RequireProvidersBlock = other.RequireProvidersBlock\n\t}\n\n\t// ConnectionBlock\n\tif x.ConnectionBlock != nil && other.ConnectionBlock != nil {\n\t\terrorTips := fmt.Sprintf(\"selefra connection block can not duplicated\")\n\t\treport := RenderErrorTemplate(errorTips, 
x.ConnectionBlock.GetNodeLocation(\"\"))\n\t\tdiagnostics.AddErrorMsg(report)\n\t} else if x.ConnectionBlock != nil {\n\t\tmergedSelefraBlock.ConnectionBlock = x.ConnectionBlock\n\t} else {\n\t\tmergedSelefraBlock.ConnectionBlock = other.ConnectionBlock\n\t}\n\n\treturn mergedSelefraBlock, diagnostics\n}\n\nfunc (x *SelefraBlock) Check(module *Module, validatorContext *ValidatorContext) *schema.Diagnostics {\n\n\tdiagnostics := schema.NewDiagnostics()\n\n\t// The local name of the project\n\tif x.Name == \"\" {\n\t\terrorTips := fmt.Sprintf(\"selefra name must not be empty\")\n\t\treport := RenderErrorTemplate(errorTips, x.GetNodeLocation(\"name\"))\n\t\tdiagnostics.AddErrorMsg(report)\n\t}\n\n\t// cloud block is optional, but if it is configured, it must be legal\n\tif x.CloudBlock != nil {\n\t\tdiagnostics.AddDiagnostics(x.CloudBlock.Check(module, validatorContext))\n\t}\n\n\tif x.ConnectionBlock != nil {\n\t\tx.ConnectionBlock.Check(module, validatorContext)\n\t}\n\n\t// TODO To be determined, after discussion to determine the logic\n\t//if len(x.RequireProvidersBlock) == 0 {\n\t//\tdiagnostics.AddErrorMsg(\"selefra.providers can not be empty\")\n\t//} else {\n\t//\tdiagnostics.AddDiagnostics(x.RequireProvidersBlock.Check(module, validatorContext))\n\t//}\n\n\treturn diagnostics\n}\n\nfunc (x *SelefraBlock) IsEmpty() bool {\n\treturn x.Name == \"\" &&\n\t\t(x.CloudBlock == nil || x.CloudBlock.IsEmpty()) &&\n\t\tx.CliVersion == \"\" &&\n\t\tx.LogLevel == \"\" &&\n\t\tlen(x.RequireProvidersBlock) == 0 &&\n\t\tx.ConnectionBlock == nil\n}\n\n// ------------------------------------------------- --------------------------------------------------------------------\n\n// CloudBlock CloudBlock-related configuration\ntype CloudBlock struct {\n\n\t// Which project in the cloud is associated with\n\tProject string `yaml:\"project,omitempty\" mapstructure:\"project,omitempty\"`\n\n\t//\n\tOrganization string `yaml:\"organization,omitempty\" 
mapstructure:\"organization,omitempty\"`\n\n\t// Debug parameters, temporarily masked\n\tHostName string `yaml:\"hostname,omitempty\" mapstructure:\"hostname,omitempty\"`\n\n\t*LocatableImpl `yaml:\"-\"`\n}\n\nvar _ Block = &CloudBlock{}\n\nfunc NewCloudBlock() *CloudBlock {\n\treturn &CloudBlock{\n\t\tLocatableImpl: NewLocatableImpl(),\n\t}\n}\n\nfunc (x *CloudBlock) Check(module *Module, validatorContext *ValidatorContext) *schema.Diagnostics {\n\tdiagnostics := schema.NewDiagnostics()\n\n\t// check project name\n\tif x.Project == \"\" {\n\t\terrorTips := fmt.Sprintf(\"cloud project must not be empty\")\n\t\treport := RenderErrorTemplate(errorTips, x.GetNodeLocation(\"project\"))\n\t\tdiagnostics.AddErrorMsg(report)\n\t}\n\n\treturn diagnostics\n}\n\nfunc (x *CloudBlock) IsEmpty() bool {\n\treturn x.Project == \"\" && x.Organization == \"\" && x.HostName == \"\"\n}\n\n// ------------------------------------------------- --------------------------------------------------------------------\n\n// ConnectionBlock for db connection\n// Example:\n//\n//\tconnection:\n//\t   type: postgres\n//\t   username: postgres\n//\t   password: pass\n//\t   host: localhost\n//\t   port: 5432\n//\t   database: postgres\n//\t   sslmode: disable\ntype ConnectionBlock struct {\n\t// These params are mutually exclusive with DSN\n\tType     string   `yaml:\"type,omitempty\" json:\"type,omitempty\"`\n\tUsername string   `yaml:\"username,omitempty\" json:\"username,omitempty\"`\n\tPassword string   `yaml:\"password,omitempty\" json:\"password,omitempty\"`\n\tHost     string   `yaml:\"host,omitempty\" json:\"host,omitempty\"`\n\tPort     *uint64  `yaml:\"port,omitempty\" json:\"port,omitempty\"`\n\tDatabase string   `yaml:\"database,omitempty\" json:\"database,omitempty\"`\n\tSSLMode  string   `yaml:\"sslmode,omitempty\" json:\"sslmode,omitempty\"`\n\tExtras   []string `yaml:\"extras,omitempty\" json:\"extras,omitempty\"`\n\n\t*LocatableImpl `yaml:\"-\"`\n}\n\nvar _ Block = 
&ConnectionBlock{}\n\nfunc NewConnectionBlock() *ConnectionBlock {\n\treturn &ConnectionBlock{\n\t\tLocatableImpl: NewLocatableImpl(),\n\t}\n}\n\n// ParseConnectionBlockFromDSN convert dsn to connection block\nfunc ParseConnectionBlockFromDSN(dsn string) *ConnectionBlock {\n\t// TODO\n\treturn nil\n}\n\nfunc (x *ConnectionBlock) BuildDSN() string {\n\treturn fmt.Sprintf(\"host=%s user=%s password=%s port=%d dbname=%s sslmode=%s\", x.Host, x.Username, x.Password, *x.Port, x.Database, x.SSLMode)\n}\n\nfunc (x *ConnectionBlock) Check(module *Module, validatorContext *ValidatorContext) *schema.Diagnostics {\n\tdiagnostics := schema.NewDiagnostics()\n\n\tif x.Type == \"\" {\n\t\terrorTips := fmt.Sprintf(\"Connection type must not be empty\")\n\t\treport := RenderErrorTemplate(errorTips, x.GetNodeLocation(\"type\"))\n\t\tdiagnostics.AddErrorMsg(report)\n\t}\n\n\tif x.Host == \"\" {\n\t\terrorTips := fmt.Sprintf(\"Connection host must not be empty\")\n\t\treport := RenderErrorTemplate(errorTips, x.GetNodeLocation(\"host\"))\n\t\tdiagnostics.AddErrorMsg(report)\n\t}\n\n\t// Add safety Tips\n\tif x.Username != \"\" && x.Password == \"\" {\n\t\terrorTips := fmt.Sprintf(\"For security reasons, it is not recommended that you use an empty password when connecting to the database\")\n\t\treport := RenderErrorTemplate(errorTips, x.GetNodeLocation(\"password\"))\n\t\tdiagnostics.AddWarn(report)\n\t}\n\n\tif x.Database == \"\" {\n\t\terrorTips := fmt.Sprintf(\"Connection database must not be empty\")\n\t\treport := RenderErrorTemplate(errorTips, x.GetNodeLocation(\"database\"))\n\t\tdiagnostics.AddErrorMsg(report)\n\t}\n\n\treturn diagnostics\n}\n\nfunc (x *ConnectionBlock) IsEmpty() bool {\n\treturn x.Type == \"\" &&\n\t\tx.Username == \"\" &&\n\t\tx.Password == \"\" &&\n\t\tx.Host == \"\" &&\n\t\tx.Port == nil &&\n\t\tx.Database == \"\" &&\n\t\tx.SSLMode == \"\" &&\n\t\tlen(x.Extras) == 0\n}\n\n// ------------------------------------------------- 
--------------------------------------------------------------------\n\ntype RequireProvidersBlock []*RequireProviderBlock\n\nvar _ MergableBlock[RequireProvidersBlock] = &RequireProvidersBlock{}\nvar _ Block = &RequireProvidersBlock{}\n\nfunc (x RequireProvidersBlock) BuildNameToProviderBlockMap() map[string]*RequireProviderBlock {\n\tm := make(map[string]*RequireProviderBlock)\n\tfor _, r := range x {\n\t\tm[r.Name] = r\n\t}\n\treturn m\n}\n\nfunc (x RequireProvidersBlock) Merge(other RequireProvidersBlock) (RequireProvidersBlock, *schema.Diagnostics) {\n\n\tdiagnostics := schema.NewDiagnostics()\n\n\tproviderNameSet := make(map[string]struct{})\n\tmergedRequireProvidersBlock := make(RequireProvidersBlock, 0)\n\n\t// merge self\n\tfor _, requireProviderBlock := range x {\n\t\tif _, exists := providerNameSet[requireProviderBlock.Name]; exists {\n\t\t\terrorTips := fmt.Sprintf(\"Selefra required providers with the same name is not allowed in the same module. The required provider name %s is the duplication\", requireProviderBlock.Name)\n\t\t\treport := RenderErrorTemplate(errorTips, requireProviderBlock.GetNodeLocation(\"\"))\n\t\t\tdiagnostics.AddErrorMsg(report)\n\t\t\tcontinue\n\t\t}\n\t\tproviderNameSet[requireProviderBlock.Name] = struct{}{}\n\t\tmergedRequireProvidersBlock = append(mergedRequireProvidersBlock, requireProviderBlock)\n\t}\n\n\t// merge other\n\tfor _, requireProviderBlock := range other {\n\t\tif _, exists := providerNameSet[requireProviderBlock.Name]; exists {\n\t\t\terrorTips := fmt.Sprintf(\"Selefra required providers with the same name is not allowed in the same module. 
The required provider name %s is the duplication\", requireProviderBlock.Name)\n\t\t\treport := RenderErrorTemplate(errorTips, requireProviderBlock.GetNodeLocation(\"\"))\n\t\t\tdiagnostics.AddErrorMsg(report)\n\t\t\tcontinue\n\t\t}\n\t\tproviderNameSet[requireProviderBlock.Name] = struct{}{}\n\t\tmergedRequireProvidersBlock = append(mergedRequireProvidersBlock, requireProviderBlock)\n\t}\n\n\treturn mergedRequireProvidersBlock, diagnostics\n}\n\nfunc (x RequireProvidersBlock) Check(module *Module, validatorContext *ValidatorContext) *schema.Diagnostics {\n\n\tdiagnostics := schema.NewDiagnostics()\n\n\tproviderNameSet := make(map[string]struct{})\n\tproviderSourceSet := make(map[string]struct{})\n\n\tfor _, requireProviderBlock := range x {\n\n\t\tif _, exists := providerNameSet[requireProviderBlock.Name]; exists {\n\t\t\terrorTips := fmt.Sprintf(\"Selefra required providers with the same name is not allowed in the same module. The required provider name %s is the duplication\", requireProviderBlock.Name)\n\t\t\treport := RenderErrorTemplate(errorTips, requireProviderBlock.GetNodeLocation(\"name\"))\n\t\t\tdiagnostics.AddErrorMsg(report)\n\t\t\tcontinue\n\t\t}\n\t\tproviderNameSet[requireProviderBlock.Name] = struct{}{}\n\n\t\tif _, exists := providerSourceSet[requireProviderBlock.Source]; exists {\n\t\t\terrorTips := fmt.Sprintf(\"Selefra required providers with the same source is not allowed in the same module. 
The required provider source %s is the duplication\", requireProviderBlock.Source)\n\t\t\treport := RenderErrorTemplate(errorTips, requireProviderBlock.GetNodeLocation(\"source\"))\n\t\t\tdiagnostics.AddErrorMsg(report)\n\t\t\tcontinue\n\t\t}\n\t\tproviderSourceSet[requireProviderBlock.Source] = struct{}{}\n\n\t\tdiagnostics.AddDiagnostics(requireProviderBlock.Check(module, validatorContext))\n\t}\n\n\treturn diagnostics\n}\n\nfunc (x RequireProvidersBlock) IsEmpty() bool {\n\treturn len(x) == 0\n}\n\nfunc (x RequireProvidersBlock) GetNodeLocation(selector string) *NodeLocation {\n\tpanic(\"not supported\")\n}\n\nfunc (x RequireProvidersBlock) SetNodeLocation(selector string, nodeLocation *NodeLocation) error {\n\tpanic(\"not supported\")\n}\n\n// ------------------------------------------------- --------------------------------------------------------------------\n\n// RequireProviderBlock Specifies the version of the Provider to be installed\ntype RequireProviderBlock struct {\n\n\t// The name of this constraint\n\tName string `yaml:\"name,omitempty\" json:\"name,omitempty\"`\n\n\t// Where does the Provider load from\n\tSource string `yaml:\"source,omitempty\" json:\"source,omitempty\"`\n\n\t// Version requirements for this provider\n\tVersion string `yaml:\"version,omitempty\" json:\"version,omitempty\"`\n\n\t// The debug parameter, if configured, uses the given path instead of downloading\n\tPath string `yaml:\"path,omitempty\" json:\"path,omitempty\"`\n\n\t//runtime *RequireProviderBlockRuntime\n\t*LocatableImpl `yaml:\"-\"`\n}\n\nvar _ Block = &RequireProviderBlock{}\n\n//var _ HaveRuntime[*RequireProviderBlockRuntime] = &RequireProviderBlock{}\n\nfunc NewRequireProviderBlock() *RequireProviderBlock {\n\tx := &RequireProviderBlock{\n\t\tLocatableImpl: NewLocatableImpl(),\n\t}\n\t//x.runtime = NewRequireProviderBlockRuntime(x)\n\treturn x\n}\n\nfunc (x *RequireProviderBlock) Check(module *Module, validatorContext *ValidatorContext) *schema.Diagnostics 
{\n\n\tdiagnostics := schema.NewDiagnostics()\n\n\tif x.Name == \"\" {\n\t\terrorTips := fmt.Sprintf(\"Required provider name must not be empty\")\n\t\treport := RenderErrorTemplate(errorTips, x.GetNodeLocation(\"name\"))\n\t\tdiagnostics.AddErrorMsg(report)\n\t}\n\n\tif x.Source == \"\" {\n\t\terrorTips := fmt.Sprintf(\"Required provider source must not be empty\")\n\t\treport := RenderErrorTemplate(errorTips, x.GetNodeLocation(\"source\"))\n\t\tdiagnostics.AddErrorMsg(report)\n\t}\n\n\t//if x.Version == \"\" {\n\t//\t// TODO block location\n\t//\tdiagnostics.AddErrorMsg(\"selefra.providers.version can not be empty\")\n\t//}\n\n\t// check file is exists\n\tif x.Path != \"\" {\n\t\tif !utils.ExistsFile(x.Path) {\n\t\t\terrorTips := fmt.Sprintf(\"Required provider path not exists: %s\", x.Path)\n\t\t\treport := RenderErrorTemplate(errorTips, x.GetNodeLocation(\"path\"))\n\t\t\tdiagnostics.AddErrorMsg(report)\n\t\t}\n\t}\n\n\t//diagnostics.AddDiagnostics(x.runtime.check())\n\n\treturn diagnostics\n}\n\nfunc (x *RequireProviderBlock) IsEmpty() bool {\n\treturn x.Name == \"\" && x.Source == \"\" && x.Version == \"\" && x.Path == \"\"\n}\n\n//func (x *RequireProviderBlock) Runtime() *RequireProviderBlockRuntime {\n//\treturn x.runtime\n//}\n\n// ------------------------------------------------- --------------------------------------------------------------------\n//\n//type RequireProviderBlockRuntime struct {\n//\tblock *RequireProviderBlock\n//\n//\t// Parsed version constraint\n//\tConstraints version.Constraints\n//}\n//\n//func NewRequireProviderBlockRuntime(block *RequireProviderBlock) *RequireProviderBlockRuntime {\n//\treturn &RequireProviderBlockRuntime{\n//\t\tblock:       block,\n//\t\tConstraints: nil,\n//\t}\n//}\n//\n//func (x *RequireProviderBlockRuntime) check() *schema.Diagnostics {\n//\treturn x.ensureConstraints()\n//}\n//\n//// IsConstraintsAllow Determines whether the given version conforms to the version constraint\n//func (x 
*RequireProviderBlockRuntime) IsConstraintsAllow(version *version.Version) (bool, *schema.Diagnostics) {\n//\td := x.ensureConstraints()\n//\tif utils.HasError(d) {\n//\t\treturn false, d\n//\t}\n//\n//\t// Any version can meet the constraints\n//\tfor _, c := range x.Constraints {\n//\t\tif c.Check(version) {\n//\t\t\treturn true, nil\n//\t\t}\n//\t}\n//\treturn false, nil\n//}\n//\n//func (x *RequireProviderBlockRuntime) ensureConstraints() *schema.Diagnostics {\n//\tif x.Constraints != nil {\n//\t\treturn nil\n//\t}\n//\t// Parse the version into structured information\n//\tconstraint, err := version.NewConstraint(x.block.Version)\n//\tif err != nil {\n//\t\t// TODO block location\n//\t\treturn schema.NewDiagnostics().AddErrorMsg(\"parse version constraints error\")\n//\t}\n//\tx.Constraints = constraint\n//\treturn nil\n//}\n\n// ------------------------------------------------- --------------------------------------------------------------------\n\n// TODO wait discussion, Add some configuration blocks to support a private registry\n//type RegistryBlock struct {\n//\tType        string\n//\tPrivate     bool\n//\tRegistryUrl string\n//\tSource      string\n//\tToken       string\n//\tTokenEnv    string\n//}\n\n// ------------------------------------------------ ---------------------------------------------------------------------\n"
  },
  {
    "path": "pkg/modules/module/validator.go",
    "content": "package module\n\nimport (\n\t\"strconv\"\n\t\"strings\"\n\t\"time\"\n)\n\n// ------------------------------------------------- --------------------------------------------------------------------\n\n// ValidatorContext Some global context information stored during validation\ntype ValidatorContext struct {\n\n\t// Global collection of rule ids\n\tRulesIdSet map[string]*RuleBlock\n\n\t// All module names, if there are module names such as the same name should be able to check out\n\tModuleNameSet map[string]*ModuleBlock\n}\n\n// NewValidatorContext Create a validation context\nfunc NewValidatorContext() *ValidatorContext {\n\treturn &ValidatorContext{\n\t\tRulesIdSet:    make(map[string]*RuleBlock),\n\t\tModuleNameSet: make(map[string]*ModuleBlock),\n\t}\n}\n\n// AddRuleBlock Add rules to the validation context\nfunc (x *ValidatorContext) AddRuleBlock(ruleBlock *RuleBlock) {\n\tif ruleBlock.MetadataBlock != nil {\n\t\tx.RulesIdSet[ruleBlock.MetadataBlock.Id] = ruleBlock\n\t}\n}\n\n// GetRuleBlockById Determine whether the given rule is in context\nfunc (x *ValidatorContext) GetRuleBlockById(ruleId string) (*RuleBlock, bool) {\n\truleBlock, exists := x.RulesIdSet[ruleId]\n\treturn ruleBlock, exists\n}\n\n// AddModuleBlock Adds the module to the current validator context\nfunc (x *ValidatorContext) AddModuleBlock(moduleBlock *ModuleBlock) {\n\tx.ModuleNameSet[moduleBlock.Name] = moduleBlock\n}\n\n// GetModuleByName Gets the module in the validation context\nfunc (x *ValidatorContext) GetModuleByName(moduleName string) (*ModuleBlock, bool) {\n\tmoduleBlock, exists := x.ModuleNameSet[moduleName]\n\treturn moduleBlock, exists\n}\n\n// ------------------------------------------------- --------------------------------------------------------------------\n\nconst CheckIdentityErrorMsg = \"only allow \\\"a-z,A-Z,0-9,_\\\" and can't start with a number\"\n\nfunc CheckIdentity(s string) bool {\n\n\tif len(s) == 0 {\n\t\treturn false\n\t}\n\n\t// And you can't 
start with a number\n\tif s[0] >= '0' && s[0] <= '9' {\n\t\treturn false\n\t}\n\n\t// Only the given character can be used\n\tfor _, c := range s {\n\t\tisOk := (c >= 'a' && c <= 'z') || (c >= 'A' && c <= 'Z') || (c >= '0' && c <= '9') || (c == '_')\n\t\tif !isOk {\n\t\t\treturn false\n\t\t}\n\t}\n\treturn true\n}\n\n// ------------------------------------------------- --------------------------------------------------------------------\n\n// ParseDuration\n//\n//\tfunc ParseDuration(d string) (time.Duration, error) {\n//\t\td = strings.TrimSpace(d)\n//\t\tdr, err := time.ParseDuration(d)\n//\t\tif err == nil {\n//\t\t\treturn dr, nil\n//\t\t}\n//\t\tif strings.Contains(d, \"d\") {\n//\t\t\tindex := strings.Index(d, \"d\")\n//\t\t\thour, err := strconv.Atoi(d[:index])\n//\t\t\tif err != nil {\n//\t\t\t\treturn dr, err\n//\t\t\t}\n//\t\t\tdr = time.Hour * 24 * time.Duration(hour)\n//\t\t\ts := d[index+1:]\n//\t\t\tif s != \"\" {\n//\t\t\t\tndr, err := time.ParseDuration(d[index+1:])\n//\t\t\t\tif err != nil {\n//\t\t\t\t\treturn dr, err\n//\t\t\t\t}\n//\t\t\t\tdr += ndr\n//\t\t\t}\n//\t\t\treturn dr, nil\n//\t\t}\n//\n//\t\tdv, err := strconv.ParseInt(d, 10, 64)\n//\t\treturn time.Duration(dv), err\n//\t}\nfunc ParseDuration(d string) (time.Duration, error) {\n\td = strings.TrimSpace(d)\n\tdr, err := time.ParseDuration(d)\n\tif err == nil {\n\t\treturn dr, nil\n\t}\n\tif strings.Contains(d, \"d\") {\n\t\tindex := strings.Index(d, \"d\")\n\t\thour, err := strconv.Atoi(d[:index])\n\t\tif err != nil {\n\t\t\treturn dr, err\n\t\t}\n\t\tdr = time.Hour * 24 * time.Duration(hour)\n\t\ts := d[index+1:]\n\t\tif s != \"\" {\n\t\t\tndr, err := time.ParseDuration(d[index+1:])\n\t\t\tif err != nil {\n\t\t\t\treturn dr, err\n\t\t\t}\n\t\t\tdr += ndr\n\t\t}\n\t\treturn dr, nil\n\t}\n\tif err != nil {\n\t\treturn 0, err\n\t}\n\tdv, err := strconv.ParseInt(d, 10, 64)\n\treturn time.Duration(dv), err\n}\n\n// ------------------------------------------------- 
--------------------------------------------------------------------\n"
  },
  {
    "path": "pkg/modules/module/validator_test.go",
    "content": "package module\n\nimport (\n\t\"github.com/stretchr/testify/assert\"\n\t\"testing\"\n)\n\nfunc TestParseDuration(t *testing.T) {\n\ts := \"1d\"\n\tduration, err := ParseDuration(s)\n\tassert.Nil(t, err)\n\tassert.NotEqual(t, 0, duration)\n}\n"
  },
  {
    "path": "pkg/modules/module/variables_block.go",
    "content": "package module\n\nimport (\n\t\"fmt\"\n\t\"github.com/selefra/selefra-provider-sdk/provider/schema\"\n\t\"github.com/selefra/selefra-utils/pkg/reflect_util\"\n)\n\n// ------------------------------------------------- --------------------------------------------------------------------\n\n// VariablesBlock One of the root-level code blocks\ntype VariablesBlock []*VariableBlock\n\nvar _ Block = (*VariablesBlock)(nil)\nvar _ MergableBlock[VariablesBlock] = (*VariablesBlock)(nil)\n\nfunc (x VariablesBlock) Merge(other VariablesBlock) (VariablesBlock, *schema.Diagnostics) {\n\n\tdiagnostics := schema.NewDiagnostics()\n\n\tvariableKeySet := make(map[string]struct{}, 0)\n\tmergedVariables := make(VariablesBlock, 0)\n\n\t// merge self\n\tfor _, variableBlock := range x {\n\t\tif _, exists := variableKeySet[variableBlock.Key]; exists {\n\t\t\terrorTips := fmt.Sprintf(\"Variable with the same key is not allowed in the same module. The key %s is duplication\", variableBlock.Key)\n\t\t\treport := RenderErrorTemplate(errorTips, variableBlock.GetNodeLocation(\"\"))\n\t\t\tdiagnostics.AddErrorMsg(report)\n\t\t\tcontinue\n\t\t}\n\t\tmergedVariables = append(mergedVariables, variableBlock)\n\t\tvariableKeySet[variableBlock.Key] = struct{}{}\n\t}\n\n\t// merge other\n\tfor _, variableBlock := range other {\n\t\tif _, exists := variableKeySet[variableBlock.Key]; exists {\n\t\t\terrorTips := fmt.Sprintf(\"Variable with the same key is not allowed in the same module. 
The key %s is duplication\", variableBlock.Key)\n\t\t\treport := RenderErrorTemplate(errorTips, variableBlock.GetNodeLocation(\"\"))\n\t\t\tdiagnostics.AddErrorMsg(report)\n\t\t\tcontinue\n\t\t}\n\t\tmergedVariables = append(mergedVariables, variableBlock)\n\t\tvariableKeySet[variableBlock.Key] = struct{}{}\n\t}\n\n\treturn mergedVariables, diagnostics\n}\n\nfunc (x VariablesBlock) Check(module *Module, validatorContext *ValidatorContext) *schema.Diagnostics {\n\n\tdiagnostics := schema.NewDiagnostics()\n\n\tvariableKeySet := make(map[string]struct{}, 0)\n\tfor _, variableBlock := range x {\n\t\tif _, exists := variableKeySet[variableBlock.Key]; exists {\n\t\t\terrorTips := fmt.Sprintf(\"Variable with the same key is not allowed in the same module. The key %s is duplication\", variableBlock.Key)\n\t\t\treport := RenderErrorTemplate(errorTips, variableBlock.GetNodeLocation(\"\"))\n\t\t\tdiagnostics.AddErrorMsg(report)\n\t\t\tcontinue\n\t\t}\n\t\tvariableKeySet[variableBlock.Key] = struct{}{}\n\t\tdiagnostics.AddDiagnostics(variableBlock.Check(module, validatorContext))\n\t}\n\n\treturn diagnostics\n}\n\nfunc (x VariablesBlock) IsEmpty() bool {\n\treturn len(x) == 0\n}\n\nfunc (x VariablesBlock) GetNodeLocation(selector string) *NodeLocation {\n\tpanic(\"not supported\")\n}\n\nfunc (x VariablesBlock) SetNodeLocation(selector string, nodeLocation *NodeLocation) error {\n\tpanic(\"not supported\")\n}\n\n// ------------------------------------------------- --------------------------------------------------------------------\n\n// VariableBlock Used to declare a variable\ntype VariableBlock struct {\n\n\t// Name of a variable\n\tKey string `yaml:\"key\" json:\"key\"`\n\n\t// The default value of the variable\n\tDefault any `yaml:\"default\" json:\"default\"`\n\n\t// A description of this variable\n\tDescription string `yaml:\"description\" json:\"description\"`\n\n\t// Who is the author of the variable? 
What the hell is this?\n\tAuthor string `yaml:\"author\" json:\"author\"`\n\n\t*LocatableImpl `yaml:\"-\"`\n}\n\nvar _ Block = &VariableBlock{}\n\nfunc NewVariableBlock() *VariableBlock {\n\treturn &VariableBlock{\n\t\tLocatableImpl: NewLocatableImpl(),\n\t}\n}\n\nfunc (x *VariableBlock) Check(module *Module, validatorContext *ValidatorContext) *schema.Diagnostics {\n\n\tdiagnostics := schema.NewDiagnostics()\n\n\tif x.Key == \"\" {\n\t\terrorTips := fmt.Sprintf(\"Variable key must not be empty\")\n\t\treport := RenderErrorTemplate(errorTips, x.GetNodeLocation(\"key\"))\n\t\tdiagnostics.AddErrorMsg(report)\n\t}\n\n\tif reflect_util.IsNil(x.Default) {\n\t\terrorTips := fmt.Sprintf(\"Variable default must not be empty\")\n\t\treport := RenderErrorTemplate(errorTips, x.GetNodeLocation(\"default\"))\n\t\tdiagnostics.AddErrorMsg(report)\n\t}\n\n\treturn diagnostics\n}\n\nfunc (x *VariableBlock) IsEmpty() bool {\n\treturn x.Key == \"\" &&\n\t\treflect_util.IsNil(x.Default) &&\n\t\tx.Description == \"\" &&\n\t\tx.Author == \"\"\n}\n\n// ------------------------------------------------- --------------------------------------------------------------------\n"
  },
  {
    "path": "pkg/modules/module_loader/github_registry_module_loader.go",
    "content": "package module_loader\n\nimport (\n\t\"context\"\n\t\"fmt\"\n\t\"github.com/selefra/selefra-provider-sdk/provider/schema\"\n\t\"github.com/selefra/selefra-utils/pkg/pointer\"\n\t\"github.com/selefra/selefra/pkg/modules/module\"\n\t\"github.com/selefra/selefra/pkg/registry\"\n\t\"github.com/selefra/selefra/pkg/utils\"\n\t\"github.com/selefra/selefra/pkg/version\"\n\t\"path/filepath\"\n)\n\n// ------------------------------------------------- --------------------------------------------------------------------\n\n// GitHubRegistryModuleLoaderOptions Options when creating the GitHub Registry\ntype GitHubRegistryModuleLoaderOptions struct {\n\t*ModuleLoaderOptions\n\n\t// The full name of the Registry's repository\n\tRegistryRepoFullName string `json:\"registry-repo-full-name\" yaml:\"registry-repo-full-name\"`\n}\n\n// ------------------------------------------------- --------------------------------------------------------------------\n\n// GitHubRegistryModuleLoader Load the module from GitHub's Registry\ntype GitHubRegistryModuleLoader struct {\n\tgithubRegistry *registry.ModuleGitHubRegistry\n\toptions        *GitHubRegistryModuleLoaderOptions\n\n\tdownloadModule          *registry.Module\n\tmoduleDownloadDirectory string\n}\n\nvar _ ModuleLoader[*GitHubRegistryModuleLoaderOptions] = &GitHubRegistryModuleLoader{}\n\nfunc NewGitHubRegistryModuleLoader(options *GitHubRegistryModuleLoaderOptions) (*GitHubRegistryModuleLoader, error) {\n\n\tregistryOptions := registry.NewModuleGithubRegistryOptions(options.DownloadDirectory, options.RegistryRepoFullName)\n\tgithubRegistry, err := registry.NewModuleGitHubRegistry(registryOptions)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t// check params\n\tmoduleNameAndVersion := version.ParseNameAndVersion(options.Source)\n\tmetadata, err := githubRegistry.GetMetadata(context.Background(), registry.NewModule(moduleNameAndVersion.Name, moduleNameAndVersion.Version))\n\tif err != nil {\n\t\treturn nil, 
err\n\t}\n\tmoduleVersion := moduleNameAndVersion.Version\n\t// If it is the latest version, change it to the type it should be\n\tif moduleNameAndVersion.IsLatestVersion() {\n\t\tmoduleVersion = metadata.LatestVersion\n\t}\n\n\tif !metadata.HasVersion(moduleVersion) {\n\t\treturn nil, fmt.Errorf(\"module version not found, uses source %s\", options.Source)\n\t}\n\n\t// The version to which the module will be downloaded\n\tmoduleDownloadDirectory := filepath.Join(utils.AbsPath(options.DownloadDirectory), DownloadModulesDirectoryName, moduleNameAndVersion.Name, moduleVersion)\n\n\toptions.Version = moduleVersion\n\treturn &GitHubRegistryModuleLoader{\n\t\tgithubRegistry:          githubRegistry,\n\t\toptions:                 options,\n\t\tdownloadModule:          registry.NewModule(moduleNameAndVersion.Name, moduleVersion),\n\t\tmoduleDownloadDirectory: moduleDownloadDirectory,\n\t}, nil\n}\n\nfunc (x *GitHubRegistryModuleLoader) Name() ModuleLoaderType {\n\treturn ModuleLoaderTypeGitHubRegistry\n}\n\nfunc (x *GitHubRegistryModuleLoader) Load(ctx context.Context) (*module.Module, bool) {\n\n\tdefer func() {\n\t\tx.options.MessageChannel.SenderWaitAndClose()\n\t}()\n\n\t// Download the given repository\n\tdownloadOptions := &registry.ModuleRegistryDownloadOptions{\n\t\tModuleDownloadDirectoryPath: x.moduleDownloadDirectory,\n\t\tSkipVerify:                  pointer.TruePointer(),\n\t\tProgressTracker:             x.options.ProgressTracker,\n\t}\n\tmoduleDownloadDirectory, err := x.githubRegistry.Download(ctx, x.downloadModule, downloadOptions)\n\tif err != nil {\n\t\tx.options.MessageChannel.Send(schema.NewDiagnostics().AddErrorMsg(\"from github registry download module %s failed: %s\", x.downloadModule.String(), err.Error()))\n\t\treturn nil, false\n\t}\n\n\t// send tips\n\tx.options.MessageChannel.Send(schema.NewDiagnostics().AddInfo(\"download github module %s to local directory %s\", x.downloadModule.String(), moduleDownloadDirectory))\n\n\t// Continue to load 
submodules, if any\n\tlocalDirectoryModuleLoaderOptions := &LocalDirectoryModuleLoaderOptions{\n\t\tModuleLoaderOptions: &ModuleLoaderOptions{\n\t\t\tSource:  x.options.Source,\n\t\t\tVersion: x.options.Version,\n\t\t\t// TODO\n\t\t\t//ProgressTracker:   x.ProgressTracker,\n\t\t\tDownloadDirectory: x.options.DownloadDirectory,\n\t\t\tMessageChannel:    x.options.MessageChannel.MakeChildChannel(),\n\t\t\t// The dependency level does not increase\n\t\t\tDependenciesTree: x.options.DependenciesTree,\n\t\t},\n\t\tModuleDirectory: moduleDownloadDirectory,\n\t}\n\tloader, err := NewLocalDirectoryModuleLoader(localDirectoryModuleLoaderOptions)\n\tif err != nil {\n\t\tlocalDirectoryModuleLoaderOptions.MessageChannel.SenderWaitAndClose()\n\t\terrorTips := fmt.Sprintf(\"github module %s local path %s load failed: %s\", x.options.Source, moduleDownloadDirectory, err.Error())\n\t\tx.options.MessageChannel.Send(schema.NewDiagnostics().AddErrorMsg(errorTips))\n\t\treturn nil, false\n\t}\n\n\treturn loader.Load(ctx)\n}\n\nfunc (x *GitHubRegistryModuleLoader) Options() *GitHubRegistryModuleLoaderOptions {\n\treturn x.options\n}\n\n// ------------------------------------------------- --------------------------------------------------------------------\n"
  },
  {
    "path": "pkg/modules/module_loader/github_registry_module_loader_test.go",
    "content": "package module_loader\n\nimport (\n\t\"context\"\n\t\"github.com/selefra/selefra-provider-sdk/provider/schema\"\n\t\"github.com/selefra/selefra/pkg/message\"\n\t\"github.com/selefra/selefra/pkg/utils\"\n\t\"github.com/stretchr/testify/assert\"\n\t\"testing\"\n)\n\nconst testDownloadDirectory = \"./test_download\"\n\nfunc TestGitHubRegistryModuleLoader_Load(t *testing.T) {\n\n\tsource := \"rules-aws-misconfiguration-s3@v0.0.4\"\n\n\tmessageChannel := message.NewChannel[*schema.Diagnostics](func(index int, message *schema.Diagnostics) {\n\t\tif utils.IsNotEmpty(message) {\n\t\t\tt.Log(message.ToString())\n\t\t}\n\t})\n\n\tloader, err := NewGitHubRegistryModuleLoader(&GitHubRegistryModuleLoaderOptions{\n\t\tModuleLoaderOptions: &ModuleLoaderOptions{\n\t\t\tSource:            source,\n\t\t\tVersion:           \"\",\n\t\t\tMessageChannel:    messageChannel,\n\t\t\tDownloadDirectory: testDownloadDirectory,\n\t\t\tDependenciesTree:  []string{source},\n\t\t},\n\t\tRegistryRepoFullName: \"selefra/registry\",\n\t})\n\tassert.Nil(t, err)\n\trootModule, b := loader.Load(context.Background())\n\tmessageChannel.ReceiverWait()\n\tassert.True(t, b)\n\tassert.NotNil(t, rootModule)\n\n}\n"
  },
  {
    "path": "pkg/modules/module_loader/local_directory_module_loader.go",
    "content": "package module_loader\n\nimport (\n\t\"context\"\n\t\"errors\"\n\t\"fmt\"\n\t\"github.com/selefra/selefra-provider-sdk/provider/schema\"\n\t\"github.com/selefra/selefra/pkg/modules/module\"\n\t\"github.com/selefra/selefra/pkg/modules/parser\"\n\t\"github.com/selefra/selefra/pkg/registry\"\n\t\"github.com/selefra/selefra/pkg/utils\"\n\t\"os\"\n\t\"path\"\n\t\"path/filepath\"\n\t\"strings\"\n)\n\n// ------------------------------------------------- --------------------------------------------------------------------\n\n// LocalDirectoryModuleLoaderOptions Option when loading modules from a local directory\ntype LocalDirectoryModuleLoaderOptions struct {\n\t*ModuleLoaderOptions\n\tInstruction map[string]interface{} `json:\"instruction\" yaml:\"instruction\"`\n\t// Directory where the module resides Directory\n\tModuleDirectory string `json:\"module-directory\" yaml:\"module-directory\"`\n}\n\n//func (x *LocalDirectoryModuleLoaderOptions) Copy() *LocalDirectoryModuleLoaderOptions {\n//\treturn &LocalDirectoryModuleLoaderOptions{\n//\t\tModuleLoaderOptions: x.ModuleLoaderOptions.Copy(),\n//\t\tModuleDirectory:     x.ModuleDirectory,\n//\t}\n//}\n\n//func (x *LocalDirectoryModuleLoaderOptions) CopyForModuleDirectory(source, moduleDirectory string) *LocalDirectoryModuleLoaderOptions {\n//\toptions := x.Copy()\n//\toptions.Source = source\n//\toptions.ModuleDirectory = moduleDirectory\n//\toptions.DependenciesTree = append(options.DependenciesTree, source)\n//\treturn options\n//}\n\n// BuildFullName Gets the globally unique identity of the module\nfunc (x *LocalDirectoryModuleLoaderOptions) BuildFullName() string {\n\tif x.Source == \"\" {\n\t\treturn x.ModuleDirectory\n\t} else {\n\t\treturn fmt.Sprintf(\"%s @ %s\", x.Source, x.ModuleDirectory)\n\t}\n}\n\n// ------------------------------------------------- --------------------------------------------------------------------\n\n// LocalDirectoryModuleLoader Load the module from the local directory\ntype 
LocalDirectoryModuleLoader struct {\n\toptions *LocalDirectoryModuleLoaderOptions\n}\n\nvar _ ModuleLoader[*LocalDirectoryModuleLoaderOptions] = &LocalDirectoryModuleLoader{}\n\nfunc NewLocalDirectoryModuleLoader(options *LocalDirectoryModuleLoaderOptions) (*LocalDirectoryModuleLoader, error) {\n\n\t// convert to abs path\n\toptions.ModuleDirectory = utils.AbsPath(options.ModuleDirectory)\n\n\tif !utils.ExistsDirectory(options.ModuleDirectory) {\n\t\treturn nil, fmt.Errorf(\"module %s does not exist or is not directory\", options.BuildFullName())\n\t}\n\n\treturn &LocalDirectoryModuleLoader{\n\t\toptions: options,\n\t}, nil\n}\n\nfunc (x *LocalDirectoryModuleLoader) Name() ModuleLoaderType {\n\treturn ModuleLoaderTypeLocalDirectory\n}\n\nfunc (x *LocalDirectoryModuleLoader) Load(ctx context.Context) (*module.Module, bool) {\n\tdefer func() {\n\t\tx.options.MessageChannel.SenderWaitAndClose()\n\t}()\n\n\t// check path\n\td := x.checkModuleDirectory()\n\tx.options.MessageChannel.Send(d)\n\tif utils.HasError(d) {\n\t\treturn nil, false\n\t}\n\n\t// list all yaml file\n\tyamlFilePathSlice, d := x.listModuleDirectoryYamlFilePath()\n\tx.options.MessageChannel.Send(d)\n\tif utils.HasError(d) {\n\t\treturn nil, false\n\t}\n\n\t// Read all files under the module as modules, these modules may be incomplete, may be some fragments of the module\n\tyamlFileModuleSlice := make([]*module.Module, len(yamlFilePathSlice))\n\tisHasError := false\n\tfor index, yamlFilePath := range yamlFilePathSlice {\n\t\tyamlFileModule, d := parser.NewYamlFileToModuleParser(yamlFilePath, x.options.Instruction).Parse()\n\t\tx.options.MessageChannel.Send(d)\n\t\tif utils.HasError(d) {\n\t\t\tisHasError = true\n\t\t}\n\t\tyamlFileModuleSlice[index] = yamlFileModule\n\t}\n\tif isHasError {\n\t\treturn nil, false\n\t}\n\n\t// Merge these modules\n\tfinalModule := module.NewModule()\n\thasError := false\n\tfor _, yamlFileModule := range yamlFileModuleSlice {\n\t\tmerge, d := 
finalModule.Merge(yamlFileModule)\n\t\tx.options.MessageChannel.Send(d)\n\t\tif utils.HasError(d) {\n\t\t\thasError = true\n\t\t}\n\t\tif merge != nil {\n\t\t\tfinalModule = merge\n\t\t}\n\t}\n\tif hasError {\n\t\treturn nil, false\n\t}\n\n\t// load sub modules\n\tsubModuleSlice, loadSuccess := x.loadSubModules(ctx, finalModule.ModulesBlock)\n\tif !loadSuccess {\n\t\treturn nil, false\n\t}\n\tfor _, subModule := range subModuleSlice {\n\t\tsubModule.ProvidersBlock = finalModule.ProvidersBlock\n\t\tsubModule.SelefraBlock = finalModule.SelefraBlock\n\t\tsubModule.ParentModule = finalModule\n\t\t//subModule.VariablesBlock = finalModule.VariablesBlock\n\t}\n\tfinalModule.SubModules = subModuleSlice\n\tfinalModule.Source = x.options.Source\n\tfinalModule.ModuleLocalDirectory = x.options.ModuleDirectory\n\tfinalModule.DependenciesPath = x.options.DependenciesTree\n\n\treturn finalModule, true\n}\n\nfunc (x *LocalDirectoryModuleLoader) loadSubModules(ctx context.Context, modulesBlock module.ModulesBlock) ([]*module.Module, bool) {\n\tsubModuleSlice := make([]*module.Module, 0)\n\tfor _, moduleBlock := range modulesBlock {\n\t\tuseModuleSource := moduleBlock.Uses\n\t\tuseLocation := moduleBlock.GetNodeLocation(fmt.Sprintf(\"uses\"))\n\t\t//moduleDirectoryPath := filepath.Dir(useLocation.Path)\n\n\t\tswitch NewModuleLoaderBySource(moduleBlock.Uses) {\n\n\t\t// Unsupported loading mode\n\t\tcase ModuleLoaderTypeInvalid:\n\t\t\terrorReport := module.RenderErrorTemplate(fmt.Sprintf(\"invalid module uses source %s, unsupported module loader\", useModuleSource), useLocation)\n\t\t\tx.options.MessageChannel.Send(schema.NewDiagnostics().AddErrorMsg(errorReport))\n\t\t\treturn nil, false\n\n\t\t// Load the module from the bucket in S3\n\t\tcase ModuleLoaderTypeS3Bucket:\n\t\t\tsubModule, ok := x.loadS3BucketModule(ctx, useLocation, useModuleSource)\n\t\t\tif !ok {\n\t\t\t\treturn nil, false\n\t\t\t}\n\t\t\tsubModuleSlice = append(subModuleSlice, subModule)\n\n\t\tcase 
ModuleLoaderTypeGitHubRegistry:\n\t\t\tsubModule, ok := x.loadGitHubRegistryModule(ctx, useLocation, useModuleSource)\n\t\t\tif !ok {\n\t\t\t\treturn nil, false\n\t\t\t}\n\t\t\tsubModuleSlice = append(subModuleSlice, subModule)\n\n\t\tcase ModuleLoaderTypeLocalDirectory:\n\t\t\tsubModule, ok := x.loadLocalDirectoryModule(ctx, useLocation, useModuleSource)\n\t\t\tif !ok {\n\t\t\t\treturn nil, false\n\t\t\t}\n\t\t\tsubModuleSlice = append(subModuleSlice, subModule)\n\n\t\tcase ModuleLoaderTypeURL:\n\t\t\tsubModule, ok := x.loadURLModule(ctx, useLocation, useModuleSource)\n\t\t\tif !ok {\n\t\t\t\treturn nil, false\n\t\t\t}\n\t\t\tsubModuleSlice = append(subModuleSlice, subModule)\n\n\t\tdefault:\n\t\t\terrorReport := module.RenderErrorTemplate(fmt.Sprintf(\"module source %s cannot be assigned a loader\", useModuleSource), useLocation)\n\t\t\tx.options.MessageChannel.Send(schema.NewDiagnostics().AddErrorMsg(errorReport))\n\t\t\treturn nil, false\n\t\t}\n\t}\n\treturn subModuleSlice, true\n}\n\nfunc (x *LocalDirectoryModuleLoader) loadURLModule(ctx context.Context, useLocation *module.NodeLocation, useModuleSource string) (*module.Module, bool) {\n\turlModuleLoaderOptions := &URLModuleLoaderOptions{\n\t\tModuleLoaderOptions: &ModuleLoaderOptions{\n\t\t\tSource: useModuleSource,\n\t\t\t// TODO\n\t\t\tVersion:           \"\",\n\t\t\tDownloadDirectory: x.options.DownloadDirectory,\n\t\t\t// TODO\n\t\t\tProgressTracker:  x.options.ProgressTracker,\n\t\t\tMessageChannel:   x.options.MessageChannel.MakeChildChannel(),\n\t\t\tDependenciesTree: x.options.DeepDependenciesTree(useModuleSource),\n\t\t},\n\t\tModuleURL: useModuleSource,\n\t}\n\tloader, err := NewURLModuleLoader(urlModuleLoaderOptions)\n\tif err != nil {\n\t\turlModuleLoaderOptions.MessageChannel.SenderWaitAndClose()\n\t\terrorReport := module.RenderErrorTemplate(fmt.Sprintf(\"create url module %s error: %s\", useModuleSource, err.Error()), 
useLocation)\n\t\tx.options.MessageChannel.Send(schema.NewDiagnostics().AddErrorMsg(errorReport))\n\t\treturn nil, false\n\t}\n\treturn loader.Load(ctx)\n}\n\nfunc (x *LocalDirectoryModuleLoader) loadLocalDirectoryModule(ctx context.Context, useLocation *module.NodeLocation, useModuleSource string) (*module.Module, bool) {\n\n\t// The path of the submodule should be from the current path\n\tsubModuleDirectory := filepath.Join(utils.AbsPath(x.options.ModuleDirectory), useModuleSource)\n\n\tsubModuleLocalDirectoryOptions := &LocalDirectoryModuleLoaderOptions{\n\t\tInstruction: x.options.Instruction,\n\t\tModuleLoaderOptions: &ModuleLoaderOptions{\n\t\t\tSource: useModuleSource,\n\t\t\t// TODO\n\t\t\tVersion:           \"\",\n\t\t\tDownloadDirectory: x.options.DownloadDirectory,\n\t\t\t// TODO\n\t\t\tProgressTracker:  x.options.ProgressTracker,\n\t\t\tMessageChannel:   x.options.MessageChannel.MakeChildChannel(),\n\t\t\tDependenciesTree: x.options.DeepDependenciesTree(useModuleSource),\n\t\t},\n\t\tModuleDirectory: subModuleDirectory,\n\t}\n\n\tloader, err := NewLocalDirectoryModuleLoader(subModuleLocalDirectoryOptions)\n\tif err != nil {\n\t\tsubModuleLocalDirectoryOptions.MessageChannel.SenderWaitAndClose()\n\t\terrorReport := module.RenderErrorTemplate(fmt.Sprintf(\"create local directory module %s error: %s\", subModuleLocalDirectoryOptions.BuildFullName(), err.Error()), useLocation)\n\t\tx.options.MessageChannel.Send(schema.NewDiagnostics().AddErrorMsg(errorReport))\n\t\treturn nil, false\n\t}\n\treturn loader.Load(ctx)\n}\n\nfunc (x *LocalDirectoryModuleLoader) loadGitHubRegistryModule(ctx context.Context, useLocation *module.NodeLocation, useModuleSource string) (*module.Module, bool) {\n\n\tgithubOptions := &GitHubRegistryModuleLoaderOptions{\n\t\tModuleLoaderOptions: &ModuleLoaderOptions{\n\t\t\tSource: useModuleSource,\n\t\t\t// TODO\n\t\t\tVersion: \"\",\n\t\t\t// TODO\n\t\t\t//ProgressTracker:   x.ProgressTracker,\n\t\t\tDownloadDirectory: 
x.options.DownloadDirectory,\n\t\t\tMessageChannel:    x.options.MessageChannel.MakeChildChannel(),\n\t\t\tDependenciesTree:  x.options.DeepDependenciesTree(useModuleSource),\n\t\t},\n\t\tRegistryRepoFullName: registry.ModuleGithubRegistryDefaultRepoFullName,\n\t}\n\n\tloader, err := NewGitHubRegistryModuleLoader(githubOptions)\n\tif err != nil {\n\t\tgithubOptions.MessageChannel.SenderWaitAndClose()\n\t\terrorReport := module.RenderErrorTemplate(fmt.Sprintf(\"create github registry module %s error: %s\", githubOptions.Source, err.Error()), useLocation)\n\t\tx.options.MessageChannel.Send(schema.NewDiagnostics().AddErrorMsg(errorReport))\n\t\treturn nil, false\n\t}\n\n\treturn loader.Load(ctx)\n}\n\nfunc (x *LocalDirectoryModuleLoader) loadS3BucketModule(ctx context.Context, useLocation *module.NodeLocation, useModuleSource string) (*module.Module, bool) {\n\n\ts3Options := &S3BucketModuleLoaderOptions{\n\t\tModuleLoaderOptions: &ModuleLoaderOptions{\n\t\t\tSource: useModuleSource,\n\t\t\t// TODO\n\t\t\tVersion: \"\",\n\t\t\t// TODO\n\t\t\t//ProgressTracker:   x.ProgressTracker,\n\t\t\tDownloadDirectory: x.options.DownloadDirectory,\n\t\t\tMessageChannel:    x.options.MessageChannel.MakeChildChannel(),\n\t\t\tDependenciesTree:  x.options.DeepDependenciesTree(useModuleSource),\n\t\t},\n\t\tS3BucketURL: useModuleSource,\n\t}\n\n\tloader, err := NewS3BucketModuleLoader(s3Options)\n\n\tif err != nil {\n\t\ts3Options.MessageChannel.SenderWaitAndClose()\n\t\terrorReport := module.RenderErrorTemplate(fmt.Sprintf(\"create s3 module loader %s error: %s\", s3Options.Source, err.Error()), useLocation)\n\t\tx.options.MessageChannel.Send(schema.NewDiagnostics().AddErrorMsg(errorReport))\n\t\treturn nil, false\n\t}\n\n\treturn loader.Load(ctx)\n}\n\nfunc (x *LocalDirectoryModuleLoader) Options() *LocalDirectoryModuleLoaderOptions {\n\treturn x.options\n}\n\n// Check that the given module local path is correct\nfunc (x *LocalDirectoryModuleLoader) checkModuleDirectory() 
*schema.Diagnostics {\n\tinfo, err := os.Stat(x.options.ModuleDirectory)\n\tif err != nil {\n\t\tif errors.Is(err, os.ErrNotExist) {\n\t\t\treturn schema.NewDiagnostics().AddErrorMsg(\"module %s not found\", x.options.BuildFullName())\n\t\t} else {\n\t\t\treturn schema.NewDiagnostics().AddErrorMsg(\"module %s load error: %s\", x.options.BuildFullName(), err.Error())\n\t\t}\n\t}\n\n\tif !info.IsDir() {\n\t\treturn schema.NewDiagnostics().AddErrorMsg(\"module %s found, but not is directory\", x.options.BuildFullName())\n\t}\n\n\treturn nil\n}\n\n// Lists all yaml files in the directory where the module resides\nfunc (x *LocalDirectoryModuleLoader) listModuleDirectoryYamlFilePath() ([]string, *schema.Diagnostics) {\n\tdir, err := os.ReadDir(x.options.ModuleDirectory)\n\tif err != nil {\n\t\treturn nil, schema.NewDiagnostics().AddErrorMsg(\"module %s read error: %s\", x.options.BuildFullName(), err.Error())\n\t}\n\tyamlFileSlice := make([]string, 0)\n\tfor _, entry := range dir {\n\t\tif entry.IsDir() {\n\t\t\tcontinue\n\t\t}\n\t\tif IsYamlFile(entry) {\n\t\t\tyamlFilePath := filepath.Join(utils.AbsPath(x.options.ModuleDirectory), entry.Name())\n\t\t\tyamlFileSlice = append(yamlFileSlice, yamlFilePath)\n\t\t}\n\t}\n\treturn yamlFileSlice, nil\n}\n\nfunc IsYamlFile(entry os.DirEntry) bool {\n\tif entry.IsDir() {\n\t\treturn false\n\t}\n\text := strings.ToLower(path.Ext(entry.Name()))\n\treturn strings.HasSuffix(ext, \".yaml\") || strings.HasSuffix(ext, \".yml\")\n}\n"
  },
  {
    "path": "pkg/modules/module_loader/local_directory_module_loader_test.go",
    "content": "package module_loader\n\nimport (\n\t\"context\"\n\t\"github.com/selefra/selefra-provider-sdk/provider/schema\"\n\t\"github.com/selefra/selefra/pkg/message\"\n\t\"github.com/selefra/selefra/pkg/utils\"\n\t\"github.com/stretchr/testify/assert\"\n\t\"testing\"\n)\n\nfunc TestLocalDirectoryModuleLoader_Load(t *testing.T) {\n\n\tsource := \"./test_data/module_mixed\"\n\n\tmessageChannel := message.NewChannel[*schema.Diagnostics](func(index int, message *schema.Diagnostics) {\n\t\tif utils.IsNotEmpty(message) {\n\t\t\tt.Log(message.ToString())\n\t\t}\n\t})\n\tloader, err := NewLocalDirectoryModuleLoader(&LocalDirectoryModuleLoaderOptions{\n\t\tModuleLoaderOptions: &ModuleLoaderOptions{\n\t\t\tSource:            source,\n\t\t\tVersion:           \"\",\n\t\t\tDownloadDirectory: testDownloadDirectory,\n\t\t\tMessageChannel:    messageChannel,\n\t\t\tDependenciesTree:  []string{source},\n\t\t},\n\t\tModuleDirectory: source,\n\t})\n\tassert.Nil(t, err)\n\trootModule, isLoadSuccess := loader.Load(context.Background())\n\tassert.True(t, isLoadSuccess)\n\tassert.NotNil(t, rootModule)\n\tmessageChannel.ReceiverWait()\n}\n"
  },
  {
    "path": "pkg/modules/module_loader/module_loader.go",
    "content": "package module_loader\n\nimport (\n\t\"context\"\n\t\"github.com/hashicorp/go-getter\"\n\t\"github.com/selefra/selefra-provider-sdk/provider/schema\"\n\t\"github.com/selefra/selefra/pkg/message\"\n\t\"github.com/selefra/selefra/pkg/modules/module\"\n)\n\n// ------------------------------------------------- --------------------------------------------------------------------\n\nconst (\n\tDownloadModulesDirectoryName = \"modules\"\n)\n\n// ------------------------------------------------- --------------------------------------------------------------------\n\n// ModuleLoaderOptions Options when loading the module\ntype ModuleLoaderOptions struct {\n\n\t// Where can I download the module\n\tSource string `json:\"source\" yaml:\"source\"`\n\n\t// Which version of which module to download\n\tVersion string `json:\"version\" yaml:\"version\"`\n\n\t// What is the download path configured in the current system\n\tDownloadDirectory string `json:\"download-directory\" yaml:\"download-directory\"`\n\n\t// TODO Can be used to track download progress\n\tProgressTracker getter.ProgressTracker\n\n\t// It's used to send information back in real time\n\tMessageChannel *message.Channel[*schema.Diagnostics] `json:\"message-channel\"`\n\n\t// How do I go from the root module to the current module\n\tDependenciesTree []string `json:\"dependencies-tree\" yaml:\"dependencies-tree\"`\n}\n\n// DeepDependenciesTree The dependence goes deeper\nfunc (x *ModuleLoaderOptions) DeepDependenciesTree(source string) []string {\n\tdependenciesTree := make([]string, len(x.DependenciesTree)+1)\n\tdependenciesTree[0] = source\n\tfor index, source := range x.DependenciesTree {\n\t\tdependenciesTree[index+1] = source\n\t}\n\treturn dependenciesTree\n}\n\n//func (x *ModuleLoaderOptions) Copy() *ModuleLoaderOptions {\n//\treturn &ModuleLoaderOptions{\n//\t\tSource:  x.Source,\n//\t\tVersion: x.Version,\n//\t\t// TODO\n//\t\t//ProgressTracker:   x.ProgressTracker,\n//\t\tDownloadDirectory: 
x.DownloadDirectory,\n//\t\tMessageChannel:    x.MessageChannel.MakeChildChannel(),\n//\t\tDependenciesTree:  append([]string{}, x.DependenciesTree...),\n//\t}\n//}\n\n// ------------------------------------------------- --------------------------------------------------------------------\n\n// ModuleLoader Module loader\ntype ModuleLoader[Options any] interface {\n\n\t// Name The name of the loader\n\tName() ModuleLoaderType\n\n\t// Load Use this loader to load the module\n\tLoad(ctx context.Context) (*module.Module, bool)\n\n\tOptions() Options\n}\n\n// ------------------------------------------------- --------------------------------------------------------------------\n"
  },
  {
    "path": "pkg/modules/module_loader/module_loader_manager.go",
    "content": "package module_loader\n\nimport (\n\t\"regexp\"\n\t\"strings\"\n)\n\n// ------------------------------------------------- --------------------------------------------------------------------\n\ntype ModuleLoaderType string\n\nconst (\n\n\t// ModuleLoaderTypeInvalid Default value if not set\n\tModuleLoaderTypeInvalid ModuleLoaderType = \"\"\n\n\t// ModuleLoaderTypeS3Bucket Load modules from S3 bucket.s\n\tModuleLoaderTypeS3Bucket ModuleLoaderType = \"s3-bucket-module-loader\"\n\n\t// ModuleLoaderTypeGitHubRegistry Load the module from GitHub's Registry\n\tModuleLoaderTypeGitHubRegistry ModuleLoaderType = \"github-registry-module-loader\"\n\n\t// ModuleLoaderTypeLocalDirectory Load the module from the local directory\n\tModuleLoaderTypeLocalDirectory ModuleLoaderType = \"local-directory-module-loader\"\n\n\tModuleLoaderTypeURL ModuleLoaderType = \"url-module-loader\"\n)\n\n// ------------------------------------------------- --------------------------------------------------------------------\n\nvar Pattern = regexp.MustCompile(\"^[A-Za-z_-]?[\\\\w\\\\-_@.]+$\")\n\n// NewModuleLoaderBySource Distributed to different module loaders based on load options\nfunc NewModuleLoaderBySource(source string) ModuleLoaderType {\n\tformatSource := strings.ToLower(source)\n\tswitch {\n\tcase strings.HasPrefix(formatSource, \"s3://\"):\n\t\treturn ModuleLoaderTypeS3Bucket\n\tcase strings.HasPrefix(formatSource, \"http://\") || strings.HasPrefix(formatSource, \"https://\"):\n\t\treturn ModuleLoaderTypeURL\n\tcase strings.HasPrefix(source, \"./\") || strings.HasPrefix(source, \"../\"):\n\t\treturn ModuleLoaderTypeLocalDirectory\n\tcase Pattern.MatchString(source):\n\t\treturn ModuleLoaderTypeGitHubRegistry\n\tdefault:\n\t\treturn ModuleLoaderTypeInvalid\n\t}\n}\n\n// ------------------------------------------------- --------------------------------------------------------------------\n"
  },
  {
    "path": "pkg/modules/module_loader/module_loader_manager_test.go",
    "content": "package module_loader\n\nimport (\n\t\"testing\"\n)\n\nfunc TestNewModuleLoaderBySource(t *testing.T) {\n\tsource := NewModuleLoaderBySource(\"rules-aws-misconfigure-s3@v0.0.1\")\n\tt.Log(source)\n}\n"
  },
  {
    "path": "pkg/modules/module_loader/s3_bucket_path_module_loader.go",
    "content": "package module_loader\n\nimport (\n\t\"context\"\n\t\"github.com/selefra/selefra-provider-sdk/provider/schema\"\n\t\"github.com/selefra/selefra-utils/pkg/md5_util\"\n\t\"github.com/selefra/selefra/pkg/http_client\"\n\t\"github.com/selefra/selefra/pkg/modules/module\"\n\t\"path/filepath\"\n)\n\n// TODO Need to test\n\n// ------------------------------------------------- --------------------------------------------------------------------\n\ntype S3BucketModuleLoaderOptions struct {\n\t*ModuleLoaderOptions\n\n\tS3BucketURL string\n}\n\n// ------------------------------------------------- --------------------------------------------------------------------\n\ntype S3BucketModuleLoader struct {\n\toptions                     *S3BucketModuleLoaderOptions\n\tmoduleDownloadDirectoryPath string\n}\n\nvar _ ModuleLoader[*S3BucketModuleLoaderOptions] = &S3BucketModuleLoader{}\n\nfunc NewS3BucketModuleLoader(options *S3BucketModuleLoaderOptions) (*S3BucketModuleLoader, error) {\n\n\tdirectoryName, err := md5_util.Md5String(options.S3BucketURL)\n\tif err != nil {\n\t\t// TODO\n\t\treturn nil, err\n\t}\n\tmoduleDownloadDirectoryPath := filepath.Join(options.DownloadDirectory, DownloadModulesDirectoryName, string(ModuleLoaderTypeS3Bucket), directoryName)\n\n\treturn &S3BucketModuleLoader{\n\t\toptions:                     options,\n\t\tmoduleDownloadDirectoryPath: moduleDownloadDirectoryPath,\n\t}, nil\n}\n\nfunc (x *S3BucketModuleLoader) Name() ModuleLoaderType {\n\treturn ModuleLoaderTypeS3Bucket\n}\n\nfunc (x *S3BucketModuleLoader) Options() *S3BucketModuleLoaderOptions {\n\treturn x.options\n}\n\nfunc (x *S3BucketModuleLoader) Load(ctx context.Context) (*module.Module, bool) {\n\n\tdefer func() {\n\t\tx.options.MessageChannel.SenderWaitAndClose()\n\t}()\n\n\t// step 01. 
Download and decompress\n\terr := http_client.DownloadToDirectory(ctx, x.options.S3BucketURL, x.moduleDownloadDirectoryPath, x.options.ProgressTracker)\n\tif err != nil {\n\t\t// TODO\n\t\tx.options.MessageChannel.Send(schema.NewDiagnostics().AddErrorMsg(\"\"))\n\t\treturn nil, false\n\t}\n\n\t// step 02. The download is decompressed and converted to loading from the local path\n\tlocalDirectoryModuleLoaderOptions := &LocalDirectoryModuleLoaderOptions{\n\t\t// TODO\n\t\t//ModuleLoaderOptions: x.options.ModuleLoaderOptions.Copy(),\n\t\tModuleDirectory: x.moduleDownloadDirectoryPath,\n\t}\n\tloader, err := NewLocalDirectoryModuleLoader(localDirectoryModuleLoaderOptions)\n\tif err != nil {\n\t\t// TODO\n\t\tlocalDirectoryModuleLoaderOptions.MessageChannel.SenderWaitAndClose()\n\t\tx.options.MessageChannel.Send(schema.NewDiagnostics().AddErrorMsg(\"\"))\n\t\treturn nil, false\n\t}\n\n\treturn loader.Load(ctx)\n}\n"
  },
  {
    "path": "pkg/modules/module_loader/test_data/contains_sub_module/sub_module_a/sub_module_a_1/modules.yml",
    "content": "modules:\n  - name: module_name_sub_module_a_1\n    uses:\n  #      - rules/s3/bucket_acl_publicly_readable.yaml\n  #      - rules/s3/bucket_acl_publicly_writeable.yaml\n  #      - rules/s3/bucket_allow_http_access.yaml\n  #      - rules/s3/bucket_default_encryption_disable.yaml\n  #      - rules/s3/bucket_is_not_configured_with_cors_rules.yaml\n  #      - rules/s3/bucket_logging_disable.yaml\n  #      - rules/s3/bucket_not_configured_block_public_access.yaml\n  #      - rules/s3/bucket_object_traversal_by_acl.yaml\n  #      - rules/s3/bucket_object_traversal_by_policy.yaml\n  #      - rules/s3/bucket_publicly_readable.yaml\n  #      - rules/s3/bucket_publicly_writeable.yaml\n  #      - rules/s3/bucket_source_ip_not_set.yaml\n  #      - rules/s3/bucket_versioning_is_disabled.yaml\n  #      - rules/s3/mfa_delete_is_disable.yaml\n  #      - rules/s3/s3_account_level_public_access_not_blocks.yaml\n  #      - rules/s3/s3_bucket_blacklisted_actions_prohibited.yaml\n  #      - rules/s3/s3_bucket_replication_disabled.yaml\n  #      - rules/s3/s3_is_not_protected_by_backup_plan.yaml\n  #      - rules/s3/s3_last_backup_recovery_point.yaml\n  #      - rules/s3/s3_not_default_encryption_kms.yaml\n  #      - rules/s3/s3_version_lifecycle_policy_not_check.yaml\n  #      - rules/s3/the_target_bucket_for_server_access_logging_is_the_bucket_itself.yaml\n  - name: example_module\n    #    uses: ./rules/\n    input:\n      name: selefra"
  },
  {
    "path": "pkg/modules/module_loader/test_data/contains_sub_module/sub_module_a/sub_module_a_1/rules.yml",
    "content": "rules:\n  - name: rule_name_sub_module_a_1\n    query: |\n      SELECT\n        DISTINCT(a1.*)\n      FROM\n        aws_s3_buckets AS a1 FULL\n        OUTER JOIN aws_s3_bucket_cors_rules AS a2 ON a1.selefra_id = a2.aws_s3_buckets_selefra_id\n      WHERE\n        aws_s3_buckets_selefra_id IS NULL;\n    labels:\n      resource_account_id: '{{.account_id}}'\n      resource_id: '{{.arn}}'\n      resource_region: '{{.region}}'\n      resource_type: S3\n      bucket_url: 'https://{{.name}}.s3.{{.region}}.amazonaws.com'\n    metadata:\n      author: Selefra\n      description: Ensure to configure secure CORS rules for the Bucket.\n      id: SF010117\n      provider: AWS\n      remediation: remediation/s3/bucket_is_not_configured_with_cors_rules.md\n      severity: Low\n      tags:\n        - Security\n        - Misconfiguration\n      title: S3 Bucket is not configured with CORS rules\n    output: \"S3 Bucket is not configured with CORS rules, arn: {{.arn}}\""
  },
  {
    "path": "pkg/modules/module_loader/test_data/contains_sub_module/sub_module_a/sub_module_a_1/selefra.yml",
    "content": "selefra:\n#  cloud:\n#    project: example_project\n#    organization: example_org\n#    hostname: app.selefra.io\n#  connection:\n#    type: postgres\n#    username: postgres\n#    password: pass\n#    host: localhost\n#    port: \"5432\"\n#    database: postgres\n#    sslmode: disable\n#  name: example_project\n#  cli_version: v0.0.1\n  providers:\n    - name: aws\n      source: selefra/aws\n      version: \">=0.0.9,<=0.0.10\"\n    - name: gcp\n      source: selefra/gcp\n      version: \">=0.0.9,<=0.0.10\""
  },
  {
    "path": "pkg/modules/module_loader/test_data/contains_sub_module/sub_module_a/sub_module_a_2/modules.yml",
    "content": "modules:\n  - name: module_name_sub_module_a_2\n    uses:\n  #      - rules/s3/bucket_acl_publicly_readable.yaml\n  #      - rules/s3/bucket_acl_publicly_writeable.yaml\n  #      - rules/s3/bucket_allow_http_access.yaml\n  #      - rules/s3/bucket_default_encryption_disable.yaml\n  #      - rules/s3/bucket_is_not_configured_with_cors_rules.yaml\n  #      - rules/s3/bucket_logging_disable.yaml\n  #      - rules/s3/bucket_not_configured_block_public_access.yaml\n  #      - rules/s3/bucket_object_traversal_by_acl.yaml\n  #      - rules/s3/bucket_object_traversal_by_policy.yaml\n  #      - rules/s3/bucket_publicly_readable.yaml\n  #      - rules/s3/bucket_publicly_writeable.yaml\n  #      - rules/s3/bucket_source_ip_not_set.yaml\n  #      - rules/s3/bucket_versioning_is_disabled.yaml\n  #      - rules/s3/mfa_delete_is_disable.yaml\n  #      - rules/s3/s3_account_level_public_access_not_blocks.yaml\n  #      - rules/s3/s3_bucket_blacklisted_actions_prohibited.yaml\n  #      - rules/s3/s3_bucket_replication_disabled.yaml\n  #      - rules/s3/s3_is_not_protected_by_backup_plan.yaml\n  #      - rules/s3/s3_last_backup_recovery_point.yaml\n  #      - rules/s3/s3_not_default_encryption_kms.yaml\n  #      - rules/s3/s3_version_lifecycle_policy_not_check.yaml\n  #      - rules/s3/the_target_bucket_for_server_access_logging_is_the_bucket_itself.yaml\n  - name: example_module\n    #    uses: ./rules/\n    input:\n      name: selefra"
  },
  {
    "path": "pkg/modules/module_loader/test_data/contains_sub_module/sub_module_a/sub_module_a_2/rules.yml",
    "content": "rules:\n  - name: rule_name_sub_module_a_2\n    query: |\n      SELECT\n        DISTINCT(a1.*)\n      FROM\n        aws_s3_buckets AS a1 FULL\n        OUTER JOIN aws_s3_bucket_cors_rules AS a2 ON a1.selefra_id = a2.aws_s3_buckets_selefra_id\n      WHERE\n        aws_s3_buckets_selefra_id IS NULL;\n    labels:\n      resource_account_id: '{{.account_id}}'\n      resource_id: '{{.arn}}'\n      resource_region: '{{.region}}'\n      resource_type: S3\n      bucket_url: 'https://{{.name}}.s3.{{.region}}.amazonaws.com'\n    metadata:\n      author: Selefra\n      description: Ensure to configure secure CORS rules for the Bucket.\n      id: SF010117\n      provider: AWS\n      remediation: remediation/s3/bucket_is_not_configured_with_cors_rules.md\n      severity: Low\n      tags:\n        - Security\n        - Misconfiguration\n      title: S3 Bucket is not configured with CORS rules\n    output: \"S3 Bucket is not configured with CORS rules, arn: {{.arn}}\""
  },
  {
    "path": "pkg/modules/module_loader/test_data/contains_sub_module/sub_module_a/sub_module_a_2/selefra.yml",
    "content": "selefra:\n#  cloud:\n#    project: example_project\n#    organization: example_org\n#    hostname: app.selefra.io\n#  connection:\n#    type: postgres\n#    username: postgres\n#    password: pass\n#    host: localhost\n#    port: \"5432\"\n#    database: postgres\n#    sslmode: disable\n#  name: example_project\n#  cli_version: v0.0.1\n  providers:\n    - name: aws\n      source: selefra/aws\n      version: \">=0.0.9,<=0.0.10\"\n    - name: gcp\n      source: selefra/gcp\n      version: \">=0.0.9,<=0.0.10\""
  },
  {
    "path": "pkg/modules/module_loader/test_data/contains_sub_module/sub_module_b/sub_module_b_1/modules.yml",
    "content": "modules:\n  - name: module_name_sub_module_b_1\n    uses:\n  #      - rules/s3/bucket_acl_publicly_readable.yaml\n  #      - rules/s3/bucket_acl_publicly_writeable.yaml\n  #      - rules/s3/bucket_allow_http_access.yaml\n  #      - rules/s3/bucket_default_encryption_disable.yaml\n  #      - rules/s3/bucket_is_not_configured_with_cors_rules.yaml\n  #      - rules/s3/bucket_logging_disable.yaml\n  #      - rules/s3/bucket_not_configured_block_public_access.yaml\n  #      - rules/s3/bucket_object_traversal_by_acl.yaml\n  #      - rules/s3/bucket_object_traversal_by_policy.yaml\n  #      - rules/s3/bucket_publicly_readable.yaml\n  #      - rules/s3/bucket_publicly_writeable.yaml\n  #      - rules/s3/bucket_source_ip_not_set.yaml\n  #      - rules/s3/bucket_versioning_is_disabled.yaml\n  #      - rules/s3/mfa_delete_is_disable.yaml\n  #      - rules/s3/s3_account_level_public_access_not_blocks.yaml\n  #      - rules/s3/s3_bucket_blacklisted_actions_prohibited.yaml\n  #      - rules/s3/s3_bucket_replication_disabled.yaml\n  #      - rules/s3/s3_is_not_protected_by_backup_plan.yaml\n  #      - rules/s3/s3_last_backup_recovery_point.yaml\n  #      - rules/s3/s3_not_default_encryption_kms.yaml\n  #      - rules/s3/s3_version_lifecycle_policy_not_check.yaml\n  #      - rules/s3/the_target_bucket_for_server_access_logging_is_the_bucket_itself.yaml\n  - name: example_module\n    #    uses: ./rules/\n    input:\n      name: selefra"
  },
  {
    "path": "pkg/modules/module_loader/test_data/contains_sub_module/sub_module_b/sub_module_b_1/rules.yml",
    "content": "rules:\n  - name: rule_name_sub_module_b_1\n    query: |\n      SELECT\n        DISTINCT(a1.*)\n      FROM\n        aws_s3_buckets AS a1 FULL\n        OUTER JOIN aws_s3_bucket_cors_rules AS a2 ON a1.selefra_id = a2.aws_s3_buckets_selefra_id\n      WHERE\n        aws_s3_buckets_selefra_id IS NULL;\n    labels:\n      resource_account_id: '{{.account_id}}'\n      resource_id: '{{.arn}}'\n      resource_region: '{{.region}}'\n      resource_type: S3\n      bucket_url: 'https://{{.name}}.s3.{{.region}}.amazonaws.com'\n    metadata:\n      author: Selefra\n      description: Ensure to configure secure CORS rules for the Bucket.\n      id: SF010117\n      provider: AWS\n      remediation: remediation/s3/bucket_is_not_configured_with_cors_rules.md\n      severity: Low\n      tags:\n        - Security\n        - Misconfiguration\n      title: S3 Bucket is not configured with CORS rules\n    output: \"S3 Bucket is not configured with CORS rules, arn: {{.arn}}\""
  },
  {
    "path": "pkg/modules/module_loader/test_data/contains_sub_module/sub_module_b/sub_module_b_1/selefra.yml",
    "content": "selefra:\n#  cloud:\n#    project: example_project\n#    organization: example_org\n#    hostname: app.selefra.io\n#  connection:\n#    type: postgres\n#    username: postgres\n#    password: pass\n#    host: localhost\n#    port: \"5432\"\n#    database: postgres\n#    sslmode: disable\n#  name: example_project\n#  cli_version: v0.0.1\n  providers:\n    - name: aws\n      source: selefra/aws\n      version: \">=0.0.9,<=0.0.10\"\n    - name: gcp\n      source: selefra/gcp\n      version: \">=0.0.9,<=0.0.10\""
  },
  {
    "path": "pkg/modules/module_loader/test_data/contains_sub_module/sub_module_b/sub_module_b_2/modules.yml",
    "content": "modules:\n  - name: module_name_sub_module_b_2\n    uses:\n  #      - rules/s3/bucket_acl_publicly_readable.yaml\n  #      - rules/s3/bucket_acl_publicly_writeable.yaml\n  #      - rules/s3/bucket_allow_http_access.yaml\n  #      - rules/s3/bucket_default_encryption_disable.yaml\n  #      - rules/s3/bucket_is_not_configured_with_cors_rules.yaml\n  #      - rules/s3/bucket_logging_disable.yaml\n  #      - rules/s3/bucket_not_configured_block_public_access.yaml\n  #      - rules/s3/bucket_object_traversal_by_acl.yaml\n  #      - rules/s3/bucket_object_traversal_by_policy.yaml\n  #      - rules/s3/bucket_publicly_readable.yaml\n  #      - rules/s3/bucket_publicly_writeable.yaml\n  #      - rules/s3/bucket_source_ip_not_set.yaml\n  #      - rules/s3/bucket_versioning_is_disabled.yaml\n  #      - rules/s3/mfa_delete_is_disable.yaml\n  #      - rules/s3/s3_account_level_public_access_not_blocks.yaml\n  #      - rules/s3/s3_bucket_blacklisted_actions_prohibited.yaml\n  #      - rules/s3/s3_bucket_replication_disabled.yaml\n  #      - rules/s3/s3_is_not_protected_by_backup_plan.yaml\n  #      - rules/s3/s3_last_backup_recovery_point.yaml\n  #      - rules/s3/s3_not_default_encryption_kms.yaml\n  #      - rules/s3/s3_version_lifecycle_policy_not_check.yaml\n  #      - rules/s3/the_target_bucket_for_server_access_logging_is_the_bucket_itself.yaml\n  - name: example_module\n    #    uses: ./rules/\n    input:\n      name: selefra"
  },
  {
    "path": "pkg/modules/module_loader/test_data/contains_sub_module/sub_module_b/sub_module_b_2/rules.yml",
    "content": "rules:\n  - name: rule_name_sub_module_b_2\n    query: |\n      SELECT\n        DISTINCT(a1.*)\n      FROM\n        aws_s3_buckets AS a1 FULL\n        OUTER JOIN aws_s3_bucket_cors_rules AS a2 ON a1.selefra_id = a2.aws_s3_buckets_selefra_id\n      WHERE\n        aws_s3_buckets_selefra_id IS NULL;\n    labels:\n      resource_account_id: '{{.account_id}}'\n      resource_id: '{{.arn}}'\n      resource_region: '{{.region}}'\n      resource_type: S3\n      bucket_url: 'https://{{.name}}.s3.{{.region}}.amazonaws.com'\n    metadata:\n      author: Selefra\n      description: Ensure to configure secure CORS rules for the Bucket.\n      id: SF010117\n      provider: AWS\n      remediation: remediation/s3/bucket_is_not_configured_with_cors_rules.md\n      severity: Low\n      tags:\n        - Security\n        - Misconfiguration\n      title: S3 Bucket is not configured with CORS rules\n    output: \"S3 Bucket is not configured with CORS rules, arn: {{.arn}}\""
  },
  {
    "path": "pkg/modules/module_loader/test_data/contains_sub_module/sub_module_b/sub_module_b_2/selefra.yml",
    "content": "selefra:\n#  cloud:\n#    project: example_project\n#    organization: example_org\n#    hostname: app.selefra.io\n#  connection:\n#    type: postgres\n#    username: postgres\n#    password: pass\n#    host: localhost\n#    port: \"5432\"\n#    database: postgres\n#    sslmode: disable\n#  name: example_project\n#  cli_version: v0.0.1\n  providers:\n    - name: aws\n      source: selefra/aws\n      version: \">=0.0.9,<=0.0.10\"\n    - name: gcp\n      source: selefra/gcp\n      version: \">=0.0.9,<=0.0.10\""
  },
  {
    "path": "pkg/modules/module_loader/test_data/contains_sub_module/test.yaml",
    "content": "rules:\n  - name: bucket_is_not_configured_with_cors_rules\n    query: |\n      SELECT\n        DISTINCT(a1.*)\n      FROM\n        aws_s3_buckets AS a1 FULL\n        OUTER JOIN aws_s3_bucket_cors_rules AS a2 ON a1.selefra_id = a2.aws_s3_buckets_selefra_id\n      WHERE\n        aws_s3_buckets_selefra_id IS NULL;\n    labels:\n      resource_account_id: '{{.account_id}}'\n      resource_id: '{{.arn}}'\n      resource_region: '{{.region}}'\n      resource_type: S3\n      bucket_url: 'https://{{.name}}.s3.{{.region}}.amazonaws.com'\n    metadata:\n      author: Selefra\n      description: Ensure to configure secure CORS rules for the Bucket.\n      id: SF010117\n      provider: AWS\n      remediation: remediation/s3/bucket_is_not_configured_with_cors_rules.md\n      severity: Low\n      tags:\n        - Security\n        - Misconfiguration\n      title: S3 Bucket is not configured with CORS rules\n    output: \"S3 Bucket is not configured with CORS rules, arn: {{.arn}}\"\n\nselefra:\n  cloud:\n    project: example_project\n    organization: example_org\n    hostname: app.selefra.io\n  connection:\n    type: postgres\n    username: postgres\n    password: pass\n    host: localhost\n    port: \"5432\"\n    database: postgres\n    sslmode: disable\n  name: example_project\n  cli_version: v0.0.1\n  providers:\n    - name: aws\n      source: selefra/aws\n      version: \">=0.0.9,<=0.0.10\"\n    - name: gcp\n      source: selefra/gcp\n      version: \">=0.0.9,<=0.0.10\"\n\nmodules:\n  - name: Misconfiguration-S3\n    uses:\n      - ./sub_module_b/sub_module_b_1\n      - ./sub_module_b/sub_module_b_2\n      - ./sub_module_a/sub_module_a_1\n      - ./sub_module_a/sub_module_a_2\n  - name: example_module\n    #    uses: ./rules/\n    input:\n      name: selefra\n\nvariables:\n  - key: test\n    default:\n      a: 1\n      b: 1\n      c: 1\n\nproviders:\n  - name: aws_01\n    cache: 1d\n    provider: aws\n    resources:\n      - aws_s3_buckets\n      - 
aws_s3_accounts\n    #  Optional, Repeated. Add an accounts block for every account you want to assume-role into and fetch data from.\n    accounts:\n      #     Optional. User identification\n      - account_name: <UNIQUE ACCOUNT IDENTIFIER>\n        #    Optional. Named profile in config or credential file from where Selefra should grab credentials\n        shared_config_profile: < PROFILE_NAME >\n        #    Optional. Location of shared configuration files\n        shared_config_files:\n          - <FILE_PATH>\n        #   Optional. Location of shared credentials files\n        shared_credentials_files:\n          - <FILE_PATH>\n        #    Optional. Role ARN we want to assume when accessing this account\n        role_arn: < YOUR_ROLE_ARN >\n        #    Optional. Named role session to grab specific operation under the assumed role\n        role_session_name: <SESSION_NAME>\n        #    Optional. Any outside of the org account id that has additional control\n        external_id: <ID>\n        #    Optional. Designated region of servers\n        default_region: <REGION_CODE>\n        #    Optional. by default assumes all regions\n        regions:\n          - us-east-1\n          - us-west-2\n    #    The maximum number of times that a request will be retried for failures. Defaults to 10 retry attempts.\n    max_attempts: 10\n    #    The maximum back off delay between attempts. The backoff delays exponentially with a jitter based on the number of attempts. Defaults to 30 seconds.\n    max_backoff: 30"
  },
  {
    "path": "pkg/modules/module_loader/test_data/dead_loop_module/modules.yaml",
    "content": "selefra:\n  cloud:\n    project: example_project\n    organization: example_org\n    hostname: app.selefra.io\n  connection:\n    type: postgres\n    username: postgres\n    password: pass\n    host: localhost\n    port: \"5432\"\n    database: postgres\n    sslmode: disable\n  name: example_project\n  cli_version: v0.0.1\n  providers:\n    - name: aws\n      source: selefra/aws\n      version: \">=0.0.9,<=0.0.10\"\n    - name: gcp\n      source: selefra/gcp\n      version: \">=0.0.9,<=0.0.10\"\n\nmodules:\n  - name: Misconfiguration-S3\n    uses:\n      - ./sub_module_a/\n      - ./sub_module_b/\n"
  },
  {
    "path": "pkg/modules/module_loader/test_data/dead_loop_module/sub_module_a/modules.yaml",
    "content": "selefra:\n  cloud:\n    project: example_project\n    organization: example_org\n    hostname: app.selefra.io\n  connection:\n    type: postgres\n    username: postgres\n    password: pass\n    host: localhost\n    port: \"5432\"\n    database: postgres\n    sslmode: disable\n  name: example_project\n  cli_version: v0.0.1\n  providers:\n    - name: aws\n      source: selefra/aws\n      version: \">=0.0.9,<=0.0.10\"\n    - name: gcp\n      source: selefra/gcp\n      version: \">=0.0.9,<=0.0.10\"\n\nmodules:\n  - name: Misconfiguration-S3\n    uses:\n      - ./../sub_module_b\n"
  },
  {
    "path": "pkg/modules/module_loader/test_data/dead_loop_module/sub_module_b/modules.yaml",
    "content": "selefra:\n  cloud:\n    project: example_project\n    organization: example_org\n    hostname: app.selefra.io\n  connection:\n    type: postgres\n    username: postgres\n    password: pass\n    host: localhost\n    port: \"5432\"\n    database: postgres\n    sslmode: disable\n  name: example_project\n  cli_version: v0.0.1\n  providers:\n    - name: aws\n      source: selefra/aws\n      version: \">=0.0.9,<=0.0.10\"\n    - name: gcp\n      source: selefra/gcp\n      version: \">=0.0.9,<=0.0.10\"\n\nmodules:\n  - name: Misconfiguration-S3\n    uses:\n      - ./../sub_module_a\n"
  },
  {
    "path": "pkg/modules/module_loader/test_data/module_mixed/sub_module_local/modules.yml",
    "content": "modules:\n  - name: sub_module_local\n    uses:\n  #      - rules/s3/bucket_acl_publicly_readable.yaml\n  #      - rules/s3/bucket_acl_publicly_writeable.yaml\n  #      - rules/s3/bucket_allow_http_access.yaml\n  #      - rules/s3/bucket_default_encryption_disable.yaml\n  #      - rules/s3/bucket_is_not_configured_with_cors_rules.yaml\n  #      - rules/s3/bucket_logging_disable.yaml\n  #      - rules/s3/bucket_not_configured_block_public_access.yaml\n  #      - rules/s3/bucket_object_traversal_by_acl.yaml\n  #      - rules/s3/bucket_object_traversal_by_policy.yaml\n  #      - rules/s3/bucket_publicly_readable.yaml\n  #      - rules/s3/bucket_publicly_writeable.yaml\n  #      - rules/s3/bucket_source_ip_not_set.yaml\n  #      - rules/s3/bucket_versioning_is_disabled.yaml\n  #      - rules/s3/mfa_delete_is_disable.yaml\n  #      - rules/s3/s3_account_level_public_access_not_blocks.yaml\n  #      - rules/s3/s3_bucket_blacklisted_actions_prohibited.yaml\n  #      - rules/s3/s3_bucket_replication_disabled.yaml\n  #      - rules/s3/s3_is_not_protected_by_backup_plan.yaml\n  #      - rules/s3/s3_last_backup_recovery_point.yaml\n  #      - rules/s3/s3_not_default_encryption_kms.yaml\n  #      - rules/s3/s3_version_lifecycle_policy_not_check.yaml\n  #      - rules/s3/the_target_bucket_for_server_access_logging_is_the_bucket_itself.yaml\n#  - name: example_module\n#    #    uses: ./rules/\n#    input:\n#      name: selefra"
  },
  {
    "path": "pkg/modules/module_loader/test_data/module_mixed/sub_module_local/rules.yml",
    "content": "rules:\n  - name: rule_sub_module_local\n    query: |\n      SELECT\n        DISTINCT(a1.*)\n      FROM\n        aws_s3_buckets AS a1 FULL\n        OUTER JOIN aws_s3_bucket_cors_rules AS a2 ON a1.selefra_id = a2.aws_s3_buckets_selefra_id\n      WHERE\n        aws_s3_buckets_selefra_id IS NULL;\n    labels:\n      resource_account_id: '{{.account_id}}'\n      resource_id: '{{.arn}}'\n      resource_region: '{{.region}}'\n      resource_type: S3\n      bucket_url: 'https://{{.name}}.s3.{{.region}}.amazonaws.com'\n    metadata:\n      author: Selefra\n      description: Ensure to configure secure CORS rules for the Bucket.\n      id: SF010117\n      provider: AWS\n      remediation: remediation/s3/bucket_is_not_configured_with_cors_rules.md\n      severity: Low\n      tags:\n        - Security\n        - Misconfiguration\n      title: S3 Bucket is not configured with CORS rules\n    output: \"S3 Bucket is not configured with CORS rules, arn: {{.arn}}\""
  },
  {
    "path": "pkg/modules/module_loader/test_data/module_mixed/sub_module_local/selefra.yml",
    "content": "selefra:\n#  cloud:\n#    project: example_project\n#    organization: example_org\n#    hostname: app.selefra.io\n#  connection:\n#    type: postgres\n#    username: postgres\n#    password: pass\n#    host: localhost\n#    port: \"5432\"\n#    database: postgres\n#    sslmode: disable\n#  name: example_project\n#  cli_version: v0.0.1\n  providers:\n    - name: aws\n      source: selefra/aws\n      version: \">=0.0.9,<=0.0.10\"\n    - name: gcp\n      source: selefra/gcp\n      version: \">=0.0.9,<=0.0.10\""
  },
  {
    "path": "pkg/modules/module_loader/test_data/module_mixed/test.yaml",
    "content": "rules:\n  - name: bucket_is_not_configured_with_cors_rules\n    query: |\n      SELECT\n        DISTINCT(a1.*)\n      FROM\n        aws_s3_buckets AS a1 FULL\n        OUTER JOIN aws_s3_bucket_cors_rules AS a2 ON a1.selefra_id = a2.aws_s3_buckets_selefra_id\n      WHERE\n        aws_s3_buckets_selefra_id IS NULL;\n    labels:\n      resource_account_id: '{{.account_id}}'\n      resource_id: '{{.arn}}'\n      resource_region: '{{.region}}'\n      resource_type: S3\n      bucket_url: 'https://{{.name}}.s3.{{.region}}.amazonaws.com'\n    metadata:\n      author: Selefra\n      description: Ensure to configure secure CORS rules for the Bucket.\n      id: SF010117\n      provider: AWS\n      remediation: remediation/s3/bucket_is_not_configured_with_cors_rules.md\n      severity: Low\n      tags:\n        - Security\n        - Misconfiguration\n      title: S3 Bucket is not configured with CORS rules\n    output: \"S3 Bucket is not configured with CORS rules, arn: {{.arn}}\"\n\nselefra:\n  cloud:\n    project: example_project\n    organization: example_org\n    hostname: app.selefra.io\n  connection:\n    type: postgres\n    username: postgres\n    password: pass\n    host: localhost\n    port: \"5432\"\n    database: postgres\n    sslmode: disable\n  name: example_project\n  cli_version: v0.0.1\n  providers:\n    - name: aws\n      source: selefra/aws\n      version: \">=0.0.9,<=0.0.10\"\n    - name: gcp\n      source: selefra/gcp\n      version: \">=0.0.9,<=0.0.10\"\n\nmodules:\n  - name: Misconfiguration-S3\n    uses:\n      - ./sub_module_local\n      - rules-aws-misconfigure-s3\n\n\n\nvariables:\n  - key: test\n    default:\n      a: 1\n      b: 1\n      c: 1\n\nproviders:\n  - name: aws_01\n    cache: 1d\n    provider: aws\n    resources:\n      - aws_s3_buckets\n      - aws_s3_accounts\n    #  Optional, Repeated. Add an accounts block for every account you want to assume-role into and fetch data from.\n    accounts:\n      #     Optional. 
User identification\n      - account_name: <UNIQUE ACCOUNT IDENTIFIER>\n        #    Optional. Named profile in config or credential file from where Selefra should grab credentials\n        shared_config_profile: < PROFILE_NAME >\n        #    Optional. Location of shared configuration files\n        shared_config_files:\n          - <FILE_PATH>\n        #   Optional. Location of shared credentials files\n        shared_credentials_files:\n          - <FILE_PATH>\n        #    Optional. Role ARN we want to assume when accessing this account\n        role_arn: < YOUR_ROLE_ARN >\n        #    Optional. Named role session to grab specific operation under the assumed role\n        role_session_name: <SESSION_NAME>\n        #    Optional. Any outside of the org account id that has additional control\n        external_id: <ID>\n        #    Optional. Designated region of servers\n        default_region: <REGION_CODE>\n        #    Optional. by default assumes all regions\n        regions:\n          - us-east-1\n          - us-west-2\n    #    The maximum number of times that a request will be retried for failures. Defaults to 10 retry attempts.\n    max_attempts: 10\n    #    The maximum back off delay between attempts. The backoff delays exponentially with a jitter based on the number of attempts. Defaults to 30 seconds.\n    max_backoff: 30"
  },
  {
    "path": "pkg/modules/module_loader/test_data/module_use_circle/test.yaml",
    "content": "rules:\n  - name: bucket_is_not_configured_with_cors_rules\n    query: |\n      SELECT\n        DISTINCT(a1.*)\n      FROM\n        aws_s3_buckets AS a1 FULL\n        OUTER JOIN aws_s3_bucket_cors_rules AS a2 ON a1.selefra_id = a2.aws_s3_buckets_selefra_id\n      WHERE\n        aws_s3_buckets_selefra_id IS NULL;\n    labels:\n      resource_account_id: '{{.account_id}}'\n      resource_id: '{{.arn}}'\n      resource_region: '{{.region}}'\n      resource_type: S3\n      bucket_url: 'https://{{.name}}.s3.{{.region}}.amazonaws.com'\n    metadata:\n      author: Selefra\n      description: Ensure to configure secure CORS rules for the Bucket.\n      id: SF010117\n      provider: AWS\n      remediation: remediation/s3/bucket_is_not_configured_with_cors_rules.md\n      severity: Low\n      tags:\n        - Security\n        - Misconfiguration\n      title: S3 Bucket is not configured with CORS rules\n    output: \"S3 Bucket is not configured with CORS rules, arn: {{.arn}}\"\n\nselefra:\n  cloud:\n    project: example_project\n    organization: example_org\n    hostname: app.selefra.io\n  connection:\n    type: postgres\n    username: postgres\n    password: pass\n    host: localhost\n    port: \"5432\"\n    database: postgres\n    sslmode: disable\n  name: example_project\n  cli_version: v0.0.1\n  providers:\n    - name: aws\n      source: selefra/aws\n      version: \">=0.0.9,<=0.0.10\"\n    - name: gcp\n      source: selefra/gcp\n      version: \">=0.0.9,<=0.0.10\"\n\n#modules:\n#  - name: Misconfiguration-S3\n#    uses:\n#      - ./sub_module_b/sub_module_b_1\n#  - name: example_module\n#    #    uses: ./rules/\n#    input:\n#      name: selefra\nmodules:\n  name: Misconfiguration-S3\n  uses:\n    - ./sub_module_b/sub_module_b_1\n\n\nvariables:\n  - key: test\n    default:\n      a: 1\n      b: 1\n      c: 1\n\nproviders:\n  - name: aws_01\n    cache: 1d\n    provider: aws\n    resources:\n      - aws_s3_buckets\n      - aws_s3_accounts\n    #  
Optional, Repeated. Add an accounts block for every account you want to assume-role into and fetch data from.\n    accounts:\n      #     Optional. User identification\n      - account_name: <UNIQUE ACCOUNT IDENTIFIER>\n        #    Optional. Named profile in config or credential file from where Selefra should grab credentials\n        shared_config_profile: < PROFILE_NAME >\n        #    Optional. Location of shared configuration files\n        shared_config_files:\n          - <FILE_PATH>\n        #   Optional. Location of shared credentials files\n        shared_credentials_files:\n          - <FILE_PATH>\n        #    Optional. Role ARN we want to assume when accessing this account\n        role_arn: < YOUR_ROLE_ARN >\n        #    Optional. Named role session to grab specific operation under the assumed role\n        role_session_name: <SESSION_NAME>\n        #    Optional. Any outside of the org account id that has additional control\n        external_id: <ID>\n        #    Optional. Designated region of servers\n        default_region: <REGION_CODE>\n        #    Optional. by default assumes all regions\n        regions:\n          - us-east-1\n          - us-west-2\n    #    The maximum number of times that a request will be retried for failures. Defaults to 10 retry attempts.\n    max_attempts: 10\n    #    The maximum back off delay between attempts. The backoff delays exponentially with a jitter based on the number of attempts. Defaults to 30 seconds.\n    max_backoff: 30"
  },
  {
    "path": "pkg/modules/module_loader/test_data/normal_single_module/test.yaml",
    "content": "rules:\n  - name: bucket_is_not_configured_with_cors_rules\n    query: |\n      SELECT\n        DISTINCT(a1.*)\n      FROM\n        aws_s3_buckets AS a1 FULL\n        OUTER JOIN aws_s3_bucket_cors_rules AS a2 ON a1.selefra_id = a2.aws_s3_buckets_selefra_id\n      WHERE\n        aws_s3_buckets_selefra_id IS NULL;\n    labels:\n      resource_account_id: '{{.account_id}}'\n      resource_id: '{{.arn}}'\n      resource_region: '{{.region}}'\n      resource_type: S3\n      bucket_url: 'https://{{.name}}.s3.{{.region}}.amazonaws.com'\n    metadata:\n      author: Selefra\n      description: Ensure to configure secure CORS rules for the Bucket.\n      id: SF010117\n      provider: AWS\n      remediation: remediation/s3/bucket_is_not_configured_with_cors_rules.md\n      severity: Low\n      tags:\n        - Security\n        - Misconfiguration\n      title: S3 Bucket is not configured with CORS rules\n    output: \"S3 Bucket is not configured with CORS rules, arn: {{.arn}}\"\n\nselefra:\n  cloud:\n    project: example_project\n    organization: example_org\n    hostname: app.selefra.io\n  connection:\n    type: postgres\n    username: postgres\n    password: pass\n    host: localhost\n    port: \"5432\"\n    database: postgres\n    sslmode: disable\n  name: example_project\n  cli_version: v0.0.1\n  providers:\n    - name: aws\n      source: selefra/aws\n      version: \">=0.0.9,<=0.0.10\"\n    - name: gcp\n      source: selefra/gcp\n      version: \">=0.0.9,<=0.0.10\"\n\nmodules:\n  - name: Misconfiguration-S3\n    uses:\n      - ./sub_module_b/sub_module_b_1\n      - ./sub_module_b/sub_module_b_2\n      - ./sub_module_a/sub_module_a_1\n      - ./sub_module_a/sub_module_a_2\n  - name: example_module\n    #    uses: ./rules/\n    input:\n      name: selefra\n\nvariables:\n  - key: test\n    default:\n      a: 1\n      b: 1\n      c: 1\n\nproviders:\n  - name: aws_01\n    cache: 1d\n    provider: aws\n    resources:\n      - aws_s3_buckets\n      - 
aws_s3_accounts\n    #  Optional, Repeated. Add an accounts block for every account you want to assume-role into and fetch data from.\n    accounts:\n      #     Optional. User identification\n      - account_name: <UNIQUE ACCOUNT IDENTIFIER>\n        #    Optional. Named profile in config or credential file from where Selefra should grab credentials\n        shared_config_profile: < PROFILE_NAME >\n        #    Optional. Location of shared configuration files\n        shared_config_files:\n          - <FILE_PATH>\n        #   Optional. Location of shared credentials files\n        shared_credentials_files:\n          - <FILE_PATH>\n        #    Optional. Role ARN we want to assume when accessing this account\n        role_arn: < YOUR_ROLE_ARN >\n        #    Optional. Named role session to grab specific operation under the assumed role\n        role_session_name: <SESSION_NAME>\n        #    Optional. Any outside of the org account id that has additional control\n        external_id: <ID>\n        #    Optional. Designated region of servers\n        default_region: <REGION_CODE>\n        #    Optional. by default assumes all regions\n        regions:\n          - us-east-1\n          - us-west-2\n    #    The maximum number of times that a request will be retried for failures. Defaults to 10 retry attempts.\n    max_attempts: 10\n    #    The maximum back off delay between attempts. The backoff delays exponentially with a jitter based on the number of attempts. Defaults to 30 seconds.\n    max_backoff: 30"
  },
  {
    "path": "pkg/modules/module_loader/url_module_loader.go",
    "content": "package module_loader\n\nimport (\n\t\"context\"\n\t\"github.com/selefra/selefra-provider-sdk/provider/schema\"\n\t\"github.com/selefra/selefra-utils/pkg/md5_util\"\n\t\"github.com/selefra/selefra/pkg/http_client\"\n\t\"github.com/selefra/selefra/pkg/modules/module\"\n\t\"path/filepath\"\n)\n\n// ------------------------------------------------- --------------------------------------------------------------------\n\n// URLModuleLoaderOptions Parameter options when creating the URL module loader\ntype URLModuleLoaderOptions struct {\n\t*ModuleLoaderOptions\n\n\t// Module URL, It's a zip package\n\tModuleURL string `json:\"module-url\" yaml:\"module-url\"`\n}\n\n//func (x *URLModuleLoaderOptions) Copy() *URLModuleLoaderOptions {\n//\treturn &URLModuleLoaderOptions{\n//\t\t// TODO\n//\t\t//ModuleLoaderOptions: x.ModuleLoaderOptions.Copy(),\n//\t\tModuleURL: x.ModuleURL,\n//\t}\n//}\n\n//func (x *URLModuleLoaderOptions) CopyForURL(moduleURL string) *URLModuleLoaderOptions {\n//\toptions := x.Copy()\n//\toptions.ModuleURL = moduleURL\n//\treturn options\n//}\n\n// ------------------------------------------------- --------------------------------------------------------------------\n\n// URLModuleLoader Load the module from a URL, which should be a zipped package that happens to be the module's directory\ntype URLModuleLoader struct {\n\toptions *URLModuleLoaderOptions\n\n\t// Which path to download to\n\tmoduleDownloadDirectoryPath string\n}\n\nvar _ ModuleLoader[*URLModuleLoaderOptions] = &URLModuleLoader{}\n\nfunc NewURLModuleLoader(options *URLModuleLoaderOptions) (*URLModuleLoader, error) {\n\n\tdirectoryName, err := md5_util.Md5String(options.ModuleURL)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tmoduleDownloadDirectoryPath := filepath.Join(options.DownloadDirectory, DownloadModulesDirectoryName, string(ModuleLoaderTypeURL), directoryName)\n\n\treturn &URLModuleLoader{\n\t\toptions:                     options,\n\t\tmoduleDownloadDirectoryPath: 
moduleDownloadDirectoryPath,\n\t}, nil\n}\n\nfunc (x *URLModuleLoader) Name() ModuleLoaderType {\n\treturn ModuleLoaderTypeURL\n}\n\nfunc (x *URLModuleLoader) Load(ctx context.Context) (*module.Module, bool) {\n\n\tdefer func() {\n\t\tx.options.MessageChannel.SenderWaitAndClose()\n\t}()\n\n\t// step 01. Download and decompress\n\terr := http_client.DownloadToDirectory(ctx, x.moduleDownloadDirectoryPath, x.options.ModuleURL, x.options.ProgressTracker)\n\tif err != nil {\n\t\tx.options.MessageChannel.Send(schema.NewDiagnostics().AddErrorMsg(\"module load from %s failed, error = %s\", x.options.ModuleURL, err.Error()))\n\t\treturn nil, false\n\t}\n\n\t// send tips\n\tx.options.MessageChannel.Send(schema.NewDiagnostics().AddInfo(\"download url module %s to local directory %s\", x.options.ModuleURL, x.moduleDownloadDirectoryPath))\n\n\t// step 02. The download is decompressed and converted to loading from the local path\n\tlocalDirectoryModuleLoaderOptions := &LocalDirectoryModuleLoaderOptions{\n\t\tModuleLoaderOptions: &ModuleLoaderOptions{\n\t\t\tSource:  x.options.Source,\n\t\t\tVersion: x.options.Version,\n\t\t\t// TODO\n\t\t\t//ProgressTracker:   x.ProgressTracker,\n\t\t\tDownloadDirectory: x.options.DownloadDirectory,\n\t\t\tMessageChannel:    x.options.MessageChannel.MakeChildChannel(),\n\t\t\t// The dependency level does not increase\n\t\t\tDependenciesTree: x.options.DependenciesTree,\n\t\t},\n\t\tModuleDirectory: x.moduleDownloadDirectoryPath,\n\t}\n\tloader, err := NewLocalDirectoryModuleLoader(localDirectoryModuleLoaderOptions)\n\tif err != nil {\n\t\tlocalDirectoryModuleLoaderOptions.MessageChannel.SenderWaitAndClose()\n\t\tx.options.MessageChannel.Send(schema.NewDiagnostics().AddErrorMsg(\"create local directory %s module loader error: %s\", x.moduleDownloadDirectoryPath, err.Error()))\n\t\treturn nil, false\n\t}\n\n\treturn loader.Load(ctx)\n}\n\nfunc (x *URLModuleLoader) Options() *URLModuleLoaderOptions {\n\treturn x.options\n}\n"
  },
  {
    "path": "pkg/modules/module_loader/url_module_loader_test.go",
    "content": "package module_loader\n\nimport (\n\t\"context\"\n\t\"github.com/selefra/selefra-provider-sdk/provider/schema\"\n\t\"github.com/selefra/selefra/pkg/message\"\n\t\"github.com/selefra/selefra/pkg/utils\"\n\t\"github.com/stretchr/testify/assert\"\n\t\"testing\"\n)\n\nfunc TestURLModuleLoader_Load(t *testing.T) {\n\n\tsource := \"https://github.com/selefra/rules-aws-misconfiguration-s3/releases/download/v0.0.1/rules-aws-misconfigure-s3.zip\"\n\n\tmessageChannel := message.NewChannel[*schema.Diagnostics](func(index int, d *schema.Diagnostics) {\n\t\tif utils.IsNotEmpty(d) {\n\t\t\tt.Log(d.ToString())\n\t\t}\n\t})\n\tloader, err := NewURLModuleLoader(&URLModuleLoaderOptions{\n\t\tModuleLoaderOptions: &ModuleLoaderOptions{\n\t\t\tSource:            source,\n\t\t\tVersion:           \"\",\n\t\t\tDownloadDirectory: \"./test_download\",\n\t\t\t//ProgressTracker:   testProgressTracker{},\n\t\t\tMessageChannel:   messageChannel,\n\t\t\tDependenciesTree: []string{source},\n\t\t},\n\t\tModuleURL: source,\n\t})\n\tassert.Nil(t, err)\n\trootModule, b := loader.Load(context.Background())\n\tmessageChannel.ReceiverWait()\n\tassert.True(t, b)\n\tassert.NotNil(t, rootModule)\n\n}\n\n//type testProgressTracker struct {\n//}\n//\n//var _ getter.ProgressTracker = testProgressTracker{}\n//\n//func (x testProgressTracker) TrackProgress(src string, currentSize, totalSize int64, stream io.ReadCloser) (body io.ReadCloser) {\n//\tfmt.Println(float64(currentSize) * 100 / float64(totalSize))\n//\treturn stream\n//}\n"
  },
  {
    "path": "pkg/modules/parser/modules.go",
    "content": "package parser\n\nimport (\n\t\"fmt\"\n\t\"github.com/selefra/selefra-provider-sdk/provider/schema\"\n\t\"github.com/selefra/selefra-utils/pkg/reflect_util\"\n\t\"github.com/selefra/selefra/pkg/modules/module\"\n\t\"github.com/selefra/selefra/pkg/utils\"\n\t\"gopkg.in/yaml.v3\"\n)\n\n// ------------------------------------------------ ---------------------------------------------------------------------\n\nconst (\n\tModulesBlockName = \"modules\"\n)\n\n// Parse modules block\nfunc (x *YamlFileToModuleParser) parseModulesBlock(moduleBlockKeyNode, moduleBlockValueNode *yaml.Node, diagnostics *schema.Diagnostics) module.ModulesBlock {\n\n\t// modules must be an array element\n\tif moduleBlockValueNode.Kind != yaml.SequenceNode {\n\t\tdiagnostics.AddDiagnostics(x.buildNodeErrorMsgForArrayType(moduleBlockValueNode, ModulesBlockName))\n\t\treturn nil\n\t}\n\n\t// Parse each child element\n\tmodulesBlock := make(module.ModulesBlock, 0)\n\tfor index, moduleNode := range moduleBlockValueNode.Content {\n\t\tblock := x.parseModuleBlock(index, moduleNode, diagnostics)\n\t\tif block != nil {\n\t\t\tmodulesBlock = append(modulesBlock, block)\n\t\t}\n\t}\n\n\treturn modulesBlock\n}\n\n// ------------------------------------------------ ---------------------------------------------------------------------\n\nconst (\n\tModuleBlockNameFieldName   = \"name\"\n\tModuleBlockUsesFieldName   = \"uses\"\n\tModuleBlockFilterFieldName = \"filter\"\n\tModuleBlockInputFieldName  = \"input\"\n)\n\n// Parse module block\nfunc (x *YamlFileToModuleParser) parseModuleBlock(moduleIndex int, moduleBlockNode *yaml.Node, diagnostics *schema.Diagnostics) *module.ModuleBlock {\n\n\tblockPath := fmt.Sprintf(\"%s[%d]\", ModulesBlockName, moduleIndex)\n\n\ttoMap, d := x.toMap(moduleBlockNode, blockPath)\n\tdiagnostics.AddDiagnostics(d)\n\tif utils.HasError(d) {\n\t\treturn nil\n\t}\n\n\tmoduleBlock := module.NewModuleBlock()\n\tfor key, entry := range toMap {\n\t\tswitch key {\n\t\tcase 
ModuleBlockNameFieldName:\n\t\t\tmoduleBlock.Name = x.parseStringValueWithDiagnosticsAndSetLocation(moduleBlock, ModuleBlockNameFieldName, entry, blockPath, diagnostics)\n\n\t\tcase ModuleBlockUsesFieldName:\n\t\t\tmoduleBlock.Uses = x.parseStringValueWithDiagnosticsAndSetLocation(moduleBlock, ModuleBlockUsesFieldName, entry, blockPath, diagnostics)\n\n\t\tcase ModuleBlockFilterFieldName:\n\t\t\tmoduleBlock.Filter = x.parseFilterValueWithDiagnosticsAndSetLocation(moduleBlock, ModuleBlockFilterFieldName, entry, blockPath, diagnostics)\n\n\t\tcase ModuleBlockInputFieldName:\n\t\t\tinputMap := x.parseModuleInputBlock(moduleBlock, moduleIndex, entry.key, entry.value, diagnostics)\n\t\t\tif len(inputMap) != 0 {\n\t\t\t\tmoduleBlock.Input = inputMap\n\t\t\t}\n\n\t\tdefault:\n\t\t\tdiagnostics.AddDiagnostics(x.buildNodeErrorMsgForUnSupport(entry.key, entry.value, fmt.Sprintf(\"%s.%s\", blockPath, key)))\n\n\t\t}\n\t}\n\n\tif moduleBlock.IsEmpty() {\n\t\treturn nil\n\t}\n\n\t// set location\n\tx.setLocationKVWithDiagnostics(moduleBlock, \"\", blockPath, newNodeEntry(nil, moduleBlockNode), diagnostics)\n\n\treturn moduleBlock\n}\n\nfunc (x *YamlFileToModuleParser) parseModuleInputBlock(moduleBlock *module.ModuleBlock, index int, keyNode, valueNode *yaml.Node, diagnostics *schema.Diagnostics) map[string]any {\n\n\tblockPath := fmt.Sprintf(\"%s[%d].%s\", ModulesBlockName, index, ModuleBlockInputFieldName)\n\n\tif valueNode.Kind != yaml.MappingNode {\n\t\tdiagnostics.AddDiagnostics(x.buildNodeErrorMsgForMappingType(valueNode, blockPath))\n\t\treturn nil\n\t}\n\n\ttoMap, d := x.toMap(valueNode, blockPath)\n\tdiagnostics.AddDiagnostics(d)\n\tif utils.HasError(d) {\n\t\treturn nil\n\t}\n\n\tinputMap := make(map[string]any)\n\tfor key, entry := range toMap {\n\t\tparseAny, d := x.parseAny(entry.value, fmt.Sprintf(\"%s.%s\", blockPath, key))\n\t\tdiagnostics.AddDiagnostics(d)\n\t\tif !reflect_util.IsNil(parseAny) {\n\t\t\tinputMap[key] = parseAny\n\n\t\t\t// set 
location\n\t\t\tx.setLocationKVWithDiagnostics(moduleBlock, ModuleBlockInputFieldName+\".\"+key, blockPath, entry, diagnostics)\n\t\t}\n\t}\n\n\tif len(inputMap) == 0 {\n\t\treturn nil\n\t}\n\n\tx.setLocationKVWithDiagnostics(moduleBlock, ModuleBlockInputFieldName, blockPath, newNodeEntry(keyNode, valueNode), diagnostics)\n\n\treturn inputMap\n}\n\n// ------------------------------------------------ ---------------------------------------------------------------------\n"
  },
  {
    "path": "pkg/modules/parser/modules_test.go",
    "content": "package parser\n\nimport (\n\t\"github.com/selefra/selefra/pkg/utils\"\n\t\"github.com/stretchr/testify/assert\"\n\t\"testing\"\n)\n\nfunc TestYamlFileToModuleParser_parseModulesBlock(t *testing.T) {\n\tmodule, diagnostics := NewYamlFileToModuleParser(\"./test_data/test_parse_modules/modules.yaml\", make(map[string]interface{})).Parse()\n\tif utils.IsNotEmpty(diagnostics) {\n\t\tt.Log(diagnostics.ToString())\n\t}\n\tassert.False(t, utils.HasError(diagnostics))\n\tassert.NotNil(t, module.ModulesBlock)\n\n\tmoduleBlock := module.ModulesBlock[1]\n\tassert.NotEmpty(t, moduleBlock.GetNodeLocation(\"\").ReadSourceString())\n\tassert.NotEmpty(t, moduleBlock.GetNodeLocation(\"._value\").ReadSourceString())\n\n\tassert.NotEmpty(t, moduleBlock.GetNodeLocation(\"name._key\").ReadSourceString())\n\tassert.NotEmpty(t, moduleBlock.GetNodeLocation(\"name._value\").ReadSourceString())\n\tassert.NotEmpty(t, moduleBlock.GetNodeLocation(\"name\").ReadSourceString())\n\n\tassert.NotEmpty(t, moduleBlock.GetNodeLocation(\"input._key\").ReadSourceString())\n\tassert.NotEmpty(t, moduleBlock.GetNodeLocation(\"input._value\").ReadSourceString())\n\tassert.NotEmpty(t, moduleBlock.GetNodeLocation(\"input\").ReadSourceString())\n\n\tassert.NotEmpty(t, moduleBlock.GetNodeLocation(\"input.name._key\").ReadSourceString())\n\tassert.NotEmpty(t, moduleBlock.GetNodeLocation(\"input.name._value\").ReadSourceString())\n\tassert.NotEmpty(t, moduleBlock.GetNodeLocation(\"input.name\").ReadSourceString())\n\n\tassert.NotEmpty(t, moduleBlock.GetNodeLocation(\"uses._key\").ReadSourceString())\n\tassert.NotEmpty(t, moduleBlock.GetNodeLocation(\"uses._value\").ReadSourceString())\n\tassert.NotEmpty(t, moduleBlock.GetNodeLocation(\"uses\").ReadSourceString())\n\n\tassert.NotEmpty(t, moduleBlock.GetNodeLocation(\"uses[0]\").ReadSourceString())\n\tassert.NotEmpty(t, moduleBlock.GetNodeLocation(\"uses[0]._value\").ReadSourceString())\n\n}\n"
  },
  {
    "path": "pkg/modules/parser/providers.go",
    "content": "package parser\n\nimport (\n\t\"fmt\"\n\t\"github.com/selefra/selefra-provider-sdk/provider/schema\"\n\t\"github.com/selefra/selefra/pkg/modules/module\"\n\t\"github.com/selefra/selefra/pkg/utils\"\n\t\"gopkg.in/yaml.v3\"\n)\n\n// ------------------------------------------------ ---------------------------------------------------------------------\n\nconst (\n\tProvidersBlockName = \"providers\"\n)\n\nfunc (x *YamlFileToModuleParser) parseProvidersBlock(providersBlockKeyNode, providersBlockValueNode *yaml.Node, diagnostics *schema.Diagnostics) module.ProvidersBlock {\n\n\t// modules must be an array element\n\tif providersBlockValueNode.Kind != yaml.SequenceNode {\n\t\tdiagnostics.AddDiagnostics(x.buildNodeErrorMsgForArrayType(providersBlockKeyNode, ProvidersBlockName))\n\t\treturn nil\n\t}\n\n\t// Parse each child element\n\tmodulesBlock := make(module.ProvidersBlock, 0)\n\tfor index, moduleNode := range providersBlockValueNode.Content {\n\t\tblock := x.parseProviderBlock(index, moduleNode, diagnostics)\n\t\tif block != nil {\n\t\t\tmodulesBlock = append(modulesBlock, block)\n\t\t}\n\t}\n\treturn modulesBlock\n}\n\n// ------------------------------------------------ ---------------------------------------------------------------------\n\nconst (\n\tProviderBlockNameFieldName          = \"name\"\n\tProviderBlockCacheFieldName         = \"cache\"\n\tProviderBlockProviderFieldName      = \"provider\"\n\tProviderBlockMaxGoroutinesFieldName = \"max_goroutines\"\n\tProviderBlockResourcesFieldName     = \"resources\"\n\n\t// ProviderBlockProvidersConfigYamlStringFieldName Virtual field\n\t//ProviderBlockProvidersConfigYamlStringFieldName = \"\"\n)\n\nfunc (x *YamlFileToModuleParser) parseProviderBlock(index int, providerBlockValueNode *yaml.Node, diagnostics *schema.Diagnostics) *module.ProviderBlock {\n\n\tblockPath := fmt.Sprintf(\"%s[%d]\", ProvidersBlockName, index)\n\n\ttoMap, d := x.toMap(providerBlockValueNode, 
blockPath)\n\tdiagnostics.AddDiagnostics(d)\n\tif utils.HasError(d) {\n\t\treturn nil\n\t}\n\n\tproviderBlock := module.NewProviderBlock()\n\n\t// name\n\tentry, exists := toMap[ProviderBlockNameFieldName]\n\tif exists {\n\t\tproviderBlock.Name = x.parseStringValueWithDiagnosticsAndSetLocation(providerBlock, ProviderBlockNameFieldName, entry, blockPath, diagnostics)\n\t}\n\n\t// cache\n\tentry, exists = toMap[ProviderBlockCacheFieldName]\n\tif exists {\n\t\tproviderBlock.Cache = x.parseStringValueWithDiagnosticsAndSetLocation(providerBlock, ProviderBlockCacheFieldName, entry, blockPath, diagnostics)\n\t}\n\n\t// provider\n\tentry, exists = toMap[ProviderBlockProviderFieldName]\n\tif exists {\n\t\tproviderBlock.Provider = x.parseStringValueWithDiagnosticsAndSetLocation(providerBlock, ProviderBlockProviderFieldName, entry, blockPath, diagnostics)\n\t}\n\n\t// max_goroutines\n\tentry, exists = toMap[ProviderBlockMaxGoroutinesFieldName]\n\tif exists {\n\t\tproviderBlock.MaxGoroutines = x.parseUintValueWithDiagnosticsAndSetLocation(providerBlock, ProviderBlockMaxGoroutinesFieldName, entry, blockPath, diagnostics)\n\t}\n\n\t// resources\n\tentry, exists = toMap[ProviderBlockResourcesFieldName]\n\tif exists {\n\t\tproviderBlock.Resources = x.parseStringSliceAndSetLocation(providerBlock, ProviderBlockResourcesFieldName, entry, blockPath, diagnostics)\n\t}\n\n\tout, err := yaml.Marshal(providerBlockValueNode)\n\tif err != nil {\n\t\t// TODO build error message\n\t\tdiagnostics.AddErrorMsg(\"build error message\")\n\t\treturn nil\n\t}\n\tproviderBlock.ProvidersConfigYamlString = string(out)\n\n\tif providerBlock.IsEmpty() {\n\t\treturn nil\n\t}\n\n\t// set location\n\tx.setLocationKVWithDiagnostics(providerBlock, \"\", blockPath, newNodeEntry(nil, providerBlockValueNode), diagnostics)\n\n\treturn providerBlock\n}\n\n// ------------------------------------------------ ---------------------------------------------------------------------\n"
  },
  {
    "path": "pkg/modules/parser/providers_test.go",
    "content": "package parser\n\nimport (\n\t\"fmt\"\n\t\"github.com/selefra/selefra/pkg/utils\"\n\t\"github.com/stretchr/testify/assert\"\n\t\"testing\"\n)\n\nfunc TestYamlFileToModuleParser_parseProvidersBlock(t *testing.T) {\n\tmodule, diagnostics := NewYamlFileToModuleParser(\"./test_data/test_parse_providers/modules.yaml\").Parse()\n\tif utils.IsNotEmpty(diagnostics) {\n\t\tt.Log(diagnostics.ToString())\n\t}\n\tassert.False(t, utils.HasError(diagnostics))\n\tassert.NotNil(t, module.ProvidersBlock)\n\n\tproviderBlock := module.ProvidersBlock[0]\n\tassert.NotEmpty(t, providerBlock.GetNodeLocation(\"\").ReadSourceString())\n\tassert.NotEmpty(t, providerBlock.GetNodeLocation(\"._value\").ReadSourceString())\n\n\tassert.NotEmpty(t, providerBlock.GetNodeLocation(\"name._key\").ReadSourceString())\n\tassert.NotEmpty(t, providerBlock.GetNodeLocation(\"name._value\").ReadSourceString())\n\tassert.NotEmpty(t, providerBlock.GetNodeLocation(\"name\").ReadSourceString())\n\n\tassert.NotEmpty(t, providerBlock.GetNodeLocation(\"cache._key\").ReadSourceString())\n\tassert.NotEmpty(t, providerBlock.GetNodeLocation(\"cache._value\").ReadSourceString())\n\tassert.NotEmpty(t, providerBlock.GetNodeLocation(\"cache\").ReadSourceString())\n\n\tassert.NotEmpty(t, providerBlock.GetNodeLocation(\"resources._key\").ReadSourceString())\n\tassert.NotEmpty(t, providerBlock.GetNodeLocation(\"resources._value\").ReadSourceString())\n\tassert.NotEmpty(t, providerBlock.GetNodeLocation(\"resources\").ReadSourceString())\n\n\tfor i := 0; i < len(providerBlock.Resources); i++ {\n\t\tassert.NotEmpty(t, providerBlock.GetNodeLocation(fmt.Sprintf(\"resources[%d]\", i)).ReadSourceString())\n\t\tassert.NotEmpty(t, providerBlock.GetNodeLocation(fmt.Sprintf(\"resources[%d]._value\", i)).ReadSourceString())\n\t}\n}\n"
  },
  {
    "path": "pkg/modules/parser/rules.go",
    "content": "package parser\n\nimport (\n\t\"fmt\"\n\t\"github.com/selefra/selefra-provider-sdk/provider/schema\"\n\t\"github.com/selefra/selefra/pkg/modules/module\"\n\t\"github.com/selefra/selefra/pkg/utils\"\n\t\"gopkg.in/yaml.v3\"\n)\n\n// ------------------------------------------------ ---------------------------------------------------------------------\n\nconst (\n\tRulesBlockName = \"rules\"\n)\n\nfunc (x *YamlFileToModuleParser) parseRulesBlock(rulesBlockKeyNode, rulesBlockValueNode *yaml.Node, diagnostics *schema.Diagnostics) module.RulesBlock {\n\tif x.instruction != nil && x.instruction[\"query\"] == \"\" {\n\t\treturn nil\n\t}\n\t// modules must be an array element\n\tif rulesBlockValueNode.Kind != yaml.SequenceNode {\n\t\tdiagnostics.AddDiagnostics(x.buildNodeErrorMsgForArrayType(rulesBlockValueNode, RulesBlockName))\n\t\treturn nil\n\t}\n\n\t// Parse each child element\n\trulesBlock := make(module.RulesBlock, 0)\n\tfor index, moduleNode := range rulesBlockValueNode.Content {\n\t\tblock := x.parseRuleBlock(index, moduleNode, diagnostics)\n\t\tif block != nil {\n\t\t\trulesBlock = append(rulesBlock, block)\n\t\t}\n\t}\n\n\treturn rulesBlock\n}\n\n// ------------------------------------------------ ---------------------------------------------------------------------\n\nconst (\n\tRuleBlockNameFieldName      = \"name\"\n\tRuleBlockQueryFieldName     = \"query\"\n\tRuleBlockLabelsFieldName    = \"labels\"\n\tRuleBlockMetadataFieldName  = \"metadata\"\n\tRuleBlockMainTableFieldName = \"main_table\"\n\tRuleBlockOutputFieldName    = \"output\"\n)\n\nfunc (x *YamlFileToModuleParser) parseRuleBlock(index int, ruleBlockNode *yaml.Node, diagnostics *schema.Diagnostics) *module.RuleBlock {\n\n\tblockPath := fmt.Sprintf(\"%s[%d]\", RulesBlockName, index)\n\n\ttoMap, d := x.toMap(ruleBlockNode, blockPath)\n\tdiagnostics.AddDiagnostics(d)\n\tif utils.HasError(d) {\n\t\treturn nil\n\t}\n\n\truleBlock := module.NewRuleBlock()\n\tfor key, entry := range toMap 
{\n\t\tswitch key {\n\n\t\tcase RuleBlockNameFieldName:\n\t\t\truleBlock.Name = x.parseStringValueWithDiagnosticsAndSetLocation(ruleBlock, RuleBlockNameFieldName, entry, blockPath, diagnostics)\n\n\t\tcase RuleBlockQueryFieldName:\n\t\t\truleBlock.Query = x.parseStringValueWithDiagnosticsAndSetLocation(ruleBlock, RuleBlockQueryFieldName, entry, blockPath, diagnostics)\n\n\t\tcase RuleBlockLabelsFieldName:\n\t\t\truleBlock.Labels = x.parseStringMapAndSetLocation(ruleBlock, RuleBlockLabelsFieldName, entry, blockPath, diagnostics)\n\n\t\tcase RuleBlockMetadataFieldName:\n\t\t\truleBlock.MetadataBlock = x.parseMetadataBlock(index, ruleBlock, entry.key, entry.value, diagnostics)\n\n\t\tcase RuleBlockMainTableFieldName:\n\t\t\truleBlock.MainTable = x.parseStringValueWithDiagnosticsAndSetLocation(ruleBlock, RuleBlockMainTableFieldName, entry, blockPath, diagnostics)\n\n\t\tcase RuleBlockOutputFieldName:\n\t\t\truleBlock.Output = x.parseStringValueWithDiagnosticsAndSetLocation(ruleBlock, RuleBlockOutputFieldName, entry, blockPath, diagnostics)\n\n\t\tdefault:\n\t\t\tdiagnostics.AddDiagnostics(x.buildNodeErrorMsgForUnSupport(entry.key, entry.value, fmt.Sprintf(\"%s.%s\", blockPath, key)))\n\t\t}\n\t}\n\n\tif ruleBlock.IsEmpty() {\n\t\treturn nil\n\t}\n\n\t// set location\n\tx.setLocationKVWithDiagnostics(ruleBlock, \"\", blockPath, newNodeEntry(nil, ruleBlockNode), diagnostics)\n\n\treturn ruleBlock\n}\n\n// ------------------------------------------------ ---------------------------------------------------------------------\n\nconst (\n\tRuleMetadataBlockName                 = \"metadata\"\n\tRuleMetadataBlockIdFieldName          = \"id\"\n\tRuleMetadataBlockSeverityFieldName    = \"severity\"\n\tRuleMetadataBlockProviderFieldName    = \"provider\"\n\tRuleMetadataBlockTagsFieldName        = \"tags\"\n\tRuleMetadataBlockAuthorFieldName      = \"author\"\n\tRuleMetadataBlockRemediationFieldName = \"remediation\"\n\tRuleMetadataBlockTitleFieldName       = 
\"title\"\n\tRuleMetadataBlockDescriptionFieldName = \"description\"\n\tRuleMetadataBlockMainTableFieldName   = \"main_table\"\n)\n\nfunc (x *YamlFileToModuleParser) parseMetadataBlock(ruleIndex int, ruleBlock *module.RuleBlock, metadataBlockKeyNode, metadataBlockValueNode *yaml.Node, diagnostics *schema.Diagnostics) *module.RuleMetadataBlock {\n\n\tblockPath := fmt.Sprintf(\"%s[%d].%s\", RulesBlockName, ruleIndex, RuleMetadataBlockName)\n\n\ttoMap, d := x.toMap(metadataBlockValueNode, blockPath)\n\tdiagnostics.AddDiagnostics(d)\n\tif utils.HasError(d) {\n\t\treturn nil\n\t}\n\n\truleMetadataBlock := module.NewRuleMetadataBlock(ruleBlock)\n\tfor key, entry := range toMap {\n\t\tswitch key {\n\n\t\tcase RuleMetadataBlockIdFieldName:\n\t\t\truleMetadataBlock.Id = x.parseStringValueWithDiagnosticsAndSetLocation(ruleMetadataBlock, RuleMetadataBlockIdFieldName, entry, blockPath, diagnostics)\n\n\t\tcase RuleMetadataBlockSeverityFieldName:\n\t\t\truleMetadataBlock.Severity = x.parseStringValueWithDiagnosticsAndSetLocation(ruleMetadataBlock, RuleMetadataBlockSeverityFieldName, entry, blockPath, diagnostics)\n\n\t\tcase RuleMetadataBlockProviderFieldName:\n\t\t\truleMetadataBlock.Provider = x.parseStringValueWithDiagnosticsAndSetLocation(ruleMetadataBlock, RuleMetadataBlockProviderFieldName, entry, blockPath, diagnostics)\n\n\t\tcase RuleMetadataBlockTagsFieldName:\n\t\t\truleMetadataBlock.Tags = x.parseStringSliceAndSetLocation(ruleMetadataBlock, RuleMetadataBlockTagsFieldName, entry, blockPath, diagnostics)\n\n\t\tcase RuleMetadataBlockAuthorFieldName:\n\t\t\truleMetadataBlock.Author = x.parseStringValueWithDiagnosticsAndSetLocation(ruleMetadataBlock, RuleMetadataBlockAuthorFieldName, entry, blockPath, diagnostics)\n\n\t\tcase RuleMetadataBlockRemediationFieldName:\n\t\t\truleMetadataBlock.Remediation = x.parseStringValueWithDiagnosticsAndSetLocation(ruleMetadataBlock, RuleMetadataBlockRemediationFieldName, entry, blockPath, diagnostics)\n\n\t\tcase 
RuleMetadataBlockTitleFieldName:\n\t\t\truleMetadataBlock.Title = x.parseStringValueWithDiagnosticsAndSetLocation(ruleMetadataBlock, RuleMetadataBlockTitleFieldName, entry, blockPath, diagnostics)\n\n\t\tcase RuleMetadataBlockDescriptionFieldName:\n\t\t\truleMetadataBlock.Description = x.parseStringValueWithDiagnosticsAndSetLocation(ruleMetadataBlock, RuleMetadataBlockDescriptionFieldName, entry, blockPath, diagnostics)\n\n\t\tcase RuleMetadataBlockMainTableFieldName:\n\t\t\truleMetadataBlock.MainTable = x.parseStringValueWithDiagnosticsAndSetLocation(ruleMetadataBlock, RuleMetadataBlockMainTableFieldName, entry, blockPath, diagnostics)\n\n\t\tdefault:\n\t\t\tdiagnostics.AddDiagnostics(x.buildNodeErrorMsgForUnSupport(entry.key, entry.value, fmt.Sprintf(\"%s.%s\", blockPath, key)))\n\t\t}\n\t}\n\n\tif ruleMetadataBlock.IsEmpty() {\n\t\treturn nil\n\t}\n\n\t// set location\n\tx.setLocationKVWithDiagnostics(ruleMetadataBlock, \"\", blockPath, newNodeEntry(metadataBlockKeyNode, metadataBlockValueNode), diagnostics)\n\n\treturn ruleMetadataBlock\n}\n\n// ------------------------------------------------ ---------------------------------------------------------------------\n"
  },
  {
    "path": "pkg/modules/parser/rules_test.go",
    "content": "package parser\n\nimport (\n\t\"fmt\"\n\t\"github.com/selefra/selefra/pkg/utils\"\n\t\"github.com/stretchr/testify/assert\"\n\t\"testing\"\n)\n\nfunc TestYamlFileToModuleParser_parseRulesBlock(t *testing.T) {\n\tmodule, diagnostics := NewYamlFileToModuleParser(\"./test_data/test_parse_rules/modules.yaml\", make(map[string]interface{})).Parse()\n\tif utils.IsNotEmpty(diagnostics) {\n\t\tt.Log(diagnostics.ToString())\n\t}\n\tassert.False(t, utils.HasError(diagnostics))\n\tassert.NotNil(t, module.RulesBlock)\n\n\truleBlock := module.RulesBlock[0]\n\tassert.NotEmpty(t, ruleBlock.GetNodeLocation(\"\").ReadSourceString())\n\tassert.NotEmpty(t, ruleBlock.GetNodeLocation(\"._value\").ReadSourceString())\n\n\tassert.NotEmpty(t, ruleBlock.GetNodeLocation(\"name._key\").ReadSourceString())\n\tassert.NotEmpty(t, ruleBlock.GetNodeLocation(\"name._value\").ReadSourceString())\n\tassert.NotEmpty(t, ruleBlock.GetNodeLocation(\"name\").ReadSourceString())\n\n\tassert.NotEmpty(t, ruleBlock.GetNodeLocation(\"query._key\").ReadSourceString())\n\tassert.NotEmpty(t, ruleBlock.GetNodeLocation(\"query._value\").ReadSourceString())\n\tassert.NotEmpty(t, ruleBlock.GetNodeLocation(\"query\").ReadSourceString())\n\n\tassert.NotEmpty(t, ruleBlock.GetNodeLocation(\"output._key\").ReadSourceString())\n\tassert.NotEmpty(t, ruleBlock.GetNodeLocation(\"output._value\").ReadSourceString())\n\tassert.NotEmpty(t, ruleBlock.GetNodeLocation(\"output\").ReadSourceString())\n\n\tassert.NotEmpty(t, ruleBlock.GetNodeLocation(\"labels._key\").ReadSourceString())\n\tassert.NotEmpty(t, ruleBlock.GetNodeLocation(\"labels._value\").ReadSourceString())\n\tassert.NotEmpty(t, ruleBlock.GetNodeLocation(\"labels\").ReadSourceString())\n\n\tfor key, _ := range ruleBlock.Labels {\n\t\tassert.NotEmpty(t, ruleBlock.GetNodeLocation(fmt.Sprintf(\"labels.%s\", key)).ReadSourceString())\n\t\tassert.NotEmpty(t, ruleBlock.GetNodeLocation(fmt.Sprintf(\"labels.%s._key\", 
key)).ReadSourceString())\n\t\tassert.NotEmpty(t, ruleBlock.GetNodeLocation(fmt.Sprintf(\"labels.%s._value\", key)).ReadSourceString())\n\t}\n\n\tmetadataBlock := ruleBlock.MetadataBlock\n\tassert.NotEmpty(t, metadataBlock.GetNodeLocation(\"\").ReadSourceString())\n\tassert.NotEmpty(t, metadataBlock.GetNodeLocation(\"._key\").ReadSourceString())\n\tassert.NotEmpty(t, metadataBlock.GetNodeLocation(\"._value\").ReadSourceString())\n\n\tassert.NotEmpty(t, metadataBlock.GetNodeLocation(\"author._key\").ReadSourceString())\n\tassert.NotEmpty(t, metadataBlock.GetNodeLocation(\"author._value\").ReadSourceString())\n\tassert.NotEmpty(t, metadataBlock.GetNodeLocation(\"author\").ReadSourceString())\n\n\tassert.NotEmpty(t, metadataBlock.GetNodeLocation(\"description._key\").ReadSourceString())\n\tassert.NotEmpty(t, metadataBlock.GetNodeLocation(\"description._value\").ReadSourceString())\n\tassert.NotEmpty(t, metadataBlock.GetNodeLocation(\"description\").ReadSourceString())\n\n\tassert.NotEmpty(t, metadataBlock.GetNodeLocation(\"id._key\").ReadSourceString())\n\tassert.NotEmpty(t, metadataBlock.GetNodeLocation(\"id._value\").ReadSourceString())\n\tassert.NotEmpty(t, metadataBlock.GetNodeLocation(\"id\").ReadSourceString())\n\n\tassert.NotEmpty(t, metadataBlock.GetNodeLocation(\"provider._key\").ReadSourceString())\n\tassert.NotEmpty(t, metadataBlock.GetNodeLocation(\"provider._value\").ReadSourceString())\n\tassert.NotEmpty(t, metadataBlock.GetNodeLocation(\"provider\").ReadSourceString())\n\n\tassert.NotEmpty(t, metadataBlock.GetNodeLocation(\"remediation._key\").ReadSourceString())\n\tassert.NotEmpty(t, metadataBlock.GetNodeLocation(\"remediation._value\").ReadSourceString())\n\tassert.NotEmpty(t, metadataBlock.GetNodeLocation(\"remediation\").ReadSourceString())\n\n\tassert.NotEmpty(t, metadataBlock.GetNodeLocation(\"severity._key\").ReadSourceString())\n\tassert.NotEmpty(t, metadataBlock.GetNodeLocation(\"severity._value\").ReadSourceString())\n\tassert.NotEmpty(t, 
metadataBlock.GetNodeLocation(\"severity\").ReadSourceString())\n\n\tfor i := 0; i < len(metadataBlock.Tags); i++ {\n\t\tassert.NotEmpty(t, metadataBlock.GetNodeLocation(fmt.Sprintf(\"tags[%d]\", i)).ReadSourceString())\n\t\tassert.NotEmpty(t, metadataBlock.GetNodeLocation(fmt.Sprintf(\"tags[%d]._value\", i)).ReadSourceString())\n\t}\n\tassert.NotEmpty(t, metadataBlock.GetNodeLocation(\"title._key\").ReadSourceString())\n\tassert.NotEmpty(t, metadataBlock.GetNodeLocation(\"title._value\").ReadSourceString())\n\tassert.NotEmpty(t, metadataBlock.GetNodeLocation(\"title\").ReadSourceString())\n\n}\n"
  },
  {
    "path": "pkg/modules/parser/selefra.go",
    "content": "package parser\n\nimport (\n\t\"fmt\"\n\t\"github.com/selefra/selefra-provider-sdk/provider/schema\"\n\t\"github.com/selefra/selefra/pkg/modules/module\"\n\t\"github.com/selefra/selefra/pkg/utils\"\n\t\"gopkg.in/yaml.v3\"\n)\n\n// ------------------------------------------------- --------------------------------------------------------------------\n\nconst (\n\tSelefraBlockFieldName             = \"selefra\"\n\tSelefraBlockOpenaiApiKeyName      = \"openai_api_key\"\n\tSelefraBlockOpenaiModeName        = \"openai_mode\"\n\tSelefraBlockOpenaiLimitName       = \"openai_limit\"\n\tSelefraBlockNameFieldName         = \"name\"\n\tSelefraBlockCLIVersionFieldName   = \"cli_version\"\n\tSelefraBlockLogLevelFieldName     = \"log_level\"\n\tSelefraRequiredProvidersBlockName = \"providers\"\n\tSelefraConnectionsBlockName       = \"connection\"\n\tSelefraCloudBlockName             = \"cloud\"\n)\n\nfunc (x *YamlFileToModuleParser) parseSelefraBlock(selefraBlockKeyNode, selefraBlockValueNode *yaml.Node, diagnostics *schema.Diagnostics) *module.SelefraBlock {\n\n\tblockPath := SelefraBlockFieldName\n\n\t// type check\n\tif selefraBlockValueNode.Kind != yaml.MappingNode {\n\t\tdiagnostics.AddDiagnostics(x.buildNodeErrorMsgForMappingType(selefraBlockValueNode, blockPath))\n\t\treturn nil\n\t}\n\n\ttoMap, d := x.toMap(selefraBlockValueNode, blockPath)\n\tdiagnostics.AddDiagnostics(d)\n\tif utils.HasError(d) {\n\t\treturn nil\n\t}\n\n\tselefraBlock := module.NewSelefraBlock()\n\tfor key, entry := range toMap {\n\t\tswitch key {\n\n\t\tcase SelefraBlockNameFieldName:\n\t\t\tselefraBlock.Name = x.parseStringValueWithDiagnosticsAndSetLocation(selefraBlock, SelefraBlockNameFieldName, entry, blockPath, diagnostics)\n\n\t\tcase SelefraBlockOpenaiApiKeyName:\n\t\t\tselefraBlock.OpenaiApiKey = x.parseStringValueWithDiagnosticsAndSetLocation(selefraBlock, SelefraBlockOpenaiApiKeyName, entry, blockPath, diagnostics)\n\n\t\tcase 
SelefraBlockOpenaiModeName:\n\t\t\tselefraBlock.OpenaiMode = x.parseStringValueWithDiagnosticsAndSetLocation(selefraBlock, SelefraBlockOpenaiModeName, entry, blockPath, diagnostics)\n\n\t\tcase SelefraBlockOpenaiLimitName:\n\t\t\tselefraBlock.OpenaiLimit = *x.parseUintValueWithDiagnosticsAndSetLocation(selefraBlock, SelefraBlockOpenaiLimitName, entry, blockPath, diagnostics)\n\n\t\tcase SelefraBlockCLIVersionFieldName:\n\t\t\tselefraBlock.CliVersion = x.parseStringValueWithDiagnosticsAndSetLocation(selefraBlock, SelefraBlockCLIVersionFieldName, entry, blockPath, diagnostics)\n\n\t\tcase SelefraBlockLogLevelFieldName:\n\t\t\tselefraBlock.LogLevel = x.parseStringValueWithDiagnosticsAndSetLocation(selefraBlock, SelefraBlockLogLevelFieldName, entry, blockPath, diagnostics)\n\n\t\tcase SelefraCloudBlockName:\n\t\t\tselefraBlock.CloudBlock = x.parseCloudBlock(entry.key, entry.value, diagnostics)\n\n\t\tcase SelefraRequiredProvidersBlockName:\n\t\t\tselefraBlock.RequireProvidersBlock = x.parseRequiredProvidersBlock(entry.key, entry.value, diagnostics)\n\n\t\tcase SelefraConnectionsBlockName:\n\t\t\tselefraBlock.ConnectionBlock = x.parseConnectionBlock(entry.key, entry.value, diagnostics)\n\n\t\tdefault:\n\t\t\tdiagnostics.AddDiagnostics(x.buildNodeErrorMsgForUnSupport(entry.key, entry.value, fmt.Sprintf(\"%s.%s\", blockPath, key)))\n\t\t}\n\t}\n\n\tif selefraBlock.IsEmpty() {\n\t\treturn nil\n\t}\n\n\t// set code location\n\tx.setLocationKVWithDiagnostics(selefraBlock, \"\", blockPath, newNodeEntry(selefraBlockKeyNode, selefraBlockValueNode), diagnostics)\n\n\treturn selefraBlock\n}\n\n// ------------------------------------------------- --------------------------------------------------------------------\n\nconst (\n\tCloudBlockProjectFieldName      = \"project\"\n\tCloudBlockOrganizationFieldName = \"organization\"\n\tCloudBlockHostnameFieldName     = \"hostname\"\n)\n\nfunc (x *YamlFileToModuleParser) parseCloudBlock(cloudBlockKeyNode, cloudBlockValueNode *yaml.Node, 
diagnostics *schema.Diagnostics) *module.CloudBlock {\n\n\tblockPath := fmt.Sprintf(\"%s.%s\", SelefraBlockFieldName, \"cloud\")\n\n\t// type check\n\ttoMap, d := x.toMap(cloudBlockValueNode, blockPath)\n\tdiagnostics.AddDiagnostics(d)\n\tif utils.HasError(d) {\n\t\treturn nil\n\t}\n\n\tcloudBlock := module.NewCloudBlock()\n\tfor key, entry := range toMap {\n\t\tswitch key {\n\t\tcase CloudBlockProjectFieldName:\n\t\t\tcloudBlock.Project = x.parseStringValueWithDiagnosticsAndSetLocation(cloudBlock, CloudBlockProjectFieldName, entry, blockPath, diagnostics)\n\n\t\tcase CloudBlockOrganizationFieldName:\n\t\t\tcloudBlock.Organization = x.parseStringValueWithDiagnosticsAndSetLocation(cloudBlock, CloudBlockOrganizationFieldName, entry, blockPath, diagnostics)\n\n\t\tcase CloudBlockHostnameFieldName:\n\t\t\tcloudBlock.HostName = x.parseStringValueWithDiagnosticsAndSetLocation(cloudBlock, CloudBlockHostnameFieldName, entry, blockPath, diagnostics)\n\n\t\tdefault:\n\t\t\tdiagnostics.AddDiagnostics(x.buildNodeErrorMsgForUnSupport(entry.key, entry.value, fmt.Sprintf(\"%s.%s\", blockPath, key)))\n\n\t\t}\n\t}\n\n\tif cloudBlock.IsEmpty() {\n\t\treturn nil\n\t}\n\n\t// set code location\n\tx.setLocationKVWithDiagnostics(cloudBlock, \"\", blockPath, newNodeEntry(cloudBlockKeyNode, cloudBlockValueNode), diagnostics)\n\n\treturn cloudBlock\n}\n\n// ------------------------------------------------- --------------------------------------------------------------------\n\nconst (\n\tRequiredProviderBlockNameFieldName    = \"name\"\n\tRequiredProviderBlockSourceFieldName  = \"source\"\n\tRequiredProviderBlockVersionFieldName = \"version\"\n\tRequiredProviderBlockPathFieldName    = \"path\"\n)\n\nfunc (x *YamlFileToModuleParser) parseRequiredProvidersBlock(requiredProviderBlockKeyNode, requiredProviderBlockValueNode *yaml.Node, diagnostics *schema.Diagnostics) module.RequireProvidersBlock {\n\n\tblockPath := fmt.Sprintf(\"%s.%s\", SelefraBlockFieldName, 
SelefraRequiredProvidersBlockName)\n\n\tif requiredProviderBlockValueNode.Kind != yaml.SequenceNode {\n\t\tdiagnostics.AddDiagnostics(x.buildNodeErrorMsgForArrayType(requiredProviderBlockKeyNode, blockPath))\n\t\treturn nil\n\t}\n\n\trequiredProvidersBlock := make(module.RequireProvidersBlock, 0)\n\tfor index, requiredProviderNode := range requiredProviderBlockValueNode.Content {\n\t\tp := x.parseRequiredProviderBlock(index, requiredProviderNode, diagnostics)\n\t\tif p != nil {\n\t\t\trequiredProvidersBlock = append(requiredProvidersBlock, p)\n\t\t}\n\t}\n\n\tif len(requiredProvidersBlock) == 0 {\n\t\treturn nil\n\t}\n\treturn requiredProvidersBlock\n}\n\nfunc (x *YamlFileToModuleParser) parseRequiredProviderBlock(index int, node *yaml.Node, diagnostics *schema.Diagnostics) *module.RequireProviderBlock {\n\n\tblockPath := fmt.Sprintf(\"%s.%s[%d]\", SelefraBlockFieldName, SelefraRequiredProvidersBlockName, index)\n\n\ttoMap, d := x.toMap(node, blockPath)\n\tdiagnostics.AddDiagnostics(d)\n\tif utils.HasError(d) {\n\t\treturn nil\n\t}\n\n\trequiredProviderBlock := module.NewRequireProviderBlock()\n\tfor key, entry := range toMap {\n\t\tswitch key {\n\n\t\tcase RequiredProviderBlockNameFieldName:\n\t\t\trequiredProviderBlock.Name = x.parseStringValueWithDiagnosticsAndSetLocation(requiredProviderBlock, RequiredProviderBlockNameFieldName, entry, blockPath, diagnostics)\n\n\t\tcase RequiredProviderBlockSourceFieldName:\n\t\t\trequiredProviderBlock.Source = x.parseStringValueWithDiagnosticsAndSetLocation(requiredProviderBlock, RequiredProviderBlockSourceFieldName, entry, blockPath, diagnostics)\n\n\t\tcase RequiredProviderBlockVersionFieldName:\n\t\t\trequiredProviderBlock.Version = x.parseStringValueWithDiagnosticsAndSetLocation(requiredProviderBlock, RequiredProviderBlockVersionFieldName, entry, blockPath, diagnostics)\n\n\t\tcase RequiredProviderBlockPathFieldName:\n\t\t\trequiredProviderBlock.Path = x.parseStringValueWithDiagnosticsAndSetLocation(requiredProviderBlock, 
RequiredProviderBlockPathFieldName, entry, blockPath, diagnostics)\n\n\t\tdefault:\n\t\t\tdiagnostics.AddDiagnostics(x.buildNodeErrorMsgForUnSupport(entry.key, entry.value, fmt.Sprintf(\"%s.%s\", blockPath, key)))\n\n\t\t}\n\t}\n\n\tif requiredProviderBlock.IsEmpty() {\n\t\treturn nil\n\t}\n\n\t// set location\n\tx.setLocationKVWithDiagnostics(requiredProviderBlock, \"\", blockPath, newNodeEntry(nil, node), diagnostics)\n\n\treturn requiredProviderBlock\n}\n\n// ------------------------------------------------- --------------------------------------------------------------------\n\nconst (\n\tConnectionBlockTypeFieldName     = \"type\"\n\tConnectionBlockUsernameFieldName = \"username\"\n\tConnectionBlockPasswordFieldName = \"password\"\n\tConnectionBlockHostFieldName     = \"host\"\n\tConnectionBlockPortFieldName     = \"port\"\n\tConnectionBlockDatabaseFieldName = \"database\"\n\tConnectionBlockSSLModeFieldName  = \"sslmode\"\n\tConnectionBlockExtrasFieldName   = \"extras\"\n)\n\nfunc (x *YamlFileToModuleParser) parseConnectionBlock(connectionBlockKeyNode, connectionBlockValueNode *yaml.Node, diagnostics *schema.Diagnostics) *module.ConnectionBlock {\n\n\tblockPath := fmt.Sprintf(\"%s.%s\", SelefraBlockFieldName, SelefraConnectionsBlockName)\n\n\t// type check\n\ttoMap, d := x.toMap(connectionBlockValueNode, blockPath)\n\tdiagnostics.AddDiagnostics(d)\n\tif utils.HasError(d) {\n\t\treturn nil\n\t}\n\n\tconnectionBlock := module.NewConnectionBlock()\n\tfor key, entry := range toMap {\n\t\tswitch key {\n\n\t\tcase ConnectionBlockTypeFieldName:\n\t\t\tconnectionBlock.Type = x.parseStringValueWithDiagnosticsAndSetLocation(connectionBlock, ConnectionBlockTypeFieldName, entry, blockPath, diagnostics)\n\n\t\tcase ConnectionBlockUsernameFieldName:\n\t\t\tconnectionBlock.Username = x.parseStringValueWithDiagnosticsAndSetLocation(connectionBlock, ConnectionBlockUsernameFieldName, entry, blockPath, diagnostics)\n\n\t\tcase 
ConnectionBlockPasswordFieldName:\n\t\t\tconnectionBlock.Password = x.parseStringValueWithDiagnosticsAndSetLocation(connectionBlock, ConnectionBlockPasswordFieldName, entry, blockPath, diagnostics)\n\n\t\tcase ConnectionBlockHostFieldName:\n\t\t\tconnectionBlock.Host = x.parseStringValueWithDiagnosticsAndSetLocation(connectionBlock, ConnectionBlockHostFieldName, entry, blockPath, diagnostics)\n\n\t\tcase ConnectionBlockPortFieldName:\n\t\t\tconnectionBlock.Port = x.parseUintValueWithDiagnosticsAndSetLocation(connectionBlock, ConnectionBlockPortFieldName, entry, blockPath, diagnostics)\n\n\t\tcase ConnectionBlockDatabaseFieldName:\n\t\t\tconnectionBlock.Database = x.parseStringValueWithDiagnosticsAndSetLocation(connectionBlock, ConnectionBlockDatabaseFieldName, entry, blockPath, diagnostics)\n\n\t\tcase ConnectionBlockSSLModeFieldName:\n\t\t\tconnectionBlock.SSLMode = x.parseStringValueWithDiagnosticsAndSetLocation(connectionBlock, ConnectionBlockSSLModeFieldName, entry, blockPath, diagnostics)\n\n\t\tcase ConnectionBlockExtrasFieldName:\n\t\t\tconnectionBlock.Extras = x.parseStringSliceAndSetLocation(connectionBlock, ConnectionBlockExtrasFieldName, newNodeEntry(nil, entry.value), blockPath, diagnostics)\n\t\t}\n\t}\n\n\tif connectionBlock.IsEmpty() {\n\t\treturn nil\n\t}\n\n\t// set location\n\tx.setLocationKVWithDiagnostics(connectionBlock, \"\", blockPath, newNodeEntry(connectionBlockKeyNode, connectionBlockValueNode), diagnostics)\n\n\treturn connectionBlock\n}\n\n// ------------------------------------------------- --------------------------------------------------------------------\n"
  },
  {
    "path": "pkg/modules/parser/selefra_test.go",
    "content": "package parser\n\nimport (\n\t\"github.com/selefra/selefra/pkg/utils\"\n\t\"github.com/stretchr/testify/assert\"\n\t\"testing\"\n)\n\nfunc TestYamlFileToModuleParser_parseSelefraBlock(t *testing.T) {\n\tmodule, diagnostics := NewYamlFileToModuleParser(\"./test_data/test_parse_selefra/modules.yaml\").Parse()\n\tif utils.IsNotEmpty(diagnostics) {\n\t\tt.Log(diagnostics.ToString())\n\t}\n\tassert.False(t, utils.HasError(diagnostics))\n\tassert.NotNil(t, module.SelefraBlock)\n\n\tselefraBlock := module.SelefraBlock\n\tassert.NotEmpty(t, selefraBlock.GetNodeLocation(\"\").ReadSourceString())\n\tassert.NotEmpty(t, selefraBlock.GetNodeLocation(\"._value\").ReadSourceString())\n\n\tassert.NotEmpty(t, selefraBlock.GetNodeLocation(\"name\").ReadSourceString())\n\tassert.NotEmpty(t, selefraBlock.GetNodeLocation(\"name._key\").ReadSourceString())\n\tassert.NotEmpty(t, selefraBlock.GetNodeLocation(\"name._value\").ReadSourceString())\n\n\tassert.NotEmpty(t, selefraBlock.GetNodeLocation(\"cli_version\").ReadSourceString())\n\tassert.NotEmpty(t, selefraBlock.GetNodeLocation(\"cli_version._key\").ReadSourceString())\n\tassert.NotEmpty(t, selefraBlock.GetNodeLocation(\"cli_version._value\").ReadSourceString())\n\n\t// ------------------------------------------------- --------------------------------------------------------------------\n\n\tcloudBlock := selefraBlock.CloudBlock\n\tassert.NotEmpty(t, cloudBlock.GetNodeLocation(\"\").ReadSourceString())\n\tassert.NotEmpty(t, cloudBlock.GetNodeLocation(\"._key\").ReadSourceString())\n\tassert.NotEmpty(t, cloudBlock.GetNodeLocation(\"._value\").ReadSourceString())\n\n\tassert.NotEmpty(t, cloudBlock.GetNodeLocation(\"project\").ReadSourceString())\n\tassert.NotEmpty(t, cloudBlock.GetNodeLocation(\"project._key\").ReadSourceString())\n\tassert.NotEmpty(t, cloudBlock.GetNodeLocation(\"project._value\").ReadSourceString())\n\n\tassert.NotEmpty(t, 
cloudBlock.GetNodeLocation(\"organization\").ReadSourceString())\n\tassert.NotEmpty(t, cloudBlock.GetNodeLocation(\"organization._key\").ReadSourceString())\n\tassert.NotEmpty(t, cloudBlock.GetNodeLocation(\"organization._value\").ReadSourceString())\n\n\tassert.NotEmpty(t, cloudBlock.GetNodeLocation(\"hostname\").ReadSourceString())\n\tassert.NotEmpty(t, cloudBlock.GetNodeLocation(\"hostname._key\").ReadSourceString())\n\tassert.NotEmpty(t, cloudBlock.GetNodeLocation(\"hostname._value\").ReadSourceString())\n\n\t// ------------------------------------------------- --------------------------------------------------------------------\n\n\tconnectionBlock := selefraBlock.ConnectionBlock\n\tassert.NotEmpty(t, connectionBlock.GetNodeLocation(\"\").ReadSourceString())\n\tassert.NotEmpty(t, connectionBlock.GetNodeLocation(\"._key\").ReadSourceString())\n\tassert.NotEmpty(t, connectionBlock.GetNodeLocation(\"._value\").ReadSourceString())\n\n\tassert.NotEmpty(t, connectionBlock.GetNodeLocation(\"type\").ReadSourceString())\n\tassert.NotEmpty(t, connectionBlock.GetNodeLocation(\"type._key\").ReadSourceString())\n\tassert.NotEmpty(t, connectionBlock.GetNodeLocation(\"type._value\").ReadSourceString())\n\n\tassert.NotEmpty(t, connectionBlock.GetNodeLocation(\"username\").ReadSourceString())\n\tassert.NotEmpty(t, connectionBlock.GetNodeLocation(\"username._key\").ReadSourceString())\n\tassert.NotEmpty(t, connectionBlock.GetNodeLocation(\"username._value\").ReadSourceString())\n\n\tassert.NotEmpty(t, connectionBlock.GetNodeLocation(\"password\").ReadSourceString())\n\tassert.NotEmpty(t, connectionBlock.GetNodeLocation(\"password._key\").ReadSourceString())\n\tassert.NotEmpty(t, connectionBlock.GetNodeLocation(\"password._value\").ReadSourceString())\n\n\tassert.NotEmpty(t, connectionBlock.GetNodeLocation(\"host\").ReadSourceString())\n\tassert.NotEmpty(t, connectionBlock.GetNodeLocation(\"host._key\").ReadSourceString())\n\tassert.NotEmpty(t, 
connectionBlock.GetNodeLocation(\"host._value\").ReadSourceString())\n\n\tassert.NotEmpty(t, connectionBlock.GetNodeLocation(\"port\").ReadSourceString())\n\tassert.NotEmpty(t, connectionBlock.GetNodeLocation(\"port._key\").ReadSourceString())\n\tassert.NotEmpty(t, connectionBlock.GetNodeLocation(\"port._value\").ReadSourceString())\n\n\tassert.NotEmpty(t, connectionBlock.GetNodeLocation(\"database\").ReadSourceString())\n\tassert.NotEmpty(t, connectionBlock.GetNodeLocation(\"database._key\").ReadSourceString())\n\tassert.NotEmpty(t, connectionBlock.GetNodeLocation(\"database._value\").ReadSourceString())\n\n\tassert.NotEmpty(t, connectionBlock.GetNodeLocation(\"sslmode\").ReadSourceString())\n\tassert.NotEmpty(t, connectionBlock.GetNodeLocation(\"sslmode._key\").ReadSourceString())\n\tassert.NotEmpty(t, connectionBlock.GetNodeLocation(\"sslmode._value\").ReadSourceString())\n\n\t// ------------------------------------------------- --------------------------------------------------------------------\n\n\trequireProvidersBlock := selefraBlock.RequireProvidersBlock\n\tfor _, requireProviderBlock := range requireProvidersBlock {\n\n\t\tassert.NotEmpty(t, requireProviderBlock.GetNodeLocation(\"\").ReadSourceString())\n\t\tassert.NotEmpty(t, requireProviderBlock.GetNodeLocation(\"._value\").ReadSourceString())\n\n\t\tassert.NotEmpty(t, requireProviderBlock.GetNodeLocation(\"name\").ReadSourceString())\n\t\tassert.NotEmpty(t, requireProviderBlock.GetNodeLocation(\"name._key\").ReadSourceString())\n\t\tassert.NotEmpty(t, requireProviderBlock.GetNodeLocation(\"name._value\").ReadSourceString())\n\n\t\tassert.NotEmpty(t, requireProviderBlock.GetNodeLocation(\"source\").ReadSourceString())\n\t\tassert.NotEmpty(t, requireProviderBlock.GetNodeLocation(\"source._key\").ReadSourceString())\n\t\tassert.NotEmpty(t, requireProviderBlock.GetNodeLocation(\"source._value\").ReadSourceString())\n\n\t\tassert.NotEmpty(t, 
requireProviderBlock.GetNodeLocation(\"version\").ReadSourceString())\n\t\tassert.NotEmpty(t, requireProviderBlock.GetNodeLocation(\"version._key\").ReadSourceString())\n\t\tassert.NotEmpty(t, requireProviderBlock.GetNodeLocation(\"version._value\").ReadSourceString())\n\n\t}\n\n\t// ------------------------------------------------- --------------------------------------------------------------------\n\n}\n"
  },
  {
    "path": "pkg/modules/parser/test_data/test_modules.yaml",
    "content": "rules:\n  - name: bucket_is_not_configured_with_cors_rules\n    query: |\n      SELECT\n        DISTINCT(a1.*)\n      FROM\n        aws_s3_buckets AS a1 FULL\n        OUTER JOIN aws_s3_bucket_cors_rules AS a2 ON a1.selefra_id = a2.aws_s3_buckets_selefra_id\n      WHERE\n        aws_s3_buckets_selefra_id IS NULL;\n    labels:\n      resource_account_id: '{{.account_id}}'\n      resource_id: '{{.arn}}'\n      resource_region: '{{.region}}'\n      resource_type: S3\n      bucket_url: 'https://{{.name}}.s3.{{.region}}.amazonaws.com'\n    metadata:\n      author: Selefra\n      description: Ensure to configure secure CORS rules for the Bucket.\n      id: SF010117\n      provider: AWS\n      remediation: remediation/s3/bucket_is_not_configured_with_cors_rules.md\n      severity: Low\n      tags:\n        - Security\n        - Misconfiguration\n      title: S3 Bucket is not configured with CORS rules\n    output: \"S3 Bucket is not configured with CORS rules, arn: {{.arn}}\"\n\nselefra:\n  cloud:\n    project: example_project\n    organization: example_org\n    hostname: app.selefra.io\n  connection:\n    type: postgres\n    username: postgres\n    password: pass\n    host: localhost\n    port: \"5432\"\n    database: postgres\n    sslmode: disable\n  name: example_project\n  cli_version: v0.0.1\n  providers:\n    - name: aws\n      source: selefra/aws\n      version: \">=0.0.9,<=0.0.10\"\n    - name: gcp\n      source: selefra/gcp\n      version: \">=0.0.9,<=0.0.10\"\n\nmodules:\n  - name: Misconfiguration-S3\n    uses:\n      - rules/s3/bucket_acl_publicly_readable.yaml\n      - rules/s3/bucket_acl_publicly_writeable.yaml\n      - rules/s3/bucket_allow_http_access.yaml\n      - rules/s3/bucket_default_encryption_disable.yaml\n      - rules/s3/bucket_is_not_configured_with_cors_rules.yaml\n      - rules/s3/bucket_logging_disable.yaml\n      - rules/s3/bucket_not_configured_block_public_access.yaml\n      - rules/s3/bucket_object_traversal_by_acl.yaml\n    
  - rules/s3/bucket_object_traversal_by_policy.yaml\n      - rules/s3/bucket_publicly_readable.yaml\n      - rules/s3/bucket_publicly_writeable.yaml\n      - rules/s3/bucket_source_ip_not_set.yaml\n      - rules/s3/bucket_versioning_is_disabled.yaml\n      - rules/s3/mfa_delete_is_disable.yaml\n      - rules/s3/s3_account_level_public_access_not_blocks.yaml\n      - rules/s3/s3_bucket_blacklisted_actions_prohibited.yaml\n      - rules/s3/s3_bucket_replication_disabled.yaml\n      - rules/s3/s3_is_not_protected_by_backup_plan.yaml\n      - rules/s3/s3_last_backup_recovery_point.yaml\n      - rules/s3/s3_not_default_encryption_kms.yaml\n      - rules/s3/s3_version_lifecycle_policy_not_check.yaml\n      - rules/s3/the_target_bucket_for_server_access_logging_is_the_bucket_itself.yaml\n  - name: example_module\n    uses: ./rules/\n    input:\n      name: selefra\n\nvariables:\n  - key: test\n    default:\n      a: 1\n      b: 1\n      c: 1\n\nproviders:\n  - name: aws_01\n    cache: 1d\n    provider: aws\n    resources:\n      - aws_s3_buckets\n      - aws_s3_accounts\n    #  Optional, Repeated. Add an accounts block for every account you want to assume-role into and fetch data from.\n    accounts:\n      #     Optional. User identification\n      - account_name: <UNIQUE ACCOUNT IDENTIFIER>\n        #    Optional. Named profile in config or credential file from where Selefra should grab credentials\n        shared_config_profile: < PROFILE_NAME >\n        #    Optional. Location of shared configuration files\n        shared_config_files:\n          - <FILE_PATH>\n        #   Optional. Location of shared credentials files\n        shared_credentials_files:\n          - <FILE_PATH>\n        #    Optional. Role ARN we want to assume when accessing this account\n        role_arn: < YOUR_ROLE_ARN >\n        #    Optional. Named role session to grab specific operation under the assumed role\n        role_session_name: <SESSION_NAME>\n        #    Optional. 
Any outside of the org account id that has additional control\n        external_id: <ID>\n        #    Optional. Designated region of servers\n        default_region: <REGION_CODE>\n        #    Optional. by default assumes all regions\n        regions:\n          - us-east-1\n          - us-west-2\n    #    The maximum number of times that a request will be retried for failures. Defaults to 10 retry attempts.\n    max_attempts: 10\n    #    The maximum back off delay between attempts. The backoff delays exponentially with a jitter based on the number of attempts. Defaults to 30 seconds.\n    max_backoff: 30"
  },
  {
    "path": "pkg/modules/parser/test_data/test_parse_modules/modules.yaml",
    "content": "modules:\n  - name: Misconfiguration-S3\n    uses:\n      - rules/s3/bucket_acl_publicly_readable.yaml\n      - rules/s3/bucket_acl_publicly_writeable.yaml\n      - rules/s3/bucket_allow_http_access.yaml\n      - rules/s3/bucket_default_encryption_disable.yaml\n      - rules/s3/bucket_is_not_configured_with_cors_rules.yaml\n      - rules/s3/bucket_logging_disable.yaml\n      - rules/s3/bucket_not_configured_block_public_access.yaml\n      - rules/s3/bucket_object_traversal_by_acl.yaml\n      - rules/s3/bucket_object_traversal_by_policy.yaml\n      - rules/s3/bucket_publicly_readable.yaml\n      - rules/s3/bucket_publicly_writeable.yaml\n      - rules/s3/bucket_source_ip_not_set.yaml\n      - rules/s3/bucket_versioning_is_disabled.yaml\n      - rules/s3/mfa_delete_is_disable.yaml\n      - rules/s3/s3_account_level_public_access_not_blocks.yaml\n      - rules/s3/s3_bucket_blacklisted_actions_prohibited.yaml\n      - rules/s3/s3_bucket_replication_disabled.yaml\n      - rules/s3/s3_is_not_protected_by_backup_plan.yaml\n      - rules/s3/s3_last_backup_recovery_point.yaml\n      - rules/s3/s3_not_default_encryption_kms.yaml\n      - rules/s3/s3_version_lifecycle_policy_not_check.yaml\n      - rules/s3/the_target_bucket_for_server_access_logging_is_the_bucket_itself.yaml\n  - name: example_module\n    uses: ./rules/\n    input:\n      name: selefra\n"
  },
  {
    "path": "pkg/modules/parser/test_data/test_parse_providers/modules.yaml",
    "content": "providers:\n  - name: aws_01\n    cache: 1d\n    provider: aws\n    resources:\n      - aws_s3_buckets\n      - aws_s3_accounts\n    #  Optional, Repeated. Add an accounts block for every account you want to assume-role into and fetch data from.\n    accounts:\n      #     Optional. User identification\n      - account_name: <UNIQUE ACCOUNT IDENTIFIER>\n        #    Optional. Named profile in config or credential file from where Selefra should grab credentials\n        shared_config_profile: < PROFILE_NAME >\n        #    Optional. Location of shared configuration files\n        shared_config_files:\n          - <FILE_PATH>\n        #   Optional. Location of shared credentials files\n        shared_credentials_files:\n          - <FILE_PATH>\n        #    Optional. Role ARN we want to assume when accessing this account\n        role_arn: < YOUR_ROLE_ARN >\n        #    Optional. Named role session to grab specific operation under the assumed role\n        role_session_name: <SESSION_NAME>\n        #    Optional. Any outside of the org account id that has additional control\n        external_id: <ID>\n        #    Optional. Designated region of servers\n        default_region: <REGION_CODE>\n        #    Optional. by default assumes all regions\n        regions:\n          - us-east-1\n          - us-west-2\n    #    The maximum number of times that a request will be retried for failures. Defaults to 10 retry attempts.\n    max_attempts: 10\n    #    The maximum back off delay between attempts. The backoff delays exponentially with a jitter based on the number of attempts. Defaults to 30 seconds.\n    max_backoff: 30"
  },
  {
    "path": "pkg/modules/parser/test_data/test_parse_rules/modules.yaml",
    "content": "rules:\n  - name: bucket_is_not_configured_with_cors_rules\n    query: |\n      SELECT\n        DISTINCT(a1.*)\n      FROM\n        aws_s3_buckets AS a1 FULL\n        OUTER JOIN aws_s3_bucket_cors_rules AS a2 ON a1.selefra_id = a2.aws_s3_buckets_selefra_id\n      WHERE\n        aws_s3_buckets_selefra_id IS NULL;\n    labels:\n      resource_account_id: '{{.account_id}}'\n      resource_id: '{{.arn}}'\n      resource_region: '{{.region}}'\n      resource_type: S3\n      bucket_url: 'https://{{.name}}.s3.{{.region}}.amazonaws.com'\n    metadata:\n      author: Selefra\n      description: Ensure to configure secure CORS rules for the Bucket.\n      id: SF010117\n      provider: AWS\n      remediation: remediation/s3/bucket_is_not_configured_with_cors_rules.md\n      severity: Low\n      tags:\n        - Security\n        - Misconfiguration\n      title: S3 Bucket is not configured with CORS rules\n    output: \"S3 Bucket is not configured with CORS rules, arn: {{.arn}}\"\n"
  },
  {
    "path": "pkg/modules/parser/test_data/test_parse_selefra/modules.yaml",
    "content": "selefra:\n  cloud:\n    project: example_project\n    organization: example_org\n    hostname: app.selefra.io\n  connection:\n    type: postgres\n    username: postgres\n    password: pass\n    host: localhost\n    port: \"5432\"\n    database: postgres\n    sslmode: disable\n  name: example_project\n  cli_version: v0.0.1\n  providers:\n    - name: aws\n      source: selefra/aws\n      version: \">=0.0.9,<=0.0.10\"\n    - name: gcp\n      source: selefra/gcp\n      version: \">=0.0.9,<=0.0.10\"\n"
  },
  {
    "path": "pkg/modules/parser/test_data/test_parse_variables/modules.yaml",
    "content": "variables:\n  - key: test\n    default:\n      a: 1\n      b: 1\n      c: 1"
  },
  {
    "path": "pkg/modules/parser/variables.go",
    "content": "package parser\n\nimport (\n\t\"fmt\"\n\t\"github.com/selefra/selefra-provider-sdk/provider/schema\"\n\t\"github.com/selefra/selefra-utils/pkg/reflect_util\"\n\t\"github.com/selefra/selefra/pkg/modules/module\"\n\t\"gopkg.in/yaml.v3\"\n)\n\nconst VariablesBlockName = \"variables\"\n\nfunc (x *YamlFileToModuleParser) parseVariablesBlock(variablesBlockKeyNode, variableBlockValueNode *yaml.Node, diagnostics *schema.Diagnostics) module.VariablesBlock {\n\n\tblockPath := VariablesBlockName\n\n\t// variables must be an array element\n\tif variableBlockValueNode.Kind != yaml.SequenceNode {\n\t\tdiagnostics.AddDiagnostics(x.buildNodeErrorMsgForArrayType(variableBlockValueNode, blockPath))\n\t\treturn nil\n\t}\n\n\t// Parse each child element\n\tvariables := make(module.VariablesBlock, 0)\n\tfor index, variableNode := range variableBlockValueNode.Content {\n\t\tblock := x.parseVariableBlock(index, variableNode, diagnostics)\n\t\tif block != nil {\n\t\t\tvariables = append(variables, block)\n\t\t}\n\t}\n\n\tif len(variables) == 0 {\n\t\treturn nil\n\t}\n\treturn variables\n}\n\nconst (\n\tVariableBlockKeyFieldName         = \"key\"\n\tVariableBlockDefaultFieldName     = \"default\"\n\tVariableBlockDescriptionFieldName = \"description\"\n\tVariableBlockAuthorFieldName      = \"author\"\n)\n\nfunc (x *YamlFileToModuleParser) parseVariableBlock(index int, node *yaml.Node, diagnostics *schema.Diagnostics) *module.VariableBlock {\n\n\tblockPath := fmt.Sprintf(\"%s[%d]\", VariablesBlockName, index)\n\n\ttoMap, d := x.toMap(node, blockPath)\n\tdiagnostics.AddDiagnostics(d)\n\tif d != nil && d.HasError() {\n\t\treturn nil\n\t}\n\n\tvariableBlock := module.NewVariableBlock()\n\tfor key, entry := range toMap {\n\t\tswitch key {\n\n\t\tcase VariableBlockKeyFieldName:\n\t\t\tvariableBlock.Key = x.parseStringValueWithDiagnosticsAndSetLocation(variableBlock, VariableBlockKeyFieldName, entry, blockPath, diagnostics)\n\n\t\tcase 
VariableBlockDefaultFieldName:\n\t\t\tfieldSelector := fmt.Sprintf(\"%s.%s\", blockPath, VariableBlockDefaultFieldName)\n\t\t\tanyValue, d := x.parseAny(entry.value, fieldSelector)\n\t\t\tdiagnostics.AddDiagnostics(d)\n\t\t\tif !reflect_util.IsNil(anyValue) {\n\t\t\t\tvariableBlock.Default = anyValue\n\t\t\t}\n\t\t\t// set location\n\t\t\tx.setLocationKVWithDiagnostics(variableBlock, VariableBlockDefaultFieldName, fieldSelector, entry, diagnostics)\n\n\t\tcase VariableBlockDescriptionFieldName:\n\t\t\tvariableBlock.Description = x.parseStringValueWithDiagnosticsAndSetLocation(variableBlock, VariableBlockDescriptionFieldName, entry, blockPath, diagnostics)\n\n\t\tcase VariableBlockAuthorFieldName:\n\t\t\tvariableBlock.Author = x.parseStringValueWithDiagnosticsAndSetLocation(variableBlock, VariableBlockAuthorFieldName, entry, blockPath, diagnostics)\n\n\t\tdefault:\n\t\t\tdiagnostics.AddDiagnostics(x.buildNodeErrorMsgForUnSupport(entry.key, entry.value, fmt.Sprintf(\"%s.%s\", blockPath, key)))\n\t\t}\n\t}\n\n\tif variableBlock.IsEmpty() {\n\t\treturn nil\n\t}\n\n\t// set location\n\tx.setLocationKVWithDiagnostics(variableBlock, \"\", blockPath, newNodeEntry(nil, node), diagnostics)\n\n\treturn variableBlock\n}\n"
  },
  {
    "path": "pkg/modules/parser/variables_test.go",
    "content": "package parser\n\nimport (\n\t\"github.com/selefra/selefra/pkg/utils\"\n\t\"github.com/stretchr/testify/assert\"\n\t\"testing\"\n)\n\nfunc TestYamlFileToModuleParser_parseVariablesBlock(t *testing.T) {\n\tmodule, diagnostics := NewYamlFileToModuleParser(\"./test_data/test_parse_variables/modules.yaml\").Parse()\n\tif utils.IsNotEmpty(diagnostics) {\n\t\tt.Log(diagnostics.ToString())\n\t}\n\tassert.False(t, utils.HasError(diagnostics))\n\tassert.NotNil(t, module.VariablesBlock)\n\n\tvariableBLock := module.VariablesBlock[0]\n\tassert.NotEmpty(t, variableBLock.GetNodeLocation(\"\").ReadSourceString())\n\tassert.NotEmpty(t, variableBLock.GetNodeLocation(\"._value\").ReadSourceString())\n\n\tassert.NotEmpty(t, variableBLock.GetNodeLocation(\"key\").ReadSourceString())\n\tassert.NotEmpty(t, variableBLock.GetNodeLocation(\"key._key\").ReadSourceString())\n\tassert.NotEmpty(t, variableBLock.GetNodeLocation(\"key._value\").ReadSourceString())\n\n\tassert.NotEmpty(t, variableBLock.GetNodeLocation(\"default\").ReadSourceString())\n\tassert.NotEmpty(t, variableBLock.GetNodeLocation(\"default._key\").ReadSourceString())\n\tassert.NotEmpty(t, variableBLock.GetNodeLocation(\"default._value\").ReadSourceString())\n\n}\n"
  },
  {
    "path": "pkg/modules/parser/yaml_file_to_module_parser.go",
    "content": "package parser\n\nimport (\n\t\"fmt\"\n\t\"github.com/selefra/selefra-provider-sdk/provider/schema\"\n\t\"github.com/selefra/selefra/pkg/modules/module\"\n\t\"github.com/selefra/selefra/pkg/utils\"\n\t\"github.com/songzhibin97/gkit/tools/pointer\"\n\t\"gopkg.in/yaml.v3\"\n\t\"os\"\n\t\"strconv\"\n\t\"strings\"\n)\n\n// YamlFileToModuleParser Read a yaml file as a module, but the module is only for program convenience. There is no such file module; a module should at least be a folder\ntype YamlFileToModuleParser struct {\n\tyamlFilePath string\n\tinstruction  map[string]interface{}\n}\n\nfunc NewYamlFileToModuleParser(yamlFilePath string, instruction map[string]interface{}) *YamlFileToModuleParser {\n\treturn &YamlFileToModuleParser{\n\t\tyamlFilePath: yamlFilePath,\n\t\tinstruction:  instruction,\n\t}\n}\n\nfunc (x *YamlFileToModuleParser) Parse() (*module.Module, *schema.Diagnostics) {\n\n\tdiagnostics := schema.NewDiagnostics()\n\n\t// 1. read yaml file\n\tyamlFileBytes, err := os.ReadFile(x.yamlFilePath)\n\tif err != nil {\n\t\treturn nil, diagnostics.AddErrorMsg(\"YamlParserError, read yaml file %s error: %s\", x.yamlFilePath, err.Error())\n\t}\n\n\tdocumentNode := &yaml.Node{}\n\terr = yaml.Unmarshal(yamlFileBytes, documentNode)\n\tif err != nil {\n\t\treturn nil, diagnostics.AddErrorMsg(\"yaml file %s unmarshal error: %s\", x.yamlFilePath, err.Error())\n\t}\n\n\tif documentNode.Kind != yaml.DocumentNode {\n\t\treturn nil, diagnostics.AddErrorMsg(\"yaml file %s unmarshal error, not have document node\", x.yamlFilePath)\n\t}\n\n\tyamlFileModule := &module.Module{}\n\trootContent := documentNode.Content[0].Content\n\tfor index := 0; index < len(rootContent); index += 2 {\n\t\tkey := rootContent[index]\n\t\tvalue := rootContent[index+1]\n\t\tswitch key.Value {\n\t\tcase SelefraBlockFieldName:\n\t\t\tyamlFileModule.SelefraBlock = x.parseSelefraBlock(key, value, diagnostics)\n\t\t\tif x.instruction != nil {\n\t\t\t\tif 
x.instruction[\"openai_api_key\"] != nil && x.instruction[\"openai_api_key\"] != \"\" {\n\t\t\t\t\tyamlFileModule.SelefraBlock.OpenaiApiKey = x.instruction[\"openai_api_key\"].(string)\n\t\t\t\t}\n\t\t\t\tif x.instruction[\"openai_mode\"] != nil && x.instruction[\"openai_mode\"] != \"\" {\n\t\t\t\t\tyamlFileModule.SelefraBlock.OpenaiMode = x.instruction[\"openai_mode\"].(string)\n\t\t\t\t}\n\t\t\t\tif x.instruction[\"openai_limit\"] != nil && x.instruction[\"openai_limit\"] != 0 {\n\t\t\t\t\tyamlFileModule.SelefraBlock.OpenaiLimit = x.instruction[\"openai_limit\"].(uint64)\n\t\t\t\t}\n\t\t\t}\n\t\tcase VariablesBlockName:\n\t\t\tyamlFileModule.VariablesBlock = x.parseVariablesBlock(key, value, diagnostics)\n\t\tcase ProvidersBlockName:\n\t\t\tyamlFileModule.ProvidersBlock = x.parseProvidersBlock(key, value, diagnostics)\n\t\tcase ModulesBlockName:\n\t\t\tyamlFileModule.ModulesBlock = x.parseModulesBlock(key, value, diagnostics)\n\t\tcase RulesBlockName:\n\t\t\tif x.instruction != nil {\n\t\t\t\tif x.instruction[\"query\"] != nil {\n\t\t\t\t\tyamlFileModule.RulesBlock = module.RulesBlock{\n\t\t\t\t\t\t&module.RuleBlock{\n\t\t\t\t\t\t\tName:   \"CloudChat\",\n\t\t\t\t\t\t\tLabels: map[string]interface{}{\"Initiator\": \"GPT\"},\n\t\t\t\t\t\t\tMetadataBlock: &module.RuleMetadataBlock{\n\t\t\t\t\t\t\t\tId:          \"GPT Rule\",\n\t\t\t\t\t\t\t\tTitle:       \"GPT mode automatic analysis\",\n\t\t\t\t\t\t\t\tSeverity:    \"Low\",\n\t\t\t\t\t\t\t\tAuthor:      \"Selefra\",\n\t\t\t\t\t\t\t\tRemediation: \"In GPT mode, it will automatically analyze whether there is risk information according to the query content.\",\n\t\t\t\t\t\t\t\tDescription: \"In GPT mode, it will automatically analyze whether there is risk information according to the query content.\",\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\tQuery:  x.instruction[\"query\"].(string),\n\t\t\t\t\t\t\tOutput: 
\"{{.resource}},{{.title}}\",\n\t\t\t\t\t\t},\n\t\t\t\t\t}\n\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\t}\n\t\t\tyamlFileModule.RulesBlock = x.parseRulesBlock(key, value, diagnostics)\n\t\t}\n\t}\n\treturn yamlFileModule, diagnostics\n}\n\n// ------------------------------------------------- --------------------------------------------------------------------\n\nfunc (x *YamlFileToModuleParser) parseUintValueWithDiagnosticsAndSetLocation(block module.Block, fieldName string, entry *nodeEntry, blockBasePath string, diagnostics *schema.Diagnostics) *uint64 {\n\tvalueInteger := x.parseUintWithDiagnostics(entry.value, blockBasePath+\".\"+fieldName, diagnostics)\n\n\tif entry.key != nil {\n\t\tx.setLocationWithDiagnostics(block, fieldName+module.NodeLocationSelfKey, blockBasePath, entry.key, diagnostics)\n\t}\n\n\tx.setLocationWithDiagnostics(block, fieldName+module.NodeLocationSelfValue, blockBasePath, entry.value, diagnostics)\n\n\treturn valueInteger\n}\n\nfunc (x *YamlFileToModuleParser) parseFilterValueWithDiagnosticsAndSetLocation(block module.Block, fieldName string, entry *nodeEntry, blockBasePath string, diagnostics *schema.Diagnostics) []module.Filter {\n\tfilters := x.parseFilterWithDiagnostics(entry.value, blockBasePath+\".\"+fieldName, diagnostics)\n\n\treturn filters\n}\n\nfunc (x *YamlFileToModuleParser) parseStringValueWithDiagnosticsAndSetLocation(block module.Block, fieldName string, entry *nodeEntry, blockBasePath string, diagnostics *schema.Diagnostics) string {\n\tvalueString := x.parseStringWithDiagnostics(entry.value, blockBasePath+\".\"+fieldName, diagnostics)\n\n\tif entry.key != nil {\n\t\tx.setLocationWithDiagnostics(block, fieldName+module.NodeLocationSelfKey, blockBasePath, entry.key, diagnostics)\n\t}\n\n\tx.setLocationWithDiagnostics(block, fieldName+module.NodeLocationSelfValue, blockBasePath, entry.value, diagnostics)\n\n\treturn valueString\n}\n\nfunc (x *YamlFileToModuleParser) parseInterfaceValueWithDiagnosticsAndSetLocation(block module.Block, 
fieldName string, entry *nodeEntry, blockBasePath string, diagnostics *schema.Diagnostics) interface{} {\n\tvalueString := x.parseInterfaceWithDiagnostics(entry.value, blockBasePath+\".\"+fieldName, diagnostics)\n\n\tif entry.key != nil {\n\t\tx.setLocationWithDiagnostics(block, fieldName+module.NodeLocationSelfKey, blockBasePath, entry.key, diagnostics)\n\t}\n\n\tx.setLocationWithDiagnostics(block, fieldName+module.NodeLocationSelfValue, blockBasePath, entry.value, diagnostics)\n\n\treturn valueString\n}\n\n// Parse node as a string slice\nfunc (x *YamlFileToModuleParser) parseStringSlice(node *yaml.Node, blockPath string) ([]string, *schema.Diagnostics) {\n\tdiagnostics := schema.NewDiagnostics()\n\n\t// modules must be an array element\n\tif node.Kind != yaml.SequenceNode {\n\t\treturn nil, x.buildNodeErrorMsgForArrayType(node, blockPath)\n\t}\n\n\telementSlice := make([]string, 0)\n\tfor index, elementNode := range node.Content {\n\t\tuseNodeValue := x.parseStringWithDiagnostics(elementNode, fmt.Sprintf(\"%s[%d]\", blockPath, index), diagnostics)\n\t\tif useNodeValue != \"\" {\n\t\t\telementSlice = append(elementSlice, useNodeValue)\n\t\t}\n\t}\n\treturn elementSlice, diagnostics\n}\n\nfunc (x *YamlFileToModuleParser) parseStringSliceAndSetLocation(block module.Block, fieldName string, entry *nodeEntry, blockBasePath string, diagnostics *schema.Diagnostics) []string {\n\n\tblockPath := blockBasePath + \".\" + fieldName\n\n\telementSlice := make([]string, 0)\n\tswitch entry.value.Kind {\n\tcase yaml.SequenceNode:\n\t\tfor index, elementNode := range entry.value.Content {\n\t\t\telementFullPath := fmt.Sprintf(\"%s[%d]\", blockPath, index)\n\t\t\tuseNodeValue := x.parseStringWithDiagnostics(elementNode, elementFullPath, diagnostics)\n\t\t\tif useNodeValue != \"\" {\n\n\t\t\t\telementSlice = append(elementSlice, useNodeValue)\n\n\t\t\t\trelativePath := fmt.Sprintf(\"%s[%d]%s\", fieldName, index, module.NodeLocationSelfValue)\n\t\t\t\terr := 
block.SetNodeLocation(relativePath, module.BuildLocationFromYamlNode(x.yamlFilePath, elementFullPath, elementNode))\n\t\t\t\tif err != nil {\n\t\t\t\t\tdiagnostics.AddErrorMsg(\"file = %s, set location %s error: %s\", x.yamlFilePath, elementFullPath, err.Error())\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\tif len(elementSlice) == 0 {\n\t\t\treturn nil\n\t\t}\n\tcase yaml.ScalarNode:\n\t\tindex := 0\n\t\telementNode := entry.value\n\t\telementFullPath := fmt.Sprintf(\"%s[%d]\", blockPath, index)\n\t\tuseNodeValue := x.parseStringWithDiagnostics(entry.value, elementFullPath, diagnostics)\n\t\tif useNodeValue != \"\" {\n\n\t\t\telementSlice = append(elementSlice, useNodeValue)\n\n\t\t\trelativePath := fmt.Sprintf(\"%s[%d]%s\", fieldName, index, module.NodeLocationSelfValue)\n\t\t\terr := block.SetNodeLocation(relativePath, module.BuildLocationFromYamlNode(x.yamlFilePath, elementFullPath, elementNode))\n\t\t\tif err != nil {\n\t\t\t\tdiagnostics.AddErrorMsg(\"file = %s, set location %s error: %s\", x.yamlFilePath, elementFullPath, err.Error())\n\t\t\t}\n\t\t}\n\tdefault:\n\t\tdiagnostics.AddDiagnostics(x.buildNodeErrorMsgForArrayType(entry.key, blockPath))\n\t}\n\n\t// set self location\n\tx.setLocationKVWithDiagnostics(block, fieldName, blockPath, entry, diagnostics)\n\n\treturn elementSlice\n}\n\nfunc (x *YamlFileToModuleParser) parseStringMapAndSetLocation(block module.Block, fieldName string, entry *nodeEntry, blockBasePath string, diagnostics *schema.Diagnostics) map[string]interface{} {\n\n\tblockPath := blockBasePath + \".\" + fieldName\n\n\t// modules must be an array element\n\tif entry.value.Kind != yaml.MappingNode {\n\t\tdiagnostics.AddDiagnostics(x.buildNodeErrorMsgForMappingType(entry.key, blockPath))\n\t\treturn nil\n\t}\n\n\ttoMap, d := x.toMap(entry.value, blockPath)\n\tdiagnostics.AddDiagnostics(d)\n\tif utils.HasError(d) {\n\t\treturn nil\n\t}\n\n\tm := make(map[string]interface{}, 0)\n\tfor key, entry := range toMap {\n\t\tif entry.value == nil 
{\n\t\t\tdiagnostics.AddDiagnostics(x.buildNodeErrorMsgForScalarType(entry.key, blockPath, \"no empty\"))\n\t\t\tcontinue\n\t\t}\n\n\t\tm[key] = x.parseInterfaceValueWithDiagnosticsAndSetLocation(block, fieldName+\".\"+key, entry, blockBasePath, diagnostics)\n\t}\n\n\tx.setLocationKVWithDiagnostics(block, fieldName, blockPath, entry, diagnostics)\n\n\treturn m\n}\n\nfunc (x *YamlFileToModuleParser) parseAny(node *yaml.Node, blockPath string) (any, *schema.Diagnostics) {\n\tkeyName := \"any-key\"\n\thandlerNode := yaml.Node{\n\t\tKind: yaml.MappingNode,\n\t\tContent: []*yaml.Node{\n\t\t\t&yaml.Node{\n\t\t\t\tKind:  yaml.ScalarNode,\n\t\t\t\tValue: keyName,\n\t\t\t},\n\t\t\tnode,\n\t\t},\n\t}\n\tout, err := yaml.Marshal(handlerNode)\n\tif err != nil {\n\t\t// TODO\n\t\treturn nil, schema.NewDiagnostics().AddErrorMsg(err.Error())\n\t}\n\tvar r map[string]any\n\terr = yaml.Unmarshal(out, &r)\n\tif err != nil {\n\t\treturn nil, schema.NewDiagnostics().AddErrorMsg(err.Error())\n\t}\n\treturn r[keyName], nil\n}\n\nfunc (x *YamlFileToModuleParser) parseUintWithDiagnostics(node *yaml.Node, blockPath string, diagnostics *schema.Diagnostics) *uint64 {\n\tif node.Kind != yaml.ScalarNode {\n\t\tdiagnostics.AddDiagnostics(x.buildNodeErrorMsgForScalarType(node, blockPath, \"int\"))\n\t\treturn nil\n\t}\n\tintValue, err := strconv.Atoi(strings.TrimSpace(node.Value))\n\tif err != nil {\n\t\tdiagnostics.AddDiagnostics(x.buildNodeErrorMsgForScalarType(node, blockPath, \"int\"))\n\t\treturn nil\n\t}\n\tif intValue < 0 {\n\t\tdiagnostics.AddDiagnostics(x.buildNodeErrorMsgForScalarType(node, blockPath, \"uint\"))\n\t\treturn nil\n\t}\n\treturn pointer.ToUint64Pointer(uint64(intValue))\n}\n\nfunc (x *YamlFileToModuleParser) parseStringWithDiagnostics(node *yaml.Node, blockPath string, diagnostics *schema.Diagnostics) string {\n\tif node.Kind == yaml.ScalarNode {\n\t\treturn node.Value\n\t} else {\n\t\tdiagnostics.AddDiagnostics(x.buildNodeErrorMsgForScalarType(node, blockPath, 
\"string\"))\n\t\treturn \"\"\n\t}\n}\n\nfunc (x *YamlFileToModuleParser) parseInterfaceWithDiagnostics(node *yaml.Node, blockPath string, diagnostics *schema.Diagnostics) interface{} {\n\t// 如果节点类型是map或者list，需要转换成json再转换成interface\n\tif node.Kind == yaml.MappingNode {\n\t\tvar mapNode map[string]interface{}\n\t\tb, err := yaml.Marshal(node)\n\t\tif err != nil {\n\t\t\tdiagnostics.AddErrorMsg(\"file = %s, marshal node %s error: %s\", x.yamlFilePath, blockPath, err.Error())\n\t\t\treturn nil\n\t\t}\n\t\terr = yaml.Unmarshal(b, &mapNode)\n\t\tif err != nil {\n\t\t\tdiagnostics.AddErrorMsg(\"file = %s, unmarshal node %s error: %s\", x.yamlFilePath, blockPath, err.Error())\n\t\t\treturn nil\n\t\t}\n\t\treturn mapNode\n\t}\n\tif node.Kind == yaml.SequenceNode {\n\t\tvar listNode []interface{}\n\t\tb, err := yaml.Marshal(node)\n\t\tif err != nil {\n\t\t\tdiagnostics.AddErrorMsg(\"file = %s, marshal node %s error: %s\", x.yamlFilePath, blockPath, err.Error())\n\t\t\treturn nil\n\t\t}\n\t\terr = yaml.Unmarshal(b, &listNode)\n\t\tif err != nil {\n\t\t\tdiagnostics.AddErrorMsg(\"file = %s, unmarshal node %s error: %s\", x.yamlFilePath, blockPath, err.Error())\n\t\t\treturn nil\n\t\t}\n\t\treturn listNode\n\t}\n\treturn node.Value\n}\n\nfunc (x *YamlFileToModuleParser) parseFilterWithDiagnostics(node *yaml.Node, blockPath string, diagnostics *schema.Diagnostics) []module.Filter {\n\tvar filters []module.Filter\n\tb, err := yaml.Marshal(node)\n\tif err != nil {\n\t\treturn []module.Filter{}\n\t}\n\terr = yaml.Unmarshal(b, &filters)\n\tif err != nil {\n\t\treturn []module.Filter{}\n\t}\n\treturn filters\n}\n\ntype nodeEntry struct {\n\tkey, value *yaml.Node\n}\n\nfunc newNodeEntry(key, value *yaml.Node) *nodeEntry {\n\treturn &nodeEntry{\n\t\tkey:   key,\n\t\tvalue: value,\n\t}\n}\n\nfunc (x *YamlFileToModuleParser) toMap(node *yaml.Node, blockPath string) (map[string]*nodeEntry, *schema.Diagnostics) {\n\n\t// check node type, must is mapping type\n\tif node.Kind != 
yaml.MappingNode {\n\t\treturn nil, x.buildNodeErrorMsgForMappingType(node, blockPath)\n\t}\n\n\t// convert to map\n\tm := make(map[string]*nodeEntry, 0)\n\tfor index := 0; index < len(node.Content); index += 2 {\n\t\tkey := node.Content[index]\n\n\t\t// key must is string type\n\t\tif key.Kind != yaml.ScalarNode {\n\t\t\treturn nil, x.buildNodeErrorMsgForScalarType(key, fmt.Sprintf(\"%s.%s\", blockPath, key.Value), \"string\")\n\t\t}\n\n\t\tvalue := node.Content[index+1]\n\t\tm[key.Value] = &nodeEntry{\n\t\t\tkey:   key,\n\t\t\tvalue: value,\n\t\t}\n\t}\n\treturn m, nil\n}\n\n// ------------------------------------------------- --------------------------------------------------------------------\n\nfunc (x *YamlFileToModuleParser) setLocationKVWithDiagnostics(block module.Block, relativeYamlSelectorPath, fullYamlSelectorPath string, nodeEntry *nodeEntry, diagnostics *schema.Diagnostics) {\n\n\tif nodeEntry.key != nil {\n\t\tx.setLocationWithDiagnostics(block, relativeYamlSelectorPath+module.NodeLocationSelfKey, fullYamlSelectorPath, nodeEntry.key, diagnostics)\n\t}\n\n\tif nodeEntry.value != nil {\n\t\tx.setLocationWithDiagnostics(block, relativeYamlSelectorPath+module.NodeLocationSelfValue, fullYamlSelectorPath, nodeEntry.value, diagnostics)\n\t}\n}\n\nfunc (x *YamlFileToModuleParser) setLocationWithDiagnostics(block module.Block, relativeYamlSelectorPath, fullYamlSelectorPath string, node *yaml.Node, diagnostics *schema.Diagnostics) {\n\tlocation := module.BuildLocationFromYamlNode(x.yamlFilePath, fullYamlSelectorPath, node)\n\terr := block.SetNodeLocation(relativeYamlSelectorPath, location)\n\tif err != nil {\n\t\tdiagnostics.AddErrorMsg(\"YamlFileToModuleParser error, build location for file %s %s error: %s\", x.yamlFilePath, fullYamlSelectorPath, err.Error())\n\t}\n}\n\nfunc (x *YamlFileToModuleParser) buildNodeErrorMsgForUnSupport(keyNode, valueNode *yaml.Node, blockPath string) *schema.Diagnostics {\n\tkeyLocation := 
module.BuildLocationFromYamlNode(x.yamlFilePath, blockPath, keyNode)\n\tvalueLocation := module.BuildLocationFromYamlNode(x.yamlFilePath, blockPath, valueNode)\n\tlocation := module.MergeKeyValueLocation(keyLocation, valueLocation)\n\tlocation.YamlSelector = keyLocation.YamlSelector\n\terrorMsg := fmt.Sprintf(\"syntax error, do not support %s\", blockPath)\n\treport := module.RenderErrorTemplate(errorMsg, location)\n\treturn schema.NewDiagnostics().AddErrorMsg(report)\n}\n\nfunc (x *YamlFileToModuleParser) buildNodeErrorMsgForScalarType(node *yaml.Node, blockPath string, scalarTypeName string) *schema.Diagnostics {\n\treturn x.buildNodeErrorMsg(blockPath, node, fmt.Sprintf(\"syntax error, %s must is a %s type\", blockPath, scalarTypeName))\n}\n\nfunc (x *YamlFileToModuleParser) buildNodeErrorMsgForSequenceType(node *yaml.Node, blockPath string, scalarTypeName string) *schema.Diagnostics {\n\treturn x.buildNodeErrorMsg(blockPath, node, fmt.Sprintf(\"syntax error, %s must is a %s type\", blockPath, scalarTypeName))\n}\n\nfunc (x *YamlFileToModuleParser) buildNodeErrorMsgForMappingType(node *yaml.Node, blockPath string) *schema.Diagnostics {\n\treturn x.buildNodeErrorMsg(blockPath, node, fmt.Sprintf(\"syntax error, %s block must is a mapping type\", blockPath))\n}\n\nfunc (x *YamlFileToModuleParser) buildNodeErrorMsgForArrayType(node *yaml.Node, blockPath string) *schema.Diagnostics {\n\treturn x.buildNodeErrorMsg(blockPath, node, fmt.Sprintf(\"syntax error, %s block must is a array type\", blockPath))\n}\n\nfunc (x *YamlFileToModuleParser) buildNodeErrorMsg(blockPath string, node *yaml.Node, errorMessage string) *schema.Diagnostics {\n\tlocation := module.BuildLocationFromYamlNode(x.yamlFilePath, blockPath, node)\n\treport := module.RenderErrorTemplate(errorMessage, location)\n\treturn schema.NewDiagnostics().AddErrorMsg(report)\n}\n\n// ------------------------------------------------- --------------------------------------------------------------------\n"
  },
  {
    "path": "pkg/modules/parser/yaml_file_to_module_parser_test.go",
    "content": "package parser\n\nimport (\n\t\"fmt\"\n\t\"github.com/selefra/selefra/pkg/utils\"\n\t\"github.com/stretchr/testify/assert\"\n\t\"testing\"\n)\n\nfunc TestYamlFileToModuleParser_Parse(t *testing.T) {\n\tmodule, diagnostics := NewYamlFileToModuleParser(\"./test_data/test_modules.yaml\").Parse()\n\tif utils.IsNotEmpty(diagnostics) {\n\t\tt.Log(diagnostics.ToString())\n\t}\n\tassert.False(t, utils.HasError(diagnostics))\n\n\tlocation := module.RulesBlock[0].MetadataBlock.GetNodeLocation(\"tags[0]._value\")\n\ts := location.ReadSourceString()\n\tfmt.Println(s)\n}\n"
  },
  {
    "path": "pkg/modules/planner/module_planner.go",
    "content": "package planner\n\nimport (\n\t\"context\"\n\t\"github.com/selefra/selefra-provider-sdk/provider/schema\"\n\t\"github.com/selefra/selefra/pkg/modules/module\"\n\tmoduleBlock \"github.com/selefra/selefra/pkg/modules/module\"\n)\n\n// ------------------------------------------------- --------------------------------------------------------------------\n\n// MakeModuleQueryPlan Generate an execution plan for the module\nfunc MakeModuleQueryPlan(ctx context.Context, options *ModulePlannerOptions) (*ModulePlan, *schema.Diagnostics) {\n\treturn NewModulePlanner(options).MakePlan(ctx)\n}\n\n// ------------------------------------------------- --------------------------------------------------------------------\n\n// ModulePlan Represents the execution plan of a module\ntype ModulePlan struct {\n\tInstruction map[string]interface{}\n\n\t// Which module is this execution plan generated for\n\t*module.Module\n\n\t// Scope at the module level\n\tModuleScope *Scope\n\n\t// The execution plan of the submodule\n\tSubModulesPlan []*ModulePlan\n\n\t// The execution plan of the rule under this module\n\tRulesPlan []*RulePlan\n}\n\n//// ------------------------------------------------- --------------------------------------------------------------------\n//\n//// RootModulePlan The execution plan of the root module\n//type RootModulePlan struct {\n//\n//\t// The root module's execution plan is also a module execution plan\n//\t*ModulePlan\n//\n//\t// The provider pull plan for all the following modules is extracted to the root module level\n//\tProviderFetchPlanSlice []*ProviderFetchPlan\n//}\n//\n\n// ------------------------------------------------- --------------------------------------------------------------------\n\n// ModulePlannerOptions Options when creating the Module Planner\ntype ModulePlannerOptions struct {\n\tInstruction map[string]interface{}\n\t// make plan for which module\n\tModule *module.Module\n\n\t// Table to Provider 
mapping\n\tTableToProviderMap map[string]string\n}\n\n// ------------------------------------------------- --------------------------------------------------------------------\n\n// ModulePlanner Used to generate an execution plan for a module\ntype ModulePlanner struct {\n\toptions *ModulePlannerOptions\n}\n\nvar _ Planner[*ModulePlan] = &ModulePlanner{}\n\nfunc NewModulePlanner(options *ModulePlannerOptions) *ModulePlanner {\n\treturn &ModulePlanner{\n\t\toptions: options,\n\t}\n}\n\nfunc (x *ModulePlanner) Name() string {\n\treturn \"module-planner\"\n}\n\nfunc (x *ModulePlanner) MakePlan(ctx context.Context) (*ModulePlan, *schema.Diagnostics) {\n\treturn x.buildModulePlanner(ctx, x.options.Module, NewScope())\n}\n\n// Specify execution plans for modules and submodules\nfunc (x *ModulePlanner) buildModulePlanner(ctx context.Context, module *module.Module, moduleScope *Scope) (*ModulePlan, *schema.Diagnostics) {\n\n\tdiagnostics := schema.NewDiagnostics()\n\n\tmodulePlan := &ModulePlan{\n\t\tInstruction: x.options.Instruction,\n\t\tModule:      module,\n\t\t// Inherits the scope of the parent module\n\t\tModuleScope:    moduleScope,\n\t\tSubModulesPlan: nil,\n\t\tRulesPlan:      nil,\n\t}\n\n\t// Generate an execution plan for the rules in the module\n\tfor _, ruleBlock := range module.RulesBlock {\n\t\trulePlan, d := NewRulePlanner(&RulePlannerOptions{\n\t\t\tModulePlan:         modulePlan,\n\t\t\tModule:             module,\n\t\t\tModuleScope:        modulePlan.ModuleScope,\n\t\t\tRuleBlock:          ruleBlock,\n\t\t\tTableToProviderMap: x.options.TableToProviderMap,\n\t\t}).MakePlan(ctx)\n\t\tif diagnostics.Add(d).HasError() {\n\t\t\treturn nil, diagnostics\n\t\t}\n\t\tmodulePlan.RulesPlan = append(modulePlan.RulesPlan, rulePlan)\n\t}\n\n\tvar subModuleInputMap map[string]*moduleBlock.ModuleBlock\n\tif len(module.ModulesBlock) != 0 {\n\t\tsubModuleInputMap = module.ModulesBlock.ModulesInputMap()\n\t}\n\t// Generate an execution plan for the submodules\n\tfor 
_, subModule := range module.SubModules {\n\n\t\tsubModuleScope := ExtendScope(modulePlan.ModuleScope)\n\n\t\t// Also, the module may have some initialized variables\n\t\tif subModuleInputMap != nil {\n\t\t\tif subModuleBlock := subModuleInputMap[subModule.Source]; subModuleBlock != nil && len(subModuleBlock.Input) != 0 {\n\t\t\t\tsubModuleScope.SetVariables(subModuleBlock.Input)\n\t\t\t}\n\t\t}\n\n\t\tsubModulePlan, d := x.buildModulePlanner(ctx, subModule, subModuleScope)\n\t\tif diagnostics.AddDiagnostics(d).HasError() {\n\t\t\treturn nil, diagnostics\n\t\t}\n\t\tmodulePlan.SubModulesPlan = append(modulePlan.SubModulesPlan, subModulePlan)\n\t}\n\n\treturn modulePlan, diagnostics\n}\n\n// ------------------------------------------------- --------------------------------------------------------------------\n"
  },
  {
    "path": "pkg/modules/planner/planner.go",
    "content": "package planner\n\nimport (\n\t\"context\"\n\t\"github.com/selefra/selefra-provider-sdk/provider/schema\"\n)\n\n// Planner Represents a planner that can generate a plan\ntype Planner[T any] interface {\n\n\t// Name The name of the planner\n\tName() string\n\n\t// MakePlan Make a plan\n\tMakePlan(ctx context.Context) (T, *schema.Diagnostics)\n}\n"
  },
  {
    "path": "pkg/modules/planner/provider_fetch_planner.go",
    "content": "package planner\n\nimport (\n\t\"context\"\n\t\"errors\"\n\t\"fmt\"\n\t\"github.com/selefra/selefra-provider-sdk/provider\"\n\t\"github.com/selefra/selefra-provider-sdk/provider/schema\"\n\t\"github.com/selefra/selefra-provider-sdk/storage\"\n\t\"github.com/selefra/selefra-provider-sdk/storage/database_storage/postgresql_storage\"\n\t\"github.com/selefra/selefra-provider-sdk/storage_factory\"\n\t\"github.com/selefra/selefra/pkg/message\"\n\t\"github.com/selefra/selefra/pkg/modules/module\"\n\t\"github.com/selefra/selefra/pkg/registry\"\n\t\"github.com/selefra/selefra/pkg/selefra_workspace\"\n\t\"github.com/selefra/selefra/pkg/storage/pgstorage\"\n\t\"github.com/selefra/selefra/pkg/utils\"\n\t\"os\"\n\t\"time\"\n)\n\n// ------------------------------------------------- --------------------------------------------------------------------\n\n// ProvidersFetchPlan The installation plan of a batch of providers\ntype ProvidersFetchPlan []*ProviderFetchPlan\n\n// BuildProviderContextMap Create an execution context for the provider installation plan\nfunc (x ProvidersFetchPlan) BuildProviderContextMap(ctx context.Context, DSN string) (map[string][]*ProviderContext, *schema.Diagnostics) {\n\n\tdiagnostics := schema.NewDiagnostics()\n\n\tm := make(map[string][]*ProviderContext, 0)\n\tfor _, plan := range x {\n\n\t\t//databaseSchema := pgstorage.GetSchemaKey(plan.Name, plan.Version, plan.ProviderConfigurationBlock)\n\t\t//options := postgresql_storage.NewPostgresqlStorageOptions(DSN)\n\t\t//options.SearchPath = databaseSchema\n\n\t\toptions := postgresql_storage.NewPostgresqlStorageOptions(DSN)\n\t\toptions.SearchPath = plan.FetchToDatabaseSchema\n\n\t\tdatabaseStorage, d := storage_factory.NewStorage(ctx, storage_factory.StorageTypePostgresql, options)\n\t\tif diagnostics.AddDiagnostics(d).HasError() {\n\t\t\treturn nil, diagnostics\n\t\t}\n\n\t\tproviderContext := &ProviderContext{\n\t\t\tProviderName:          plan.Name,\n\t\t\tProviderVersion:       
plan.Version,\n\t\t\tDSN:                   DSN,\n\t\t\tSchema:                plan.FetchToDatabaseSchema,\n\t\t\tStorage:               databaseStorage,\n\t\t\tProviderConfiguration: plan.ProviderConfigurationBlock,\n\t\t}\n\t\tm[plan.Name] = append(m[plan.Name], providerContext)\n\t}\n\n\treturn m, diagnostics\n}\n\n// ProviderContext Ready execution strategy\ntype ProviderContext struct {\n\n\t// Which provider is it?\n\tProviderName string\n\n\t// Which version\n\tProviderVersion string\n\n\tDSN string\n\n\t// The database stored to\n\tSchema string\n\n\t// A connection to a database instance\n\tStorage storage.Storage\n\n\t// The provider configuration block\n\tProviderConfiguration *module.ProviderBlock\n}\n\n// ------------------------------------------------- --------------------------------------------------------------------\n\nconst (\n\tDefaultMaxGoroutines = uint64(100)\n)\n\n// ProviderFetchPlan Indicates the pull plan of a provider\ntype ProviderFetchPlan struct {\n\t*ProviderInstallPlan\n\n\t// provider Configuration information used for fetching\n\tProviderConfigurationBlock *module.ProviderBlock\n\n\t// Which schema to write data to\n\tFetchToDatabaseSchema string\n\n\t// The name of the configuration block to be used, which is left blank if not configured using a configuration file\n\tProviderConfigurationName string\n\n\t// What is the MD5 of the configuration block if the provider configuration is used\n\tProviderConfigurationMD5 string\n}\n\nfunc NewProviderFetchPlan(providerName, providerVersion string, providerBlock *module.ProviderBlock) *ProviderFetchPlan {\n\treturn &ProviderFetchPlan{\n\t\tProviderInstallPlan: &ProviderInstallPlan{\n\t\t\tProvider: registry.NewProvider(providerName, providerVersion),\n\t\t},\n\t\tProviderConfigurationBlock: providerBlock,\n\t}\n}\n\n// GetProvidersConfigYamlString Obtain the configuration file for running the Provider\nfunc (x *ProviderFetchPlan) GetProvidersConfigYamlString() string {\n\tif 
x.ProviderConfigurationBlock != nil {\n\t\treturn x.ProviderConfigurationBlock.ProvidersConfigYamlString\n\t}\n\treturn \"\"\n}\n\n// GetNeedPullTablesName Gets which tables to pull when pulling\nfunc (x *ProviderFetchPlan) GetNeedPullTablesName() []string {\n\ttables := make([]string, 0)\n\tif x.ProviderConfigurationBlock != nil {\n\t\ttables = x.ProviderConfigurationBlock.Resources\n\t}\n\tif len(tables) == 0 {\n\t\ttables = append(tables, provider.AllTableNameWildcard)\n\t}\n\treturn tables\n}\n\n// GetMaxGoroutines How many concurrency is used to pull the table data\nfunc (x *ProviderFetchPlan) GetMaxGoroutines() uint64 {\n\tif x.ProviderConfigurationBlock != nil && x.ProviderConfigurationBlock.MaxGoroutines != nil {\n\t\treturn *x.ProviderConfigurationBlock.MaxGoroutines\n\t} else {\n\t\treturn DefaultMaxGoroutines\n\t}\n}\n\n// ------------------------------------------------- --------------------------------------------------------------------\n\n// ProviderFetchPlannerOptions This parameter is required when creating the provider execution plan\ntype ProviderFetchPlannerOptions struct {\n\n\t// Which module is the execution plan being generated for\n\tModule *module.Module\n\n\t// Provider version that wins the vote\n\tProviderVersionVoteWinnerMap map[string]string\n\n\t// DSNS are used to connect to the database to determine which schema to use when using environment variables\n\tDSN string\n\n\t// A place to send messages to the outside world\n\tMessageChannel *message.Channel[*schema.Diagnostics]\n}\n\n// ------------------------------------------------- --------------------------------------------------------------------\n\ntype ProviderFetchPlanner struct {\n\toptions *ProviderFetchPlannerOptions\n}\n\nvar _ Planner[ProvidersFetchPlan] = &ProviderFetchPlanner{}\n\nfunc NewProviderFetchPlanner(options *ProviderFetchPlannerOptions) *ProviderFetchPlanner {\n\treturn &ProviderFetchPlanner{\n\t\toptions: options,\n\t}\n}\n\nfunc (x *ProviderFetchPlanner) 
Name() string {\n\treturn \"provider-fetch-planner\"\n}\n\nfunc (x *ProviderFetchPlanner) MakePlan(ctx context.Context) (ProvidersFetchPlan, *schema.Diagnostics) {\n\n\tdefer func() {\n\t\tx.options.MessageChannel.SenderWaitAndClose()\n\t}()\n\n\treturn x.expandByConfiguration(ctx)\n}\n\n// Expand to multiple tasks based on the configuration\nfunc (x *ProviderFetchPlanner) expandByConfiguration(ctx context.Context) ([]*ProviderFetchPlan, *schema.Diagnostics) {\n\n\tdiagnostics := schema.NewDiagnostics()\n\tproviderFetchPlanSlice := make([]*ProviderFetchPlan, 0)\n\n\tif x.options.Module.SelefraBlock == nil {\n\t\treturn nil, diagnostics.AddErrorMsg(\"Module %s must have selefra block for make fetch plan\", x.options.Module.BuildFullName())\n\t} else if len(x.options.Module.SelefraBlock.RequireProvidersBlock) == 0 {\n\t\treturn nil, diagnostics.AddErrorMsg(\"Module %s selefra block not have providers block\", x.options.Module.BuildFullName())\n\t}\n\n\t// Start a task for those that have a task written, some join by fetch start rule\n\tproviderNamePlanCountMap := make(map[string]int, 0)\n\tnameToProviderMap := x.options.Module.SelefraBlock.RequireProvidersBlock.BuildNameToProviderBlockMap()\n\tfor _, providerBlock := range x.options.Module.ProvidersBlock {\n\n\t\t// find required provider block\n\t\trequiredProviderBlock, exists := nameToProviderMap[providerBlock.Provider]\n\t\tif !exists {\n\t\t\t// selefra.providers block not found that name in providers[index] configuration\n\t\t\terrorTips := fmt.Sprintf(\"Provider name %s not found\", providerBlock.Provider)\n\t\t\tdiagnostics.AddErrorMsg(module.RenderErrorTemplate(errorTips, providerBlock.GetNodeLocation(\"provider\"+module.NodeLocationSelfValue)))\n\t\t\tcontinue\n\t\t}\n\n\t\t// find use provider version\n\t\tproviderWinnerVersion, exists := x.options.ProviderVersionVoteWinnerMap[requiredProviderBlock.Source]\n\t\tif !exists {\n\t\t\terrorTips := fmt.Sprintf(\"Provider version %s not found\", 
requiredProviderBlock.Source)\n\t\t\tdiagnostics.AddErrorMsg(module.RenderErrorTemplate(errorTips, requiredProviderBlock.GetNodeLocation(\"version\")))\n\t\t\tcontinue\n\t\t}\n\n\t\t// Start a plan for the provider\n\t\tproviderNamePlanCountMap[requiredProviderBlock.Source]++\n\t\tproviderFetchPlan := NewProviderFetchPlan(requiredProviderBlock.Source, providerWinnerVersion, providerBlock)\n\n\t\tfetchToDatabaseSchema := pgstorage.GetSchemaKey(requiredProviderBlock.Source, providerWinnerVersion, providerBlock)\n\t\tproviderFetchPlan.FetchToDatabaseSchema = fetchToDatabaseSchema\n\t\tproviderFetchPlanSlice = append(providerFetchPlanSlice, providerFetchPlan)\n\n\t}\n\tif diagnostics.HasError() {\n\t\treturn nil, diagnostics\n\t}\n\n\tdeviceID, d := selefra_workspace.GetDeviceID()\n\tif diagnostics.AddDiagnostics(d).HasError() {\n\t\treturn nil, diagnostics\n\t}\n\n\t// See if there is another project that has not been activated, and if there is, start a pull plan for it as well\n\tfor providerName, providerVersion := range x.options.ProviderVersionVoteWinnerMap {\n\t\tif providerNamePlanCountMap[providerName] > 0 {\n\t\t\tcontinue\n\t\t}\n\n\t\tproviderFetchPlan := NewProviderFetchPlan(providerName, providerVersion, nil)\n\t\tfetchToDatabaseSchema, d := x.decideDatabaseSchemaForNoProviderBlockPlan(ctx, providerFetchPlan, deviceID)\n\t\tif diagnostics.AddDiagnostics(d).HasError() {\n\t\t\tcontinue\n\t\t}\n\t\tproviderFetchPlan.FetchToDatabaseSchema = fetchToDatabaseSchema\n\t\tproviderFetchPlanSlice = append(providerFetchPlanSlice, providerFetchPlan)\n\t}\n\n\treturn providerFetchPlanSlice, diagnostics\n}\n\n// Generate schema names for pull plans that do not have provider blocks\nfunc (x *ProviderFetchPlanner) decideDatabaseSchemaForNoProviderBlockPlan(ctx context.Context, plan *ProviderFetchPlan, deviceID string) (string, *schema.Diagnostics) {\n\n\tdiagnostics := schema.NewDiagnostics()\n\n\t// Verify that the database is available\n\tfetchToDatabaseSchema := 
pgstorage.GetSchemaKey(plan.Name, plan.Version, nil)\n\tpgstorage.WithSearchPath(fetchToDatabaseSchema)\n\tpostgresqlOptions := postgresql_storage.NewPostgresqlStorageOptions(x.options.DSN)\n\tdatabaseStorage, d := storage_factory.NewStorage(ctx, storage_factory.StorageTypePostgresql, postgresqlOptions)\n\tif diagnostics.AddDiagnostics(d).HasError() {\n\t\treturn \"\", diagnostics\n\t}\n\t// storage created must remember to close\n\tdefer func() {\n\t\tdatabaseStorage.Close()\n\t}()\n\towner, d := pgstorage.GetSchemaOwner(ctx, databaseStorage)\n\tif diagnostics.AddDiagnostics(d).HasError() {\n\t\treturn \"\", diagnostics\n\t}\n\tif owner == nil {\n\t\t// This schema is still in unowned state. Try to get its attribution\n\t\td := x.grabDatabaseSchema(ctx, plan, deviceID, databaseStorage)\n\t\tif diagnostics.AddDiagnostics(d).HasError() {\n\t\t\treturn \"\", diagnostics\n\t\t}\n\t\treturn fetchToDatabaseSchema, diagnostics\n\t}\n\n\t// If the schema is already occupied by someone, check to see if that person is yourself\n\tif owner.HolderID == deviceID {\n\t\t// If that person is yourself, then you can continue to use it\n\t\treturn fetchToDatabaseSchema, diagnostics\n\t}\n\n\t// The previous schema is occupied, so you have to use your own separate schema\n\tfetchToDatabaseSchema = fetchToDatabaseSchema + \"_\" + deviceID\n\treturn fetchToDatabaseSchema, diagnostics\n}\n\n// Use the database schema\n// When a schema is assigned to the provider in the execution plan, the ownership of the schema is also marked for the provider to avoid schema ownership disputes during the execution phase\nfunc (x *ProviderFetchPlanner) grabDatabaseSchema(ctx context.Context, plan *ProviderFetchPlan, deviceID string, storage storage.Storage) *schema.Diagnostics {\n\n\tlockOwnerId := utils.BuildLockOwnerId()\n\ttryTimes := 0\n\n\tfor {\n\n\t\tx.options.MessageChannel.Send(schema.NewDiagnostics().AddInfo(\"Provider %s, schema %s, owner %s, make execute plan, begin try get database schema 
lock...\", plan.String(), plan.FetchToDatabaseSchema, lockOwnerId))\n\n\t\ttryTimes++\n\t\terr := storage.Lock(ctx, pgstorage.LockId, lockOwnerId)\n\t\tif err != nil {\n\t\t\tx.options.MessageChannel.Send(schema.NewDiagnostics().AddErrorMsg(\"Provider %s, schema %s, owner %s, make execute plan, get database schema lock error: %s, will sleep & retry, tryTimes = %d\", plan.String(), plan.FetchToDatabaseSchema, lockOwnerId, err.Error(), tryTimes))\n\t\t} else {\n\t\t\tx.options.MessageChannel.Send(schema.NewDiagnostics().AddInfo(\"Provider %s, schema %s, owner %s, make execute plan, get database schema lock success\", plan.String(), plan.FetchToDatabaseSchema, lockOwnerId))\n\t\t\tbreak\n\t\t}\n\t\ttime.Sleep(time.Second * 10)\n\t}\n\tdefer func() {\n\t\tfor tryTimes := 0; tryTimes < 10; tryTimes++ {\n\t\t\terr := storage.UnLock(ctx, pgstorage.LockId, lockOwnerId)\n\t\t\tif err != nil {\n\t\t\t\tif errors.Is(err, postgresql_storage.ErrLockNotFound) {\n\t\t\t\t\tx.options.MessageChannel.Send(schema.NewDiagnostics().AddInfo(\"Provider %s, schema %s, owner = %s, release database schema lock success\", plan.String(), plan.FetchToDatabaseSchema, lockOwnerId))\n\t\t\t\t} else {\n\t\t\t\t\tx.options.MessageChannel.Send(schema.NewDiagnostics().AddErrorMsg(\"Provider %s, schema %s, owner = %s, release database schema lock error: %s, will sleep & retry, tryTimes = %d\", plan.String(), plan.FetchToDatabaseSchema, lockOwnerId, err.Error(), tryTimes))\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\tx.options.MessageChannel.Send(schema.NewDiagnostics().AddInfo(\"Provider %s, schema %s, owner = %s, release database schema lock success\", plan.String(), plan.FetchToDatabaseSchema, lockOwnerId))\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t}()\n\n\t// You can hold this database, It's okay to hold the database, because you were the first one there\n\t// First set a tag bit to occupy this schema\n\thostname, _ := os.Hostname()\n\treturn pgstorage.SaveSchemaOwner(ctx, storage, 
&pgstorage.SchemaOwnerInformation{\n\t\tHostname: hostname,\n\t\tHolderID: deviceID,\n\t\t// TODO If you are using a configuration file, put these two fields on the Settings\n\t\tConfigurationName: \"\",\n\t\tConfigurationMD5:  \"\",\n\t})\n}\n\n// ------------------------------------------------- --------------------------------------------------------------------\n"
  },
  {
    "path": "pkg/modules/planner/provider_fetch_planner_test.go",
    "content": "package planner\n\nimport (\n\t\"context\"\n\t\"github.com/selefra/selefra-provider-sdk/env\"\n\t\"github.com/selefra/selefra-provider-sdk/provider/schema\"\n\t\"github.com/selefra/selefra-utils/pkg/json_util\"\n\t\"github.com/selefra/selefra/pkg/message\"\n\t\"github.com/selefra/selefra/pkg/modules/module\"\n\t\"github.com/selefra/selefra/pkg/modules/module_loader\"\n\t\"github.com/selefra/selefra/pkg/utils\"\n\t\"github.com/stretchr/testify/assert\"\n\t\"testing\"\n)\n\nfunc TestProviderFetchPlanner_MakePlan(t *testing.T) {\n\n\t//rootModule := module.NewModule()\n\t//rootModule.SelefraBlock = module.NewSelefraBlock()\n\t//rootModule.SelefraBlock.RequireProvidersBlock = []*module.RequireProviderBlock{\n\t//\t{\n\t//\t\tName:    \"aws\",\n\t//\t\tSource:  \"aws\",\n\t//\t\tVersion: \"latest\",\n\t//\t},\n\t//\t{\n\t//\t\tName:    \"gcp\",\n\t//\t\tSource:  \"gcp\",\n\t//\t\tVersion: \"latest\",\n\t//\t},\n\t//}\n\t//rootModule.ProvidersBlock = []*module.ProviderBlock{\n\t//\t{\n\t//\t\tName:          \"aws-001\",\n\t//\t\tProvider:      \"aws\",\n\t//\t\tMaxGoroutines: pointer.ToUInt64Pointer(10),\n\t//\t},\n\t//\t{\n\t//\t\tName:          \"aws-002\",\n\t//\t\tProvider:      \"aws\",\n\t//\t\tMaxGoroutines: pointer.ToUInt64Pointer(30),\n\t//\t},\n\t//}\n\t//versionWinnerMap := map[string]string{\n\t//\t\"aws\": \"v0.0.1\",\n\t//\t\"gcp\": \"v0.0.1\",\n\t//}\n\t//plan, diagnostics := NewProviderFetchPlanner(&ProviderFetchPlannerOptions{\n\t//\tModule:                       rootModule,\n\t//\tProviderVersionVoteWinnerMap: versionWinnerMap,\n\t//}).MakePlan(context.Background())\n\t//assert.False(t, utils.HasError(diagnostics))\n\t//assert.Equal(t, 3, len(plan))\n\n\t// load module\n\tmoduleDirectory := \"./test_data/provider_fetch_planner\"\n\tmessageChannel := message.NewChannel[*schema.Diagnostics](func(index int, message *schema.Diagnostics) {\n\t\tif utils.IsNotEmpty(message) {\n\t\t\tt.Log(message.String())\n\t\t}\n\t})\n\tloader, err := 
module_loader.NewLocalDirectoryModuleLoader(&module_loader.LocalDirectoryModuleLoaderOptions{\n\t\tModuleLoaderOptions: &module_loader.ModuleLoaderOptions{\n\t\t\tSource:            moduleDirectory,\n\t\t\tVersion:           \"\",\n\t\t\tDownloadDirectory: \"./test_download\",\n\t\t\tProgressTracker:   nil,\n\t\t\tMessageChannel:    messageChannel,\n\t\t},\n\t\tModuleDirectory: moduleDirectory,\n\t})\n\tassert.Nil(t, err)\n\trootModule, isLoadSuccess := loader.Load(context.Background())\n\tmessageChannel.ReceiverWait()\n\tassert.True(t, isLoadSuccess)\n\n\t// check module\n\tvalidatorContext := module.NewValidatorContext()\n\td := rootModule.Check(rootModule, validatorContext)\n\tif utils.IsNotEmpty(d) {\n\t\tt.Log(d.String())\n\t}\n\tassert.False(t, utils.HasError(d))\n\tif utils.HasError(d) {\n\t\treturn\n\t}\n\n\tversionWinnerMap := map[string]string{\n\t\t\"aws\": \"v0.0.1\",\n\t\t\"gcp\": \"v0.0.1\",\n\t}\n\tmessageChannel = message.NewChannel[*schema.Diagnostics](func(index int, message *schema.Diagnostics) {\n\t\tif utils.IsNotEmpty(message) {\n\t\t\tt.Log(message.String())\n\t\t}\n\t})\n\tproviderFetchPlans, diagnostics := NewProviderFetchPlanner(&ProviderFetchPlannerOptions{\n\t\tModule:                       rootModule,\n\t\tProviderVersionVoteWinnerMap: versionWinnerMap,\n\t\tDSN:                          env.GetDatabaseDsn(),\n\t\tMessageChannel:               messageChannel,\n\t}).MakePlan(context.Background())\n\tmessageChannel.ReceiverWait()\n\tif utils.IsNotEmpty(diagnostics) {\n\t\tt.Log(diagnostics.String())\n\t}\n\tassert.False(t, utils.HasError(diagnostics))\n\tassert.NotEqual(t, 0, len(providerFetchPlans))\n\n\tt.Log(json_util.ToJsonString(providerFetchPlans))\n\n}\n"
  },
  {
    "path": "pkg/modules/planner/provider_install_planner.go",
    "content": "package planner\n\nimport (\n\t\"context\"\n\t\"fmt\"\n\t\"github.com/selefra/selefra-provider-sdk/provider/schema\"\n\t\"github.com/selefra/selefra/pkg/modules/module\"\n\t\"github.com/selefra/selefra/pkg/registry\"\n\t\"github.com/selefra/selefra/pkg/utils\"\n\t\"github.com/selefra/selefra/pkg/version\"\n\t\"strings\"\n)\n\n// ------------------------------------------------ ---------------------------------------------------------------------\n\n// MakeProviderInstallPlan Plan the provider installation for the module\nfunc MakeProviderInstallPlan(ctx context.Context, module *module.Module) (ProvidersInstallPlan, *schema.Diagnostics) {\n\treturn NewProviderInstallPlanner(module).MakePlan(ctx)\n}\n\n// ------------------------------------------------ ---------------------------------------------------------------------\n\ntype ProvidersInstallPlan []*ProviderInstallPlan\n\nfunc (x ProvidersInstallPlan) ToMap() map[string]string {\n\tm := make(map[string]string)\n\tfor _, p := range x {\n\t\tm[p.Name] = p.Version\n\t}\n\treturn m\n}\n\n// ------------------------------------------------- --------------------------------------------------------------------\n\n// ProviderInstallPlan Indicates the installation plan of a provider\ntype ProviderInstallPlan struct {\n\t// Which version of which provider is to be used to pull data\n\t*registry.Provider\n}\n\n// NewProviderInstallPlan Create an installation plan based on the provider name and version number\nfunc NewProviderInstallPlan(providerName, providerVersion string) *ProviderInstallPlan {\n\treturn &ProviderInstallPlan{\n\t\tProvider: registry.NewProvider(providerName, providerVersion),\n\t}\n}\n\n// ------------------------------------------------ ---------------------------------------------------------------------\n\n// ProviderInstallPlanner This command is used to plan the provider installation for Module\ntype ProviderInstallPlanner struct {\n\tmodule *module.Module\n}\n\nvar _ 
Planner[ProvidersInstallPlan] = &ProviderInstallPlanner{}\n\nfunc NewProviderInstallPlanner(module *module.Module) *ProviderInstallPlanner {\n\treturn &ProviderInstallPlanner{\n\t\tmodule: module,\n\t}\n}\n\nfunc (x *ProviderInstallPlanner) Name() string {\n\treturn \"provider-install-planner\"\n}\n\nfunc (x *ProviderInstallPlanner) MakePlan(ctx context.Context) (ProvidersInstallPlan, *schema.Diagnostics) {\n\tdiagnostics := schema.NewDiagnostics()\n\tproviderVersionVoteWinnerMap, d := x.providerVersionVote(ctx)\n\tif diagnostics.AddDiagnostics(d).HasError() {\n\t\treturn nil, diagnostics\n\t}\n\tproviderInstallPlanSlice := make([]*ProviderInstallPlan, 0)\n\tfor providerName, providerVersion := range providerVersionVoteWinnerMap {\n\t\tproviderInstallPlanSlice = append(providerInstallPlanSlice, NewProviderInstallPlan(providerName, providerVersion))\n\t}\n\treturn providerInstallPlanSlice, diagnostics\n}\n\n// provider version election to determine which provider version to use\nfunc (x *ProviderInstallPlanner) providerVersionVote(ctx context.Context) (map[string]string, *schema.Diagnostics) {\n\n\tdiagnostics := schema.NewDiagnostics()\n\n\t// Start with the root module and let all modules vote\n\tservice := NewProviderVersionVoteService()\n\tx.module.Traversal(ctx, func(ctx context.Context, traversalContext *module.TraversalContext) bool {\n\t\tdiagnostics.AddDiagnostics(service.Vote(ctx, traversalContext.Module))\n\t\treturn true\n\t})\n\tif utils.HasError(diagnostics) {\n\t\treturn nil, diagnostics\n\t}\n\n\t// Determine the final version used for each provider\n\tproviderVersionVoteWinnerMap := make(map[string]string, 0)\n\terrorReportSlice := make([]string, 0)\n\tfor providerName, voteInfo := range service.providerVersionVoteMap {\n\n\t\twinnersVersions := voteInfo.GetWinnersVersionVoteSummary()\n\n\t\t// The election was defeated, and no version received unanimous votes\n\t\tif len(winnersVersions) < 1 {\n\t\t\terrorReportSlice = append(errorReportSlice, 
x.buildVersionVoteFailedReport(voteInfo))\n\t\t} else {\n\t\t\t// Select the latest version of the provider that supports all Modules\n\t\t\twinnerVersionSlice := version.Sort(voteInfo.GetWinnersVersionSlice())\n\t\t\twinnerVersion := winnerVersionSlice[len(winnerVersionSlice)-1]\n\t\t\t// TODO debug log\n\t\t\tproviderVersionVoteWinnerMap[providerName] = winnerVersion\n\t\t}\n\n\t}\n\n\tif len(errorReportSlice) > 0 {\n\t\tfor index, report := range errorReportSlice {\n\t\t\tif index != len(errorReportSlice)-1 {\n\t\t\t\treport += \"\\n\\n~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\\n\\n\"\n\t\t\t}\n\t\t\tdiagnostics.AddErrorMsg(report)\n\t\t}\n\t}\n\treturn providerVersionVoteWinnerMap, diagnostics\n}\n\n// When a vote fails, construct a general report so the user knows what went wrong\nfunc (x *ProviderInstallPlanner) buildVersionVoteFailedReport(providerVote *ProviderVote) string {\n\treport := strings.Builder{}\n\treport.WriteString(fmt.Sprintf(\"Failed to vote version for provider %s: \\n\", providerVote.ProviderName))\n\tfor module, versionSlice := range providerVote.ToModuleAllowProviderVersionMap() {\n\t\tversion.Sort(versionSlice)\n\t\treport.WriteString(fmt.Sprintf(\"Module %s support version: %s \\n\", module.BuildFullName(), strings.Join(versionSlice, \", \")))\n\t}\n\treport.WriteString(fmt.Sprintf(\"Cannot find a %s provider version that supports all of the above modules\\n\", providerVote.ProviderName))\n\n\treturn report.String()\n}\n\n// ------------------------------------------------- --------------------------------------------------------------------\n"
  },
  {
    "path": "pkg/modules/planner/provider_install_planner_test.go",
    "content": "package planner\n\nimport (\n\t\"context\"\n\t\"github.com/selefra/selefra/pkg/utils\"\n\t\"github.com/stretchr/testify/assert\"\n\t\"testing\"\n)\n\nfunc TestNewProviderInstallPlanner(t *testing.T) {\n\n\t// case 1: To be able to pick a definitive version\n\trootModule := randomModule(\"v0.0.1, v0.0.2\")\n\trootModule.SubModules = append(rootModule.SubModules, randomModule(\"v0.0.2, v0.0.3\"))\n\tplan, diagnostics := NewProviderInstallPlanner(rootModule).MakePlan(context.Background())\n\tassert.False(t, utils.HasError(diagnostics))\n\tassert.Len(t, plan, 1)\n\tassert.Equal(t, \"v0.0.2\", plan[0].Version)\n\n\t//// case 2: The ability to select multiple explicit versions\n\trootModule = randomModule(\"v0.0.1, v0.0.2, v0.0.3\")\n\trootModule.SubModules = append(rootModule.SubModules, randomModule(\"v0.0.2, v0.0.3, v0.0.4\"))\n\tplan, diagnostics = NewProviderInstallPlanner(rootModule).MakePlan(context.Background())\n\tassert.False(t, utils.HasError(diagnostics))\n\tassert.Len(t, plan, 1)\n\tassert.Equal(t, \"v0.0.3\", plan[0].Version)\n\t//\n\t//// case 3: No definitive version could be selected\n\trootModule = randomModule(\"v0.0.1, v0.0.2\")\n\trootModule.SubModules = append(rootModule.SubModules, randomModule(\"v0.0.3, v0.0.4\"))\n\tplan, diagnostics = NewProviderInstallPlanner(rootModule).MakePlan(context.Background())\n\tassert.True(t, utils.HasError(diagnostics))\n\tt.Log(diagnostics.ToString())\n\tassert.Len(t, plan, 0)\n\n}\n"
  },
  {
    "path": "pkg/modules/planner/rule_planner.go",
    "content": "package planner\n\nimport (\n\t\"context\"\n\t\"fmt\"\n\t\"github.com/selefra/selefra-provider-sdk/provider/schema\"\n\t\"github.com/selefra/selefra/pkg/modules/module\"\n\t\"sort\"\n)\n\n// ------------------------------------------------- --------------------------------------------------------------------\n\ntype RulePlan struct {\n\t// The execution plan of the module to which it is associated\n\tModulePlan *ModulePlan\n\n\t// The module to which it is associated\n\tModule *module.Module\n\n\t// Is the execution plan for which block\n\t*module.RuleBlock\n\n\t// Which provider is the rule bound to? Currently, a rule can be bound to only one provider\n\tBindingProviderName string\n\n\t// Render a good rule - bound Query\n\tQuery string\n\n\t// Which tables are used in this Query\n\tBindingTables []string\n\n\tRuleScope *Scope\n}\n\nfunc (x *RulePlan) String() string {\n\tif x.MetadataBlock != nil {\n\t\treturn x.Name + \":\" + x.MetadataBlock.Id\n\t} else {\n\t\treturn x.Name\n\t}\n}\n\n// ------------------------------------------------- --------------------------------------------------------------------\n\n// MakeRulePlan Plan the execution of the rule\nfunc MakeRulePlan(ctx context.Context, options *RulePlannerOptions) (*RulePlan, *schema.Diagnostics) {\n\treturn NewRulePlanner(options).MakePlan(ctx)\n}\n\n// ------------------------------------------------- --------------------------------------------------------------------\n\n// RulePlannerOptions Parameters required when creating a module execution plan\ntype RulePlannerOptions struct {\n\n\t// The execution plan of the module to which it is associated\n\tModulePlan *ModulePlan\n\n\t// The module to which it is associated\n\tModule *module.Module\n\n\t// The scope of the owning module\n\tModuleScope *Scope\n\n\t// Is the execution plan for which block\n\tRuleBlock *module.RuleBlock\n\n\t// Mapping between the table and the provider\n\tTableToProviderMap map[string]string\n}\n\n// 
------------------------------------------------- --------------------------------------------------------------------\n\n// RulePlanner An enforcement plan for this rule\ntype RulePlanner struct {\n\toptions *RulePlannerOptions\n}\n\nvar _ Planner[*RulePlan] = &RulePlanner{}\n\nfunc (x *RulePlanner) Name() string {\n\treturn \"rule-planner\"\n}\n\nfunc NewRulePlanner(options *RulePlannerOptions) *RulePlanner {\n\treturn &RulePlanner{\n\t\toptions: options,\n\t}\n}\n\n// MakePlan Develop an implementation plan for rule\nfunc (x *RulePlanner) MakePlan(ctx context.Context) (*RulePlan, *schema.Diagnostics) {\n\n\tdiagnostics := schema.NewDiagnostics()\n\n\t// Render the query statement for the Rule\n\truleScope := ExtendScope(x.options.ModuleScope)\n\tquery, err := ruleScope.RenderingTemplate(x.options.RuleBlock.Query, x.options.RuleBlock.Query)\n\tif err != nil {\n\t\tlocation := x.options.RuleBlock.GetNodeLocation(\"query\" + module.NodeLocationSelfValue)\n\t\t// TODO 2023-2-24 15:10:15 bug: Can't correct marks used in yaml | a line\n\t\treport := module.RenderErrorTemplate(fmt.Sprintf(\"rendering query template error: %s\", err.Error()), location)\n\t\treturn nil, diagnostics.AddErrorMsg(report)\n\t}\n\n\t//Resolve the binding of the Rule to the Provider and table\n\tbindingProviders, bindingTables := x.extractBinding(query, x.options.TableToProviderMap)\n\tif len(bindingProviders) != 1 {\n\t\tvar errorTips string\n\t\tif len(bindingProviders) == 0 {\n\t\t\terrorTips = fmt.Sprintf(\"Your rule query should use at least one of the provider tables. 
Check that your sql is written correctly: %s\", x.options.RuleBlock.Query)\n\t\t} else {\n\t\t\terrorTips = fmt.Sprintf(\"The tables used in your rule query span multiple providers; the current version of the rule query only allows several tables from one provider to be used: %s\", x.options.RuleBlock.Query)\n\t\t}\n\t\tlocation := x.options.RuleBlock.GetNodeLocation(\"query\" + module.NodeLocationSelfValue)\n\t\t// TODO 2023-2-24 15:10:15 bug: Can't correct marks used in yaml | a line\n\t\treport := module.RenderErrorTemplate(errorTips, location)\n\t\treturn nil, diagnostics.AddErrorMsg(report)\n\t}\n\n\t// Create a Rule execution plan\n\treturn &RulePlan{\n\n\t\tModulePlan: x.options.ModulePlan,\n\t\tModule:     x.options.Module,\n\n\t\tRuleBlock: x.options.RuleBlock,\n\n\t\tBindingProviderName: bindingProviders[0],\n\t\tBindingTables:       bindingTables,\n\n\t\tQuery: query,\n\n\t\tRuleScope: ruleScope,\n\t}, diagnostics\n}\n\n// Extract the names of the tables it uses from the rendered rule Query\nfunc (x *RulePlanner) extractBinding(query string, tableToProviderMap map[string]string) (bindingProviders []string, bindingTables []string) {\n\tbindingProviderSet := make(map[string]struct{})\n\tbindingTableSet := make(map[string]struct{})\n\tinWord := false\n\tlastIndex := 0\n\tfor index, c := range query {\n\t\tif c >= 'a' && c <= 'z' || c >= 'A' && c <= 'Z' || c == '_' || c >= '0' && c <= '9' {\n\t\t\tif !inWord {\n\t\t\t\tinWord = true\n\t\t\t\tlastIndex = index\n\t\t\t}\n\t\t} else {\n\t\t\tif inWord {\n\t\t\t\tword := query[lastIndex:index]\n\t\t\t\tif providerName, exists := tableToProviderMap[word]; exists {\n\t\t\t\t\tbindingTableSet[word] = struct{}{}\n\t\t\t\t\tbindingProviderSet[providerName] = struct{}{}\n\t\t\t\t}\n\t\t\t\tinWord = false\n\t\t\t}\n\t\t}\n\t}\n\n\tfor providerName := range bindingProviderSet {\n\t\tbindingProviders = append(bindingProviders, providerName)\n\t}\n\tfor tableName := range bindingTableSet {\n\t\tbindingTables = 
append(bindingTables, tableName)\n\t}\n\n\t// keep dictionary order, show it to console need keep same\n\tsort.Strings(bindingProviders)\n\tsort.Strings(bindingTables)\n\n\treturn\n}\n\n// ------------------------------------------------- --------------------------------------------------------------------\n\n// The old scheme does implicit provider association, while the new scheme does whitelist association\n//// Extracting the provider name from the table name used by the policy is an implicit association\n//func (x *RulePlanner) extractImplicitProvider(tablesName []string) ([]string, *schema.Diagnostics) {\n//\tdiagnostics := schema.NewDiagnostics()\n//\tproviderNameSet := make(map[string]struct{}, 0)\n//\tfor _, tableName := range tablesName {\n//\t\tsplit := strings.SplitN(tableName, \"_\", 2)\n//\t\tif len(split) != 2 {\n//\t\t\tdiagnostics.AddErrorMsg(\"can not found implicit provider name from table name %s\", tableName)\n//\t\t} else {\n//\t\t\tproviderNameSet[split[0]] = struct{}{}\n//\t\t}\n//\t}\n//\tproviderNameSlice := make([]string, 0)\n//\tfor providerName := range providerNameSet {\n//\t\tproviderNameSlice = append(providerNameSlice, providerName)\n//\t}\n//\treturn providerNameSlice, diagnostics\n//}\n//\n//// Extract the names of the tables it uses from the rendered rule Query\n//func (x *RulePlanner) extractTableNameSliceFromRuleQuery(s string, whitelistWordSet map[string]string) []string {\n//\tvar matchResultSet []string\n//\tinWord := false\n//\tlastIndex := 0\n//\tfor index, c := range s {\n//\t\tif c >= 'a' && c <= 'z' || c >= 'A' && c <= 'Z' || c == '_' || c >= '0' && c <= '9' {\n//\t\t\tif !inWord {\n//\t\t\t\tinWord = true\n//\t\t\t\tlastIndex = index\n//\t\t\t}\n//\t\t} else {\n//\t\t\tif inWord {\n//\t\t\t\tword := s[lastIndex:index]\n//\t\t\t\tif _, exists := whitelistWordSet[word]; exists {\n//\t\t\t\t\tmatchResultSet = append(matchResultSet, word)\n//\t\t\t\t}\n//\t\t\t\tinWord = false\n//\t\t\t}\n//\t\t}\n//\t}\n//\treturn 
matchResultSet\n//}\n\n// ------------------------------------------------- --------------------------------------------------------------------\n"
  },
  {
    "path": "pkg/modules/planner/rule_planner_test.go",
    "content": "package planner\n\nimport (\n\t\"context\"\n\t\"github.com/selefra/selefra-provider-sdk/provider/schema\"\n\t\"github.com/selefra/selefra/pkg/message\"\n\t\"github.com/selefra/selefra/pkg/modules/module_loader\"\n\t\"github.com/selefra/selefra/pkg/utils\"\n\t\"github.com/stretchr/testify/assert\"\n\t\"testing\"\n)\n\nfunc TestRulePlanner_MakePlan(t *testing.T) {\n\n\tmessageChannel := message.NewChannel[*schema.Diagnostics](func(index int, message *schema.Diagnostics) {\n\t\tif utils.IsNotEmpty(message) {\n\t\t\tt.Log(message.ToString())\n\t\t}\n\t})\n\tloader, err := module_loader.NewLocalDirectoryModuleLoader(&module_loader.LocalDirectoryModuleLoaderOptions{\n\t\tModuleLoaderOptions: &module_loader.ModuleLoaderOptions{\n\t\t\tMessageChannel: messageChannel,\n\t\t},\n\t\tModuleDirectory: \"./test_data/rule_planner\",\n\t})\n\tassert.Nil(t, err)\n\trootModule, b := loader.Load(context.Background())\n\tassert.True(t, b)\n\tmessageChannel.ReceiverWait()\n\n\tscope := NewScope()\n\tscope.SetVariable(\"account_id\", \"100000875657\")\n\ttableToProviderMap := map[string]string{\n\t\t\"aws_s3_buckets\":           \"aws\",\n\t\t\"aws_s3_bucket_cors_rules\": \"aws\",\n\t}\n\n\toptions := &RulePlannerOptions{\n\t\tModulePlan:         nil,\n\t\tModule:             rootModule,\n\t\tModuleScope:        scope,\n\t\tRuleBlock:          rootModule.RulesBlock[0],\n\t\tTableToProviderMap: tableToProviderMap,\n\t}\n\tplan, diagnostics := NewRulePlanner(options).MakePlan(context.Background())\n\tt.Log(diagnostics.ToString())\n\tassert.False(t, utils.HasError(diagnostics))\n\t//t.Log(json_util.ToJsonString(plan))\n\tassert.NotEmpty(t, plan.Query)\n\t//assert.NotEmpty(t, plan.BindingProviderName)\n\t//assert.NotEmpty(t, plan.BindingTables)\n\t//assert.Len(t, plan.BindingTables, 2)\n\n}\n"
  },
  {
    "path": "pkg/modules/planner/scope.go",
    "content": "package planner\n\nimport (\n\t\"github.com/selefra/selefra/pkg/modules/module\"\n\t\"github.com/selefra/selefra/pkg/utils\"\n)\n\n// Scope Used to represent the scope of a module, scope have some variables can use\ntype Scope struct {\n\n\t// Variable in scope, may be is self declare, or extend from parent scope\n\tvariablesMap map[string]any\n\n\t// The provider configuration information in scope, now extend from parent module, do not support custom by self\n\tproviderConfigBlockSlice []*module.ProviderBlock\n}\n\n// ExtendScope create scope from exists scope\nfunc ExtendScope(scope *Scope) *Scope {\n\tsubScope := NewScope()\n\tsubScope.Extend(scope)\n\treturn subScope\n}\n\n// NewScope create new scope\nfunc NewScope() *Scope {\n\treturn &Scope{\n\t\tvariablesMap: make(map[string]any),\n\t}\n}\n\n// Extend current scope extend other scope\nfunc (x *Scope) Extend(scope *Scope) {\n\tfor key, value := range scope.variablesMap {\n\t\tif _, exists := x.variablesMap[key]; exists {\n\t\t\tcontinue\n\t\t}\n\t\tx.variablesMap[key] = value\n\t}\n}\n\n// Clone Make a copy of the current scope\nfunc (x *Scope) Clone() *Scope {\n\n\tnewVariablesMap := make(map[string]any)\n\tfor key, value := range x.variablesMap {\n\t\tnewVariablesMap[key] = value\n\t}\n\n\treturn &Scope{\n\t\tvariablesMap:             newVariablesMap,\n\t\tproviderConfigBlockSlice: x.providerConfigBlockSlice,\n\t}\n}\n\n// GetVariable Gets the value of a variable\nfunc (x *Scope) GetVariable(variableName string) (any, bool) {\n\tvalue, exists := x.variablesMap[variableName]\n\treturn value, exists\n}\n\n// SetVariable Declare a variable\nfunc (x *Scope) SetVariable(variableName string, variableValue any) any {\n\toldValue := x.variablesMap[variableName]\n\tx.variablesMap[variableName] = variableValue\n\treturn oldValue\n}\n\n// SetVariables Batch declaration variable\nfunc (x *Scope) SetVariables(variablesMap map[string]any) {\n\tfor variableName, variableValue := range variablesMap 
{\n\t\tx.variablesMap[variableName] = variableValue\n\t}\n}\n\n// SetVariableIfNotExists Declared only if the variable does not exist\nfunc (x *Scope) SetVariableIfNotExists(variableName string, variableValue any) bool {\n\tif _, exists := x.variablesMap[variableName]; exists {\n\t\treturn false\n\t}\n\tx.variablesMap[variableName] = variableValue\n\treturn true\n}\n\n// RenderingTemplate Rendering the template using the moduleScope of the current module\nfunc (x *Scope) RenderingTemplate(templateName, templateString string) (string, error) {\n\t// TODO a problem in here, they call it \"no value\" ?\n\treturn utils.RenderingTemplate(templateName, templateString, x.variablesMap)\n}\n"
  },
  {
    "path": "pkg/modules/planner/test_data/provider_fetch_planner/modules.yaml",
    "content": "selefra:\n  cloud:\n    project: example_project\n    organization: example_org\n    hostname: app.selefra.io\n  connection:\n    type: postgres\n    username: postgres\n    password: pass\n    host: localhost\n    port: \"5432\"\n    database: postgres\n    sslmode: disable\n  name: example_project\n  cli_version: v0.0.1\n  providers:\n    - name: aws\n      source: selefra/aws\n      version: \">=0.0.9,<=0.0.10\"\n    - name: gcp\n      source: selefra/gcp\n      version: \">=0.0.9,<=0.0.10\"\n"
  },
  {
    "path": "pkg/modules/planner/test_data/rule_planner/test.yaml",
    "content": "rules:\n  - name: bucket_is_not_configured_with_cors_rules\n    query: |\n      SELECT\n        DISTINCT(a1.*)\n      FROM\n        aws_s3_buckets AS a1 FULL\n        OUTER JOIN aws_s3_bucket_cors_rules AS a2 ON a1.selefra_id = a2.aws_s3_buckets_selefra_id\n      WHERE\n        aws_s3_buckets_selefra_id IS NULL;\n    labels:\n      resource_account_id: '{{.account_id}}'\n      resource_id: '{{.arn}}'\n      resource_region: '{{.region}}'\n      resource_type: S3\n      bucket_url: 'https://{{.name}}.s3.{{.region}}.amazonaws.com'\n    metadata:\n      author: Selefra\n      description: Ensure to configure secure CORS rules for the Bucket.\n      id: SF010117\n      provider: AWS\n      remediation: remediation/s3/bucket_is_not_configured_with_cors_rules.md\n      severity: Low\n      tags:\n        - Security\n        - Misconfiguration\n      title: S3 Bucket is not configured with CORS rules\n    output: \"S3 Bucket is not configured with CORS rules, arn: {{.arn}}\"\n\nselefra:\n  cloud:\n    project: example_project\n    organization: example_org\n    hostname: app.selefra.io\n  connection:\n    type: postgres\n    username: postgres\n    password: pass\n    host: localhost\n    port: \"5432\"\n    database: postgres\n    sslmode: disable\n  name: example_project\n  cli_version: v0.0.1\n  providers:\n    - name: aws\n      source: selefra/aws\n      version: \">=0.0.9,<=0.0.10\"\n    - name: gcp\n      source: selefra/gcp\n      version: \">=0.0.9,<=0.0.10\"\n\n#modules:\n#  - name: Misconfiguration-S3\n#    uses:\n#      - ./sub_module_b/sub_module_b_1\n#      - ./sub_module_b/sub_module_b_2\n#      - ./sub_module_a/sub_module_a_1\n#      - ./sub_module_a/sub_module_a_2\n#  - name: example_module\n#    #    uses: ./rules/\n#    input:\n#      name: selefra\n#\n#variables:\n#  - key: test\n#    default:\n#      a: 1\n#      b: 1\n#      c: 1\n#\n#providers:\n#  - name: aws_01\n#    cache: 1d\n#    provider: aws\n#    resources:\n#      - 
aws_s3_buckets\n#      - aws_s3_accounts\n#    #  Optional, Repeated. Add an accounts block for every account you want to assume-role into and fetch data from.\n#    accounts:\n#      #     Optional. User identification\n#      - account_name: <UNIQUE ACCOUNT IDENTIFIER>\n#        #    Optional. Named profile in config or credential file from where Selefra should grab credentials\n#        shared_config_profile: < PROFILE_NAME >\n#        #    Optional. Location of shared configuration files\n#        shared_config_files:\n#          - <FILE_PATH>\n#        #   Optional. Location of shared credentials files\n#        shared_credentials_files:\n#          - <FILE_PATH>\n#        #    Optional. Role ARN we want to assume when accessing this account\n#        role_arn: < YOUR_ROLE_ARN >\n#        #    Optional. Named role session to grab specific operation under the assumed role\n#        role_session_name: <SESSION_NAME>\n#        #    Optional. Any outside of the org account id that has additional control\n#        external_id: <ID>\n#        #    Optional. Designated region of servers\n#        default_region: <REGION_CODE>\n#        #    Optional. by default assumes all regions\n#        regions:\n#          - us-east-1\n#          - us-west-2\n#    #    The maximum number of times that a request will be retried for failures. Defaults to 10 retry attempts.\n#    max_attempts: 10\n#    #    The maximum back off delay between attempts. The backoff delays exponentially with a jitter based on the number of attempts. Defaults to 30 seconds.\n#    max_backoff: 30"
  },
  {
    "path": "pkg/modules/planner/version_vote.go",
    "content": "package planner\n\nimport (\n\t\"context\"\n\t\"fmt\"\n\t\"github.com/hashicorp/go-version\"\n\t\"github.com/selefra/selefra-provider-sdk/provider/schema\"\n\t\"github.com/selefra/selefra/pkg/modules/module\"\n\t\"github.com/selefra/selefra/pkg/registry\"\n\tselefraVersion \"github.com/selefra/selefra/pkg/version\"\n\t\"strings\"\n)\n\n// ------------------------------------------------- --------------------------------------------------------------------\n\n// ProviderVersionVoteService When multiple versions of the same provider are available for a module, which version should be used? So take a vote!\ntype ProviderVersionVoteService struct {\n\n\t// <providerName, ProviderVote>\n\tproviderVersionVoteMap map[string]*ProviderVote\n}\n\nfunc NewProviderVersionVoteService() *ProviderVersionVoteService {\n\treturn &ProviderVersionVoteService{\n\t\tproviderVersionVoteMap: make(map[string]*ProviderVote),\n\t}\n}\n\n// Vote Vote on the module to see which version should be used\nfunc (x *ProviderVersionVoteService) Vote(ctx context.Context, module *module.Module) *schema.Diagnostics {\n\tdiagnostics := schema.NewDiagnostics()\n\tfor _, requiredProviderBlock := range module.SelefraBlock.RequireProvidersBlock {\n\t\tif _, exists := x.providerVersionVoteMap[requiredProviderBlock.Source]; !exists {\n\t\t\tproviderVote, d := NewProviderVote(ctx, requiredProviderBlock)\n\t\t\tif diagnostics.AddDiagnostics(d).HasError() {\n\t\t\t\treturn diagnostics\n\t\t\t}\n\t\t\tx.providerVersionVoteMap[requiredProviderBlock.Source] = providerVote\n\t\t}\n\t\td := x.providerVersionVoteMap[requiredProviderBlock.Source].Vote(module, requiredProviderBlock)\n\t\tif diagnostics.AddDiagnostics(d).HasError() {\n\t\t\treturn diagnostics\n\t\t}\n\t}\n\treturn diagnostics\n}\n\n// TODO\n\n// TODO\n//// GiveMeResult Query voting result\n//func (x *ProviderVersionVoteService) GiveMeResult(providerName string) (string, *schema.Diagnostics) {\n//\tvote, exists := 
x.providerVersionVoteMap[providerName]\n//\tif !exists {\n//\t\treturn \"\", schema.NewDiagnostics().AddErrorMsg(\"\")\n//\t}\n//\treturn \"\",\n//}\n\n// ------------------------------------------------- --------------------------------------------------------------------\n\ntype ProviderVote struct {\n\tTotalVoteTimes      int\n\tProviderName        string\n\tVersionVoteCountMap map[string]*VersionVoteSummary\n\tproviderMetadata    *registry.ProviderMetadata\n}\n\nfunc NewProviderVote(ctx context.Context, requiredProviderBlock *module.RequireProviderBlock) (*ProviderVote, *schema.Diagnostics) {\n\tx := &ProviderVote{}\n\tx.ProviderName = requiredProviderBlock.Source\n\td := x.InitProviderVersionVoteCountMap(ctx, requiredProviderBlock)\n\treturn x, d\n}\n\n// Vote Each module can participate in voting\nfunc (x *ProviderVote) Vote(voteModule *module.Module, requiredProviderBlock *module.RequireProviderBlock) *schema.Diagnostics {\n\n\tx.TotalVoteTimes++\n\n\t// If it is the latest version, replace it with the latest version\n\tversionString := requiredProviderBlock.Version\n\tif selefraVersion.IsLatestVersion(versionString) {\n\t\tversionString = x.providerMetadata.LatestVersion\n\t}\n\tconstraint, err := version.NewConstraint(versionString)\n\tif err != nil {\n\t\tlocation := requiredProviderBlock.GetNodeLocation(\"version\" + module.NodeLocationSelfValue)\n\t\treport := module.RenderErrorTemplate(fmt.Sprintf(\"required provider version constraint parse failed: %s\", versionString), location)\n\t\treturn schema.NewDiagnostics().AddErrorMsg(report)\n\t}\n\n\tvoteSuccessCount := 0\n\tfor _, voteSummary := range x.VersionVoteCountMap {\n\t\tif selefraVersion.IsConstraintsAllow(constraint, voteSummary.ProviderVersion) {\n\t\t\tvoteSummary.VoteSet[voteModule] = struct{}{}\n\t\t\tvoteSuccessCount++\n\t\t}\n\t}\n\n\tif voteSuccessCount == 0 {\n\t\tcanUseVersions := selefraVersion.Sort(x.GetVoteVersions())\n\t\tlocation := requiredProviderBlock.GetNodeLocation(\"version\" 
+ module.NodeLocationSelfValue)\n\t\terrorTips := fmt.Sprintf(\"required provider version constraint %s , no version was found that met the requirements, can use versions: %s\", versionString, strings.Join(canUseVersions, \", \"))\n\t\treport := module.RenderErrorTemplate(errorTips, location)\n\t\treturn schema.NewDiagnostics().AddErrorMsg(report)\n\t}\n\n\treturn nil\n}\n\n// InitProviderVersionVoteCountMap Obtain the Provider versions from Registry and vote for these products later\nfunc (x *ProviderVote) InitProviderVersionVoteCountMap(ctx context.Context, block *module.RequireProviderBlock) *schema.Diagnostics {\n\n\tdiagnostics := schema.NewDiagnostics()\n\n\tx.VersionVoteCountMap = make(map[string]*VersionVoteSummary)\n\n\t// It's not actually going to download, so it doesn't matter what the path is here\n\toptions := registry.NewProviderGithubRegistryOptions(\"./\")\n\tprovider, err := registry.NewProviderGithubRegistry(options)\n\tif err != nil {\n\t\treturn diagnostics.AddErrorMsg(\"create provider github registry failed: %s\", err.Error())\n\t}\n\tmetadata, err := provider.GetMetadata(ctx, registry.NewProvider(x.ProviderName, selefraVersion.VersionLatest))\n\tif err != nil {\n\t\tlocation := block.GetNodeLocation(\"source\" + module.NodeLocationSelfValue)\n\t\treport := module.RenderErrorTemplate(fmt.Sprintf(\"get provider %s meta information from registry error: %s\", x.ProviderName, err.Error()), location)\n\t\treturn diagnostics.AddErrorMsg(report)\n\t}\n\tif len(metadata.Versions) == 0 {\n\t\treturn diagnostics.AddErrorMsg(\"provider %s registry metadata not found any version\", x.ProviderName)\n\t}\n\tfor _, providerVersion := range metadata.Versions {\n\t\tsummary, d := NewVoteSummary(x.ProviderName, providerVersion)\n\t\tif diagnostics.AddDiagnostics(d).HasError() {\n\t\t\treturn diagnostics\n\t\t}\n\t\tx.VersionVoteCountMap[providerVersion] = summary\n\t}\n\tx.providerMetadata = metadata\n\treturn diagnostics\n}\n\n// GetWinnersVersionVoteSummary 
Get the version that wins the vote. There may be multiple versions that win at the same time\nfunc (x *ProviderVote) GetWinnersVersionVoteSummary() map[string]*VersionVoteSummary {\n\tm := make(map[string]*VersionVoteSummary)\n\tfor versionString, voteSummary := range x.VersionVoteCountMap {\n\t\tif len(voteSummary.VoteSet) == x.TotalVoteTimes {\n\t\t\tm[versionString] = voteSummary\n\t\t}\n\t}\n\treturn m\n}\n\n// GetWinnersVersionSlice Gets the version numbers of all versions that won the vote\nfunc (x *ProviderVote) GetWinnersVersionSlice() []string {\n\tversionSlice := make([]string, 0)\n\tfor versionString := range x.GetWinnersVersionVoteSummary() {\n\t\tversionSlice = append(versionSlice, versionString)\n\t}\n\treturn versionSlice\n}\n\n// GetVoteVersions Get the versions that are voted on\nfunc (x *ProviderVote) GetVoteVersions() []string {\n\tversionStringSlice := make([]string, 0)\n\tfor versionString := range x.VersionVoteCountMap {\n\t\tversionStringSlice = append(versionStringSlice, versionString)\n\t}\n\treturn versionStringSlice\n}\n\n// ToModuleAllowProviderVersionMap Convert to which versions of this Provider are supported by the module\nfunc (x *ProviderVote) ToModuleAllowProviderVersionMap() map[*module.Module][]string {\n\tmoduleUseProviderVersionMap := make(map[*module.Module][]string, 0)\n\tfor providerVersion, voteSummary := range x.VersionVoteCountMap {\n\t\tfor module := range voteSummary.VoteSet {\n\t\t\tversionSlice := moduleUseProviderVersionMap[module]\n\t\t\tmoduleUseProviderVersionMap[module] = append(versionSlice, providerVersion)\n\t\t}\n\t}\n\treturn moduleUseProviderVersionMap\n}\n\n// ------------------------------------------------- --------------------------------------------------------------------\n\ntype VersionVoteSummary struct {\n\n\t// Which version\n\tProviderVersion *version.Version\n\n\t// How many votes did you get\n\tVoteSet map[*module.Module]struct{}\n}\n\nfunc NewVoteSummary(providerName, providerVersion string) 
(*VersionVoteSummary, *schema.Diagnostics) {\n\tnewVersion, err := version.NewVersion(providerVersion)\n\tif err != nil {\n\t\treturn nil, schema.NewDiagnostics().AddErrorMsg(\"parse provider %s version %s error: %s\", providerName, providerVersion, err.Error())\n\t}\n\treturn &VersionVoteSummary{\n\t\tProviderVersion: newVersion,\n\t\tVoteSet:         make(map[*module.Module]struct{}),\n\t}, nil\n}\n\n// ------------------------------------------------- --------------------------------------------------------------------\n"
  },
  {
    "path": "pkg/modules/planner/version_vote_test.go",
    "content": "package planner\n\nimport (\n\t\"context\"\n\t\"github.com/selefra/selefra/pkg/modules/module\"\n\t\"github.com/selefra/selefra/pkg/utils\"\n\t\"github.com/stretchr/testify/assert\"\n\t\"sort\"\n\t\"testing\"\n)\n\nfunc TestNewProviderVersionVoteService(t *testing.T) {\n\n\t// case 1: To be able to pick a definitive version\n\trootModule := randomModule(\"v0.0.1, v0.0.2\")\n\trootModule.SubModules = append(rootModule.SubModules, randomModule(\"v0.0.2, v0.0.3\"))\n\tservice := NewProviderVersionVoteService()\n\trootModule.Traversal(context.Background(), func(ctx context.Context, traversalContext *module.TraversalContext) bool {\n\t\td := service.Vote(context.Background(), traversalContext.Module)\n\t\tassert.False(t, utils.HasError(d))\n\t\treturn true\n\t})\n\tslice := service.providerVersionVoteMap[\"aws\"].GetWinnersVersionSlice()\n\tassert.Equal(t, []string{\"v0.0.2\"}, slice)\n\n\t// case 2: The ability to select multiple explicit versions\n\trootModule = randomModule(\"v0.0.1, v0.0.2, v0.0.3\")\n\trootModule.SubModules = append(rootModule.SubModules, randomModule(\"v0.0.2, v0.0.3, v0.0.4\"))\n\tservice = NewProviderVersionVoteService()\n\trootModule.Traversal(context.Background(), func(ctx context.Context, traversalContext *module.TraversalContext) bool {\n\t\td := service.Vote(context.Background(), traversalContext.Module)\n\t\tassert.False(t, utils.HasError(d))\n\t\treturn true\n\t})\n\tslice = service.providerVersionVoteMap[\"aws\"].GetWinnersVersionSlice()\n\tsort.Strings(slice)\n\tassert.Equal(t, []string{\"v0.0.2\", \"v0.0.3\"}, slice)\n\n\t// case 3: No definitive version could be selected\n\trootModule = randomModule(\"v0.0.1, v0.0.2\")\n\trootModule.SubModules = append(rootModule.SubModules, randomModule(\"v0.0.4\"))\n\tservice = NewProviderVersionVoteService()\n\trootModule.Traversal(context.Background(), func(ctx context.Context, traversalContext *module.TraversalContext) bool {\n\t\td := service.Vote(context.Background(), 
traversalContext.Module)\n\t\tassert.False(t, utils.HasError(d))\n\t\treturn true\n\t})\n\tslice = service.providerVersionVoteMap[\"aws\"].GetWinnersVersionSlice()\n\tassert.Equal(t, []string{}, slice)\n\n}\n\nfunc randomModule(requiredVersion string) *module.Module {\n\n\trequireProviderBlock := module.NewRequireProviderBlock()\n\trequireProviderBlock.Source = \"aws\"\n\trequireProviderBlock.Name = \"aws\"\n\trequireProviderBlock.Version = requiredVersion\n\n\trootModule := module.NewModule()\n\trootModule.SelefraBlock = &module.SelefraBlock{\n\t\tRequireProvidersBlock: []*module.RequireProviderBlock{\n\t\t\trequireProviderBlock,\n\t\t},\n\t}\n\n\treturn rootModule\n}\n"
  },
  {
    "path": "pkg/oci/postgresql_installer.go",
    "content": "package oci\n\nimport (\n\t\"bufio\"\n\t\"context\"\n\t\"github.com/containerd/containerd/remotes/docker\"\n\t\"github.com/selefra/selefra-provider-sdk/provider/schema\"\n\t\"github.com/selefra/selefra/global\"\n\t\"github.com/selefra/selefra/pkg/message\"\n\t\"github.com/selefra/selefra/pkg/utils\"\n\t\"io\"\n\t\"oras.land/oras-go/pkg/content\"\n\t\"oras.land/oras-go/pkg/oras\"\n\t\"os\"\n\t\"path/filepath\"\n\t\"runtime\"\n\t\"strings\"\n)\n\n// ------------------------------------------------- --------------------------------------------------------------------\n\nconst (\n\n\t// DefaultPostgreSQLPasswd The default password of the startup instance, Maybe I should use a stronger random password?\n\tDefaultPostgreSQLPasswd = \"pass\"\n\n\tDefaultPostgreSQLPort = 15432\n)\n\n// ------------------------------------------------ ---------------------------------------------------------------------\n\n//type ProgressTracker interface {\n//\n//\t// Begin Ready for installation\n//\tBegin()\n//\n//\t// InstallBegin The system starts to install postgresql\n//\tInstallBegin(ctx context.Context, postgresqlInstallDirectory string, d *schema.Diagnostics)\n//\n//\t// InstallEnd Installing postgresql ends\n//\tInstallEnd(ctx context.Context, postgresqlInstallDirectory string, d *schema.Diagnostics)\n//\n//\t// RunCommand Execute the command\n//\tRunCommand(command string, args ...string)\n//\n//\t// Start a postgresql instance\n//\tStart(stdout, stderr string, diagnostics *schema.Diagnostics)\n//\n//\t// End of installation\n//\tEnd(isSuccess bool)\n//}\n\n// ------------------------------------------------ ---------------------------------------------------------------------\n\n// PostgreSQLDownloaderOptions Download option\ntype PostgreSQLDownloaderOptions struct {\n\n\t// Which directory to store it in after downloading\n\tDownloadDirectory string\n\n\t//// Used to receive notifications when downloading progress updates to track progress\n\t//ProgressTracker 
ProgressTracker\n\n\tMessageChannel *message.Channel[*schema.Diagnostics]\n}\n\n// ------------------------------------------------ ---------------------------------------------------------------------\n\ntype PostgreSQLInstaller struct {\n\toptions *PostgreSQLDownloaderOptions\n}\n\nfunc NewPostgreSQLDownloader(options *PostgreSQLDownloaderOptions) *PostgreSQLInstaller {\n\treturn &PostgreSQLInstaller{\n\t\toptions: options,\n\t}\n}\n\nfunc (x *PostgreSQLInstaller) Run(ctx context.Context) bool {\n\n\tdefer func() {\n\t\tx.options.MessageChannel.SenderWaitAndClose()\n\t}()\n\n\t// Make sure that postgresql exists locally. If not, install one\n\tif !x.IsInstalled() {\n\t\tx.options.MessageChannel.Send(schema.NewDiagnostics().AddInfo(\"Postgresql is not installed, it will automatically install...\"))\n\t\tif !x.Install(ctx) {\n\t\t\treturn false\n\t\t}\n\t\tx.options.MessageChannel.Send(schema.NewDiagnostics().AddInfo(\"Postgresql is installed successfully\"))\n\t}\n\n\t_ = x.Stop()\n\n\treturn x.Start()\n}\n\n//func loadBar(doneFlag *bool) {\n//\tgo func() {\n//\t\tdotLen := 0\n//\t\tfor *doneFlag {\n//\t\t\ttime.Sleep(1 * time.Second)\n//\t\t\tif *doneFlag {\n//\t\t\t\tdotLen++\n//\t\t\t\tcli_ui.Infof(\"\\rWaiting for DB to download %s\", strings.Repeat(\".\", dotLen%6)+strings.Repeat(\" \", 6-dotLen%6))\n//\t\t\t}\n//\t\t}\n//\t}()\n//}\n\nfunc (x *PostgreSQLInstaller) DownloadOCIImage(ctx context.Context) bool {\n\n\t// postgresql oci file installation directory\n\timageDownloadURL := global.PkgBasePath + runtime.GOOS + global.PkgTag\n\n\t// ensure install directory exists\n\tpostgresqlDirectory := x.buildPgInstallDirectoryPath()\n\t_ = os.MkdirAll(postgresqlDirectory, 0755)\n\n\tx.options.MessageChannel.Send(schema.NewDiagnostics().AddInfo(\"Download postgresql oci image from %s to %s ...\", imageDownloadURL, postgresqlDirectory))\n\n\tfileStore := content.NewFile(postgresqlDirectory)\n\tdockerResolver := docker.NewResolver(docker.ResolverOptions{})\n\t_, err 
:= oras.Copy(ctx, dockerResolver, imageDownloadURL, fileStore, postgresqlDirectory)\n\tif err != nil {\n\t\tx.options.MessageChannel.Send(schema.NewDiagnostics().AddErrorMsg(\"OCI install postgresql failed, download OCI image error: %s\", err.Error()))\n\t\treturn false\n\t}\n\n\tx.options.MessageChannel.Send(schema.NewDiagnostics().AddInfo(\"Download postgresql OCI image success\"))\n\n\treturn true\n}\n\n// IsInstalled Check whether postgresql is installed\nfunc (x *PostgreSQLInstaller) IsInstalled() bool {\n\t// If the executable exists, it is considered installed\n\treturn utils.ExistsFile(x.buildPgCtlExecutePath())\n}\n\n// Install postgresql locally\nfunc (x *PostgreSQLInstaller) Install(ctx context.Context) bool {\n\n\tif x.IsInstalled() {\n\t\treturn true\n\t}\n\n\tif !x.DownloadOCIImage(ctx) {\n\t\treturn false\n\t}\n\n\tx.options.MessageChannel.Send(schema.NewDiagnostics().AddInfo(\"Begin init postgresql...\"))\n\n\tdiagnostics := schema.NewDiagnostics()\n\t_ = utils.EnsureDirectoryExists(x.buildDataDirectory())\n\t// for debug\n\t//fmt.Println(\"path: \" + x.buildDataDirectory())\n\tstdout, stderr, err := utils.RunCommand(x.buildInitExecutePath(),\n\t\t\"-D\", x.buildDataDirectory(),\n\t\t\"-U\", \"postgres\",\n\t\t\"-E\", \"UTF-8\",\n\t\t\"--locale\", \"en_US.UTF-8\")\n\tif err != nil {\n\t\tdiagnostics.AddErrorMsg(\"Init postgres failed: %s\", err.Error())\n\t} else {\n\t\tdiagnostics.AddInfo(\"Init postgres success\")\n\t}\n\tdiagnostics = x.fixDiagnostics(diagnostics)\n\tif stdout != \"\" {\n\t\tdiagnostics.AddInfo(stdout)\n\t}\n\tif stderr != \"\" {\n\t\tdiagnostics.AddErrorMsg(stderr)\n\t}\n\n\tdiagnostics.AddDiagnostics(x.ChangeConfigFilePort(15432))\n\tdiagnostics = x.fixDiagnostics(diagnostics)\n\n\tx.options.MessageChannel.Send(diagnostics)\n\n\treturn utils.NotHasError(diagnostics)\n}\n\n// Start the postgresql database\nfunc (x *PostgreSQLInstaller) Start() bool {\n\tdiagnostics := schema.NewDiagnostics()\n\tstdout, stderr, err := 
utils.RunCommand(x.buildPgCtlExecutePath(), \"-D\", x.buildDataDirectory(), \"-l\", x.buildPgLogFilePath(), \"start\")\n\tif err != nil {\n\t\tdiagnostics.AddErrorMsg(\"Start postgresql error: %s\", err.Error())\n\t} else {\n\t\t//diagnostics.AddInfo(\"Start postgresql success\")\n\t}\n\tif stdout != \"\" {\n\t\t//diagnostics.AddInfo(stdout)\n\t}\n\tif stderr != \"\" {\n\t\tdiagnostics.AddErrorMsg(stderr)\n\t}\n\tx.options.MessageChannel.Send(diagnostics)\n\treturn utils.NotHasError(diagnostics)\n}\n\nfunc (x *PostgreSQLInstaller) Stop() bool {\n\tdiagnostics := schema.NewDiagnostics()\n\tstdout, stderr, err := utils.RunCommand(x.buildPgCtlExecutePath(), \"-D\", x.buildDataDirectory(), \"stop\")\n\tif err != nil {\n\t\tdiagnostics.AddErrorMsg(\"Stop postgresql error: %s\", err.Error())\n\t} else {\n\t\tdiagnostics.AddInfo(\"Stop postgresql success\")\n\t}\n\tif stderr != \"\" {\n\t\tdiagnostics.AddErrorMsg(stderr)\n\t}\n\tif stdout != \"\" {\n\t\tdiagnostics.AddInfo(stdout)\n\t}\n\treturn utils.HasError(diagnostics)\n}\n\n// may be\n// [ error ]\n// WARNING: enabling \"trust\" authentication for local connections\n// You can change this by editing pg_hba.conf or using the option -A, or\n// --auth-local and --auth-host, the next time you run initdb.\nfunc (x *PostgreSQLInstaller) fixDiagnostics(diagnostics *schema.Diagnostics) *schema.Diagnostics {\n\tif diagnostics == nil {\n\t\treturn nil\n\t}\n\tnewDiagnostics := schema.NewDiagnostics()\n\t// WARNING\n\tfor _, d := range diagnostics.GetDiagnosticSlice() {\n\t\tlevel := d.Level()\n\t\tcontent := strings.TrimSpace(d.Content())\n\t\tif strings.HasPrefix(content, \"WARNING:\") {\n\t\t\tlevel = schema.DiagnosisLevelWarn\n\t\t}\n\t\tnewDiagnostics.AddDiagnostic(schema.NewDiagnostic(level, content))\n\t}\n\treturn newDiagnostics\n}\n\n// ------------------------------------------------ ---------------------------------------------------------------------\n\n// get the postgresql installation directory\nfunc (x 
*PostgreSQLInstaller) buildPgInstallDirectoryPath() string {\n\treturn filepath.Join(x.options.DownloadDirectory, \"oci/postgresql\")\n}\n\n// postgresql data storage path\nfunc (x *PostgreSQLInstaller) buildDataDirectory() string {\n\treturn filepath.Join(x.buildPgInstallDirectoryPath(), \"pgsql/data\")\n}\n\n// get the location of the initdb exec file path\nfunc (x *PostgreSQLInstaller) buildInitExecutePath() string {\n\tif runtime.GOOS == \"windows\" {\n\t\treturn filepath.Join(x.buildPgInstallDirectoryPath(), \"pgsql/bin/initdb.exe\")\n\t} else {\n\t\treturn filepath.Join(x.buildPgInstallDirectoryPath(), \"pgsql/bin/initdb\")\n\t}\n}\n\n// get the execution path of the postgresql ctl file\nfunc (x *PostgreSQLInstaller) buildPgCtlExecutePath() string {\n\tif runtime.GOOS == \"windows\" {\n\t\treturn filepath.Join(x.buildPgInstallDirectoryPath(), \"pgsql/bin/pg_ctl.exe\")\n\t} else {\n\t\treturn filepath.Join(x.buildPgInstallDirectoryPath(), \"pgsql/bin/pg_ctl\")\n\t}\n}\n\n// get the postgresql data location\nfunc (x *PostgreSQLInstaller) buildPgConfigFilePath() string {\n\treturn filepath.Join(x.buildPgInstallDirectoryPath(), \"pgsql/data/postgresql.conf\")\n}\n\n// get the location where postgresql logs are stored\nfunc (x *PostgreSQLInstaller) buildPgLogFilePath() string {\n\treturn filepath.Join(x.buildPgInstallDirectoryPath(), \"pgsql/logfile\")\n}\n\n// ------------------------------------------------ ---------------------------------------------------------------------\n\n// TODO Parameter is not used\n// ChangeConfigFilePort Change the port number in the configuration file\nfunc (x *PostgreSQLInstaller) ChangeConfigFilePort(port int) *schema.Diagnostics {\n\n\t// read config file\n\tdiagnostics := schema.NewDiagnostics()\n\tfile, err := os.OpenFile(x.buildPgConfigFilePath(), os.O_RDWR, 0666)\n\tif err != nil {\n\t\treturn diagnostics.AddErrorMsg(\"Run postgresql error, open config file %s error: %s\", x.buildPgConfigFilePath(), err.Error())\n\t}\n\tdefer 
file.Close()\n\n\treader := bufio.NewReader(file)\n\tpos := int64(0)\n\tfor {\n\t\tline, err := reader.ReadString('\\n')\n\t\tif err != nil {\n\t\t\tif err == io.EOF {\n\t\t\t\tbreak\n\t\t\t} else {\n\t\t\t\treturn diagnostics.AddErrorMsg(\"OCI run postgresql error, open config file %s error: %s\", x.buildPgConfigFilePath(), err.Error())\n\t\t\t}\n\t\t}\n\t\tif strings.Contains(line, \"#port = 5432\") {\n\t\t\tdefaultPort := \"15432\"\n\t\t\tportBytes := []byte(\"port = \" + defaultPort)\n\t\t\t_, err := file.WriteAt(portBytes, pos)\n\t\t\tif err != nil {\n\t\t\t\treturn diagnostics.AddErrorMsg(\"OCI run postgresql error, change config file %s error: %s\", x.buildPgConfigFilePath(), err.Error())\n\t\t\t}\n\t\t}\n\t\tpos += int64(len(line))\n\t}\n\treturn nil\n}\n\n// ------------------------------------------------ ---------------------------------------------------------------------\n"
  },
  {
    "path": "pkg/oci/postgresql_installer_test.go",
    "content": "package oci\n\nimport (\n\t\"context\"\n\t\"fmt\"\n\t\"github.com/selefra/selefra-provider-sdk/provider/schema\"\n\t\"github.com/selefra/selefra/config\"\n\t\"github.com/selefra/selefra/pkg/message\"\n\t\"github.com/selefra/selefra/pkg/utils\"\n\t\"github.com/stretchr/testify/assert\"\n\t\"testing\"\n)\n\n// Failed to pass the test on Windows. Procedure\nfunc TestPostgreSQLInstaller_Run1(t *testing.T) {\n\n\t//downloadWorkspace := \"./test_download\"\n\tdownloadWorkspace, err := config.GetDefaultDownloadCacheDirectory()\n\tassert.Nil(t, err)\n\terr = utils.EnsureDirectoryNotExists(downloadWorkspace)\n\tassert.Nil(t, err)\n\n\tmessageChannel := message.NewChannel[*schema.Diagnostics](func(index int, message *schema.Diagnostics) {\n\t\tif utils.IsNotEmpty(message) {\n\t\t\tfmt.Println(message.ToString())\n\t\t}\n\t})\n\tdownloader := NewPostgreSQLDownloader(&PostgreSQLDownloaderOptions{\n\t\tMessageChannel:    messageChannel,\n\t\tDownloadDirectory: downloadWorkspace,\n\t})\n\tisRunSuccess := downloader.Run(context.Background())\n\tmessageChannel.ReceiverWait()\n\tassert.True(t, isRunSuccess)\n}\n"
  },
  {
    "path": "pkg/oci/test_data/oci.sh",
    "content": "#!/usr/bin/env bash\n#######################################################################################################################\n#                                                                                                                     #\n#                              This script helps you test interactive programs                                        #\n#                                                                                                                     #\n#                                                                                                                     #\n#                                                                                                   Version: 0.0.1    #\n#                                                                                                                     #\n#######################################################################################################################\n\n# for command `selefra init`\ncd ../../../\ngo build\nrm -rf ./test\nmkdir test\nmv selefra.exe ./test\ncd test\necho \"begin run command selefra init\"\n./selefra.exe init $@\n\n"
  },
  {
    "path": "pkg/plugin/plugin.go",
    "content": "package plugin\n\nimport (\n\t\"fmt\"\n\t\"github.com/hashicorp/go-plugin\"\n\t\"github.com/selefra/selefra-provider-sdk/grpc/serve\"\n\t\"github.com/selefra/selefra-provider-sdk/grpc/shard\"\n\t\"github.com/selefra/selefra/pkg/logger\"\n\t\"os\"\n\t\"os/exec\"\n)\n\nconst (\n\tprefixManaged   = \"managed\"\n\tprefixUnmanaged = \"unmanaged\"\n\tdefaultAlias    = \"default\"\n)\n\ntype Plugin interface {\n\tName() string\n\tVersion() string\n\tProtocolVersion() int\n\tProvider() shard.ProviderClient\n\tClose()\n}\n\ntype pluginBase struct {\n\tname     string\n\tversion  string\n\tclient   *plugin.Client\n\tprovider shard.ProviderClient\n}\n\nfunc (p pluginBase) Name() string {\n\treturn p.name\n}\n\nfunc (p pluginBase) Provider() shard.ProviderClient {\n\treturn p.provider\n}\n\nfunc (p pluginBase) Version() string {\n\treturn p.version\n}\n\ntype managedPlugin struct {\n\tpluginBase\n}\n\nfunc (m managedPlugin) ProtocolVersion() int {\n\treturn m.client.NegotiatedVersion()\n}\n\nfunc (m managedPlugin) Close() {\n\tif m.client == nil {\n\t\treturn\n\t}\n\tm.client.Kill()\n}\n\ntype unmanagedPlugin struct {\n\tconfig *plugin.ReattachConfig\n\tpluginBase\n}\n\nfunc (u unmanagedPlugin) ProtocolVersion() int {\n\treturn -1\n}\n\nfunc (u unmanagedPlugin) Close() {}\n\n//type Plugins map[string]Plugin\n//\n//func (p Plugins) Get(alias string, name string, version string) Plugin {\n//\talias = checkAlias(alias)\n//\n//\t// 1. unmanagedPlugin\n//\tif v, ok := p[fmt.Sprintf(unmanagedFormat, alias, name, version)]; ok {\n//\t\treturn v\n//\t}\n//\t// 2. 
managedPlugin\n//\tif v, ok := p[fmt.Sprintf(managedFormat, alias, name, version)]; ok {\n//\t\treturn v\n//\t}\n//\treturn nil\n//}\n\nfunc getProvider(name string, client *plugin.Client) (shard.ProviderClient, error) {\n\tgrpcClient, err := client.Client()\n\tif err != nil {\n\t\tclient.Kill()\n\t\treturn nil, err\n\t}\n\traw, err := grpcClient.Dispense(\"provider\")\n\tif err != nil {\n\t\tclient.Kill()\n\t\treturn nil, err\n\t}\n\n\tprovider, ok := raw.(shard.ProviderClient)\n\tif !ok {\n\t\tclient.Kill()\n\t\treturn nil, fmt.Errorf(\"plugin %s is not a provider\", name)\n\t}\n\treturn provider, nil\n}\n\nfunc checkAlias(alias string) string {\n\tif alias == \"\" {\n\t\treturn defaultAlias\n\t}\n\treturn alias\n}\n\nfunc NewManagedPlugin(filepath string, name string, version string, alias string, env []string) (Plugin, error) {\n\t// managedFormat prefixManaged:alias:name:version (e.g. managed:alias:foo:1.0.0)\n\tmanagedFormat := fmt.Sprintf(\"%s:%%s:%%s:%%s\", prefixManaged)\n\n\tdefaultLogger, _ := logger.NewLogger(logger.Config{\n\t\tFileLogEnabled:    true,\n\t\tConsoleLogEnabled: false,\n\t\tEncodeLogsAsJson:  true,\n\t\tConsoleNoColor:    true,\n\t\tSource:            \"plugin\",\n\t\tDirectory:         \"logs\",\n\t\tLevel:             \"debug\",\n\t})\n\n\talias = checkAlias(alias)\n\tcmd := exec.Command(filepath)\n\tcmd.Env = append(cmd.Env, env...)\n\tclient := plugin.NewClient(&plugin.ClientConfig{\n\t\tSyncStdout:       os.Stdout,\n\t\tSyncStderr:       os.Stderr,\n\t\tHandshakeConfig:  serve.HandSharkConfig,\n\t\tVersionedPlugins: shard.VersionPluginMap,\n\t\tManaged:          true,\n\t\tCmd:              cmd,\n\t\tAllowedProtocols: []plugin.Protocol{plugin.ProtocolGRPC},\n\t\tLogger:           defaultLogger,\n\t})\n\tprovider, err := getProvider(name, client)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn &managedPlugin{\n\t\tpluginBase: pluginBase{\n\t\t\tname:     fmt.Sprintf(managedFormat, alias, name, version),\n\t\t\tclient:   
client,\n\t\t\tprovider: provider,\n\t\t\tversion:  version,\n\t\t},\n\t}, nil\n}\n\nfunc NewUnmanagedPlugin(alias string, name string, version string, config *plugin.ReattachConfig) (Plugin, error) {\n\talias = checkAlias(alias)\n\t// unmanagedFormat prefixUnmanaged:alias:name:version (e.g. unmanaged:alias:foo:1.0.0)\n\tunmanagedFormat := fmt.Sprintf(\"%s:%%s:%%s:%%s\", prefixUnmanaged)\n\tclient := plugin.NewClient(&plugin.ClientConfig{\n\t\tHandshakeConfig:  serve.HandSharkConfig,\n\t\tPlugins:          shard.PluginMap,\n\t\tReattach:         config,\n\t\tAllowedProtocols: []plugin.Protocol{plugin.ProtocolGRPC},\n\t\tSyncStderr:       os.Stderr,\n\t\tSyncStdout:       os.Stdout,\n\t})\n\tprovider, err := getProvider(name, client)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &unmanagedPlugin{\n\t\tconfig: config,\n\t\tpluginBase: pluginBase{\n\t\t\tname:     fmt.Sprintf(unmanagedFormat, alias, name, version),\n\t\t\tclient:   client,\n\t\t\tprovider: provider,\n\t\t\tversion:  version,\n\t\t},\n\t}, nil\n}\n"
  },
  {
    "path": "pkg/providers/local_providers_manager/exists.go",
    "content": "package local_providers_manager\n\nimport (\n\t\"context\"\n\t\"github.com/selefra/selefra-provider-sdk/provider/schema\"\n\t\"github.com/selefra/selefra/pkg/utils\"\n)\n\n// IsProviderInstalled Used to query whether the provider is installed locally\nfunc (x *LocalProvidersManager) IsProviderInstalled(ctx context.Context, provider *LocalProvider) (bool, *schema.Diagnostics) {\n\n\t// If it is not the latest version, you can directly determine the path\n\tif !provider.IsLatestVersion() {\n\t\tpath := x.buildLocalProviderVersionPath(provider.Name, provider.Version)\n\t\treturn utils.Exists(path), nil\n\t}\n\n\t// If it is the latest version, obtain the version number of the latest version\n\tmetadata, err := x.providerRegistry.GetMetadata(ctx, provider.Provider)\n\tif err != nil {\n\t\treturn false, schema.NewDiagnostics().AddErrorMsg(\"provider %s get metadata error: %s\", provider.Name, err.Error())\n\t}\n\tversion := metadata.LatestVersion\n\tpath := x.buildLocalProviderVersionPath(provider.Name, version)\n\treturn utils.Exists(path), nil\n\n}\n"
  },
  {
    "path": "pkg/providers/local_providers_manager/exists_test.go",
    "content": "package local_providers_manager\n\nimport (\n\t\"context\"\n\t\"github.com/selefra/selefra/pkg/utils\"\n\t\"github.com/stretchr/testify/assert\"\n\t\"testing\"\n)\n\nfunc getTestLocalProviderManager() *LocalProvidersManager {\n\tmanager, err := NewLocalProvidersManager(\"./test_download\")\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\treturn manager\n}\n\nfunc TestLocalProvidersManager_IsProviderInstalled(t *testing.T) {\n\tinstalled, diagnostics := getTestLocalProviderManager().IsProviderInstalled(context.Background(), NewLocalProvider(\"aws\", \"v0.0.5\"))\n\tassert.False(t, utils.HasError(diagnostics))\n\tassert.False(t, installed)\n}\n"
  },
  {
    "path": "pkg/providers/local_providers_manager/get.go",
    "content": "package local_providers_manager\n\nimport (\n\t\"context\"\n\t\"github.com/selefra/selefra-provider-sdk/provider/schema\"\n\t\"github.com/selefra/selefra/pkg/utils\"\n)\n\n// Get Obtain information about the installed provider on the local device\nfunc (x *LocalProvidersManager) Get(ctx context.Context, localProvider *LocalProvider) (*LocalProvider, *schema.Diagnostics) {\n\tdiagnostics := schema.NewDiagnostics()\n\tproviderVersionMetaFilePath := x.buildLocalProviderVersionMetaFilePath(localProvider.Name, localProvider.Version)\n\tlocalProviderMeta, err := utils.ReadJsonFile[*LocalProvider](providerVersionMetaFilePath)\n\tif err != nil {\n\t\treturn nil, diagnostics.AddErrorMsg(\"read local provider version %s meta file failed: %s\", localProvider.String(), err)\n\t}\n\treturn localProviderMeta, diagnostics\n}\n"
  },
  {
    "path": "pkg/providers/local_providers_manager/get_test.go",
    "content": "package local_providers_manager\n\nimport (\n\t\"context\"\n\t\"github.com/selefra/selefra-provider-sdk/provider/schema\"\n\t\"github.com/selefra/selefra/pkg/message\"\n\t\"github.com/selefra/selefra/pkg/utils\"\n\t\"github.com/stretchr/testify/assert\"\n\t\"testing\"\n)\n\nfunc TestLocalProvidersManager_Get(t *testing.T) {\n\n\ttestProviderName := \"aws\"\n\ttestProviderVersion := \"v0.0.5\"\n\n\tmanager := getTestLocalProviderManager()\n\n\tisInstalled, d := manager.IsProviderInstalled(context.Background(), NewLocalProvider(testProviderName, testProviderVersion))\n\tif utils.IsNotEmpty(d) {\n\t\tt.Log(d.ToString())\n\t}\n\tassert.False(t, utils.HasError(d))\n\tif !isInstalled {\n\t\tmessageChannel := message.NewChannel[*schema.Diagnostics](func(index int, message *schema.Diagnostics) {\n\t\t\tt.Log(message.ToString())\n\t\t})\n\t\tmanager.InstallProvider(context.Background(), &InstallProvidersOptions{\n\t\t\tRequiredProvider: NewLocalProvider(\"aws\", \"v0.0.5\"),\n\t\t\tMessageChannel:   messageChannel,\n\t\t})\n\t\tmessageChannel.ReceiverWait()\n\t}\n\n\tlocalProvider, diagnostics := manager.Get(context.Background(), NewLocalProvider(\"aws\", \"v0.0.5\"))\n\tif utils.IsNotEmpty(diagnostics) {\n\t\tt.Log(diagnostics.ToString())\n\t}\n\tassert.False(t, utils.HasError(diagnostics))\n\tassert.NotNil(t, localProvider)\n}\n"
  },
  {
    "path": "pkg/providers/local_providers_manager/install.go",
    "content": "package local_providers_manager\n\nimport (\n\t\"context\"\n\t\"encoding/json\"\n\t\"fmt\"\n\t\"github.com/hashicorp/go-getter\"\n\t\"github.com/selefra/selefra-provider-sdk/provider/schema\"\n\t\"github.com/selefra/selefra-utils/pkg/pointer\"\n\t\"github.com/selefra/selefra/pkg/message\"\n\t\"github.com/selefra/selefra/pkg/registry\"\n\t\"github.com/selefra/selefra/pkg/utils\"\n\t\"os\"\n\t\"time\"\n)\n\ntype InstallProvidersOptions struct {\n\n\t// What are the providers required to be installed\n\tRequiredProvider *LocalProvider\n\n\t// Used to receive messages in real time\n\tMessageChannel *message.Channel[*schema.Diagnostics]\n\n\tProgressTracker getter.ProgressTracker\n}\n\nfunc (x *LocalProvidersManager) InstallProvider(ctx context.Context, options *InstallProvidersOptions) {\n\n\tdefer func() {\n\t\toptions.MessageChannel.SenderWaitAndClose()\n\t}()\n\n\tpath := x.buildLocalProviderVersionPath(options.RequiredProvider.Name, options.RequiredProvider.Version)\n\tif utils.Exists(path) {\n\t\toptions.MessageChannel.Send(schema.NewDiagnostics().AddErrorMsg(\"Provider %s in directory %s already installed, remove it first\", options.RequiredProvider.String(), path))\n\t\treturn\n\t}\n\n\t// check require provider & version exist\n\tmetadata, err := x.providerRegistry.GetMetadata(ctx, registry.NewProvider(options.RequiredProvider.Name, options.RequiredProvider.Version))\n\tif err != nil {\n\t\toptions.MessageChannel.Send(schema.NewDiagnostics().AddErrorMsg(\"Get provider %s metadata error: %s\", options.RequiredProvider.String(), err.Error()))\n\t\treturn\n\t}\n\n\t// parse install version\n\tvar version string\n\tif !options.RequiredProvider.IsLatestVersion() {\n\t\tif !metadata.HasVersion(options.RequiredProvider.Version) {\n\t\t\treport := fmt.Sprintf(\"Provider %s does not exist, can not install it, I'm very sorry.\", 
options.RequiredProvider.String())\n\t\t\toptions.MessageChannel.Send(schema.NewDiagnostics().AddErrorMsg(report))\n\t\t\treturn\n\t\t}\n\t\tversion = options.RequiredProvider.Version\n\t} else {\n\t\tversion = metadata.LatestVersion\n\t\tpath := x.buildLocalProviderVersionPath(options.RequiredProvider.Name, version)\n\t\tif utils.Exists(path) {\n\t\t\toptions.MessageChannel.Send(schema.NewDiagnostics().AddInfo(\"Provider %s latest version has been installed on %s\", options.RequiredProvider.Name, path))\n\t\t\treturn\n\t\t}\n\t}\n\n\t// Download the provider executable file\n\tproviderVersionPath := x.buildLocalProviderVersionPath(options.RequiredProvider.Name, version)\n\tdownloadOptions := &registry.ProviderRegistryDownloadOptions{\n\t\tProviderDownloadDirectoryPath: providerVersionPath,\n\t\tSkipVerify:                    pointer.TruePointer(),\n\t\tProgressTracker:               options.ProgressTracker,\n\t}\n\tproviderExecuteFilePath, err := x.providerRegistry.Download(ctx, registry.NewProvider(options.RequiredProvider.Name, version), downloadOptions)\n\tif err != nil {\n\t\toptions.MessageChannel.Send(schema.NewDiagnostics().AddErrorMsg(\"Install provider %s in directory %s failed: %s\", options.RequiredProvider.String(), utils.AbsPath(providerVersionPath), err.Error()))\n\t\treturn\n\t}\n\t//options.MessageChannel.Send(schema.NewDiagnostics().AddInfo(\"Install provider %s in directory %s success\", options.RequiredProvider.String(), utils.AbsPath(providerVersionPath)))\n\n\t// Construct metadata\n\tlocalProvider := LocalProvider{\n\t\tProvider:           registry.NewProvider(options.RequiredProvider.Name, version),\n\t\tExecutableFilePath: providerExecuteFilePath,\n\t\tChecksum:           \"\",\n\t\tInstallTime:        time.Now(),\n\t\tSource:             LocalProviderSourceGitHubRegistry,\n\t}\n\tmarshal, err := json.Marshal(localProvider)\n\tif err != nil {\n\t\toptions.MessageChannel.Send(schema.NewDiagnostics().AddErrorMsg(\"On install provider %s, json 
marshal local provider error: %s\", options.RequiredProvider.String(), err.Error()))\n\t\treturn\n\t}\n\tmetaFilePath := x.buildLocalProviderVersionMetaFilePath(options.RequiredProvider.Name, version)\n\terr = os.WriteFile(metaFilePath, marshal, os.ModePerm)\n\tif err != nil {\n\t\toptions.MessageChannel.Send(schema.NewDiagnostics().AddErrorMsg(\"On install provider %s, save provider version meta in file %s error: %s\", options.RequiredProvider.String(), metaFilePath, err.Error()))\n\t\treturn\n\t}\n\treturn\n}\n"
  },
  {
    "path": "pkg/providers/local_providers_manager/install_online_test.go",
    "content": "package local_providers_manager\n\n//import (\n//\t\"context\"\n//\t\"github.com/selefra/selefra/global\"\n//\t\"testing\"\n//)\n//\n//func TestInstallOnline(t *testing.T) {\n//\tif testing.Short() {\n//\t\tt.Skip(\"skipping test in short mode.\")\n//\t\treturn\n//\t}\n//\tglobal.SERVER = \"dev-api.selefra.io\"\n//\tglobal.LOGINTOKEN = \"xxxxxxxxxxxxxxxxxxxxxx\"\n//\t*global.WORKSPACE = \"../../tests/workspace/online\"\n//\tctx := context.Background()\n//\terr := install(ctx, []string{\"aws@latest\"})\n//\tif err != nil {\n//\t\tt.Error(err)\n//\t}\n//}\n"
  },
  {
    "path": "pkg/providers/local_providers_manager/install_test.go",
    "content": "package local_providers_manager\n\nimport (\n\t\"context\"\n\t\"github.com/selefra/selefra-provider-sdk/provider/schema\"\n\t\"github.com/selefra/selefra/pkg/message\"\n\t\"testing\"\n)\n\n//import (\n//\t\"context\"\n//\t\"github.com/selefra/selefra/global\"\n//\t\"testing\"\n//)\n//\n//func TestInstall(t *testing.T) {\n//\t*global.WORKSPACE = \"../../tests/workspace/offline\"\n//\tctx := context.Background()\n//\terr := install(ctx, []string{\"aws@latest\"})\n//\tif err != nil {\n//\t\tt.Error(err)\n//\t}\n//}\n\nfunc TestLocalProvidersManager_InstallProvider(t *testing.T) {\n\tmessageChannel := message.NewChannel[*schema.Diagnostics](func(index int, message *schema.Diagnostics) {\n\t\tt.Log(message.ToString())\n\t})\n\tgetTestLocalProviderManager().InstallProvider(context.Background(), &InstallProvidersOptions{\n\t\tRequiredProvider: NewLocalProvider(\"mock\", \"v0.0.1\"),\n\t\tMessageChannel:   messageChannel,\n\t})\n\tmessageChannel.ReceiverWait()\n}\n"
  },
  {
    "path": "pkg/providers/local_providers_manager/list.go",
    "content": "package local_providers_manager\n\nimport (\n\t\"errors\"\n\t\"github.com/selefra/selefra-provider-sdk/provider/schema\"\n\t\"github.com/selefra/selefra/pkg/utils\"\n\t\"os\"\n)\n\n// ListProviders all providers installed locally\nfunc (x *LocalProvidersManager) ListProviders() ([]*LocalProviderVersions, *schema.Diagnostics) {\n\n\tdiagnostics := schema.NewDiagnostics()\n\n\tpath := x.buildLocalProvidersPath()\n\tentrySlice, err := os.ReadDir(path)\n\tif err != nil {\n\t\tif errors.Is(err, os.ErrNotExist) {\n\t\t\treturn nil, diagnostics.AddInfo(\"You haven't installed any providers yet.\")\n\t\t} else {\n\t\t\treturn nil, diagnostics.AddErrorMsg(\"Can not exec list command, open directory %s error: %s\", path, err.Error())\n\t\t}\n\t}\n\n\tversionsSlice := make([]*LocalProviderVersions, 0)\n\tfor _, entry := range entrySlice {\n\t\tif !entry.IsDir() {\n\t\t\tcontinue\n\t\t}\n\t\tproviderName := entry.Name()\n\t\tversions, d := x.ListProviderVersions(providerName)\n\n\t\tif !diagnostics.AddDiagnostics(d).HasError() && len(versions.ProviderVersionMap) > 0 {\n\t\t\tversionsSlice = append(versionsSlice, versions)\n\t\t}\n\t}\n\n\treturn versionsSlice, diagnostics\n}\n\n// ListProviderVersions Lists all the installed versions of this Provider\nfunc (x *LocalProvidersManager) ListProviderVersions(providerName string) (*LocalProviderVersions, *schema.Diagnostics) {\n\n\tdiagnostics := schema.NewDiagnostics()\n\n\tproviderDirectory := x.buildLocalProviderPath(providerName)\n\tproviderVersionEntrySlice, err := os.ReadDir(providerDirectory)\n\tif err != nil {\n\t\treturn nil, diagnostics.AddErrorMsg(\"List provider versions read directory %s error: %s\", utils.AbsPath(providerDirectory), err.Error())\n\t}\n\n\tversions := NewLocalProviderVersions(providerName)\n\tfor _, providerVersionEntry := range providerVersionEntrySlice {\n\t\tif !providerVersionEntry.IsDir() {\n\t\t\tcontinue\n\t\t}\n\t\tproviderVersion := providerVersionEntry.Name()\n\t\tpath := 
x.buildLocalProviderVersionMetaFilePath(providerName, providerVersion)\n\t\tlocalProvider, err := utils.ReadJsonFile[*LocalProvider](path)\n\t\tif err != nil {\n\t\t\tdiagnostics.AddError(err)\n\t\t} else {\n\t\t\tversions.AddLocalProvider(localProvider)\n\t\t}\n\t}\n\n\treturn versions, diagnostics\n}\n"
  },
  {
    "path": "pkg/providers/local_providers_manager/list_online_test.go",
    "content": "package local_providers_manager\n\n//import (\n//\t\"github.com/selefra/selefra/global\"\n//\t\"testing\"\n//)\n//\n//func TestListOnline(t *testing.T) {\n//\tif testing.Short() {\n//\t\tt.Skip(\"skipping test in short mode.\")\n//\t\treturn\n//\t}\n//\tglobal.SERVER = \"dev-api.selefra.io\"\n//\tglobal.LOGINTOKEN = \"xxxxxxxxxxxxxxxxxxxxxx\"\n//\t*global.WORKSPACE = \"../../tests/workspace/online\"\n//\terr := list()\n//\tif err != nil {\n//\t\tt.Error(err)\n//\t}\n//}\n"
  },
  {
    "path": "pkg/providers/local_providers_manager/list_test.go",
    "content": "package local_providers_manager\n\nimport (\n\t\"context\"\n\t\"github.com/selefra/selefra-provider-sdk/provider/schema\"\n\t\"github.com/selefra/selefra/pkg/message\"\n\t\"github.com/selefra/selefra/pkg/utils\"\n\t\"github.com/stretchr/testify/assert\"\n\t\"testing\"\n)\n\n//import (\n//\t\"github.com/selefra/selefra/global\"\n//\t\"testing\"\n//)\n//\n//func TestList(t *testing.T) {\n//\t*global.WORKSPACE = \"../../tests/workspace/offline\"\n//\terr := list()\n//\tif err != nil {\n//\t\tt.Error(err)\n//\t}\n//}\n\nfunc TestLocalProvidersManager_ListProviderVersions(t *testing.T) {\n\n\ttestProviderName := \"aws\"\n\ttestProviderVersion := \"v0.0.5\"\n\n\tmanager := getTestLocalProviderManager()\n\n\tisInstalled, d := manager.IsProviderInstalled(context.Background(), NewLocalProvider(testProviderName, testProviderVersion))\n\tif utils.IsNotEmpty(d) {\n\t\tt.Log(d.ToString())\n\t}\n\tassert.False(t, utils.HasError(d))\n\tif !isInstalled {\n\t\tmessageChannel := message.NewChannel[*schema.Diagnostics](func(index int, message *schema.Diagnostics) {\n\t\t\tt.Log(message.ToString())\n\t\t})\n\t\tmanager.InstallProvider(context.Background(), &InstallProvidersOptions{\n\t\t\tRequiredProvider: NewLocalProvider(testProviderName, testProviderVersion),\n\t\t\tMessageChannel:   messageChannel,\n\t\t})\n\t\tmessageChannel.ReceiverWait()\n\t}\n\n\tversions, diagnostics := getTestLocalProviderManager().ListProviderVersions(testProviderName)\n\tif utils.IsNotEmpty(diagnostics) {\n\t\tt.Log(diagnostics.ToString())\n\t}\n\tassert.False(t, utils.HasError(diagnostics))\n\tassert.GreaterOrEqual(t, len(versions.ProviderVersionMap), 1)\n}\n\nfunc TestLocalProvidersManager_ListProviders(t *testing.T) {\n\ttestProviderName := \"aws\"\n\ttestProviderVersion := \"v0.0.5\"\n\n\tmanager := getTestLocalProviderManager()\n\n\tisInstalled, d := manager.IsProviderInstalled(context.Background(), NewLocalProvider(testProviderName, testProviderVersion))\n\tif utils.IsNotEmpty(d) 
{\n\t\tt.Log(d.ToString())\n\t}\n\tassert.False(t, utils.HasError(d))\n\tif !isInstalled {\n\t\tmessageChannel := message.NewChannel[*schema.Diagnostics](func(index int, message *schema.Diagnostics) {\n\t\t\tt.Log(message.ToString())\n\t\t})\n\t\tmanager.InstallProvider(context.Background(), &InstallProvidersOptions{\n\t\t\tRequiredProvider: NewLocalProvider(testProviderName, testProviderVersion),\n\t\t\tMessageChannel:   messageChannel,\n\t\t})\n\t\tmessageChannel.ReceiverWait()\n\t}\n\n\tproviders, diagnostics := getTestLocalProviderManager().ListProviders()\n\tif utils.IsNotEmpty(diagnostics) {\n\t\tt.Log(diagnostics.ToString())\n\t}\n\tassert.False(t, utils.HasError(diagnostics))\n\tassert.GreaterOrEqual(t, len(providers), 1)\n}\n"
  },
  {
    "path": "pkg/providers/local_providers_manager/local_provider.go",
    "content": "package local_providers_manager\n\nimport (\n\t\"github.com/selefra/selefra/pkg/registry\"\n\t\"time\"\n)\n\n// ------------------------------------------------- --------------------------------------------------------------------\n\ntype LocalProviderSource int\n\nconst (\n\tLocalProviderSourceUnknown LocalProviderSource = iota\n\tLocalProviderSourceGitHubRegistry\n\tLocalProviderSourceLocalRegistry\n)\n\n// ------------------------------------------------- --------------------------------------------------------------------\n\ntype LocalProvider struct {\n\t*registry.Provider\n\n\t// provider executable file path\n\tExecutableFilePath string `json:\"executable-file-path\"`\n\n\t// only support sha256 current\n\tChecksum string `json:\"checksum\"`\n\n\t// The installation time of the provider\n\tInstallTime time.Time `json:\"install-time\"`\n\n\t// Where is this provider obtained from\n\tSource LocalProviderSource `json:\"source\"`\n\n\t// Source dependent context\n\tSourceContext string `json:\"source-context\"`\n}\n\nfunc NewLocalProvider(providerName, providerVersion string) *LocalProvider {\n\treturn &LocalProvider{\n\t\tProvider: registry.NewProvider(providerName, providerVersion),\n\t}\n}\n\n// ------------------------------------------------- --------------------------------------------------------------------\n\n// LocalProviderVersions Indicates all versions and related information of a provider\ntype LocalProviderVersions struct {\n\n\t// provider name\n\tProviderName string `json:\"provider-name\"`\n\n\t// All versions of the provider\n\tProviderVersionMap map[string]*LocalProvider `json:\"provider-version-map\"`\n}\n\nfunc NewLocalProviderVersions(providerName string) *LocalProviderVersions {\n\treturn &LocalProviderVersions{\n\t\tProviderName:       providerName,\n\t\tProviderVersionMap: make(map[string]*LocalProvider, 0),\n\t}\n}\n\nfunc (x *LocalProviderVersions) AddLocalProvider(localProvider *LocalProvider) 
{\n\tx.ProviderVersionMap[localProvider.Version] = localProvider\n}\n\n// ------------------------------------------------- --------------------------------------------------------------------\n"
  },
  {
    "path": "pkg/providers/local_providers_manager/manager.go",
    "content": "package local_providers_manager\n\nimport (\n\t\"github.com/selefra/selefra/pkg/registry\"\n\t\"path/filepath\"\n)\n\nconst (\n\n\t// LocalProvidersDirectoryName The locally installed providers are stored in the download directory\n\tLocalProvidersDirectoryName = \"providers\"\n\n\t// LocalProvidersVersionMetaFileName The local provider version will have a metadata file, and this field indicates the name of that metadata file\n\tLocalProvidersVersionMetaFileName = \".version-meta.json\"\n\n\t// LocalProvidersProviderMetaFileName The local provider will have a metadata file, and this field represents the name of that metadata file\n\tLocalProvidersProviderMetaFileName = \".provider-meta.json\"\n)\n\n// ------------------------------------------------- --------------------------------------------------------------------\n\n// LocalProvidersManager TODO Add file locks to avoid concurrency problems during multi-process operations\ntype LocalProvidersManager struct {\n\n\t// selefra Specifies the storage path of the downloaded file\n\tdownloadWorkspace string\n\n\t// The provider registry is used to update the provider from the remote end\n\tproviderRegistry registry.ProviderRegistry\n}\n\nfunc NewLocalProvidersManager(downloadWorkspace string) (*LocalProvidersManager, error) {\n\n\t// init provider registry\n\toptions := registry.NewProviderGithubRegistryOptions(downloadWorkspace)\n\tproviderRegistry, err := registry.NewProviderGithubRegistry(options)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn &LocalProvidersManager{\n\t\tdownloadWorkspace: downloadWorkspace,\n\t\tproviderRegistry:  providerRegistry,\n\t}, nil\n}\n\n// ------------------------------------------------- --------------------------------------------------------------------\n\nfunc (x *LocalProvidersManager) buildLocalProvidersPath() string {\n\treturn filepath.Join(x.downloadWorkspace, LocalProvidersDirectoryName)\n}\n\nfunc (x *LocalProvidersManager) 
buildLocalProviderPath(providerName string) string {\n\treturn filepath.Join(x.downloadWorkspace, LocalProvidersDirectoryName, providerName)\n}\n\n// provider metadata file path\nfunc (x *LocalProvidersManager) buildLocalProviderMetaFilePath(providerName string) string {\n\treturn filepath.Join(x.downloadWorkspace, LocalProvidersDirectoryName, providerName, LocalProvidersProviderMetaFileName)\n}\n\n// Folder in which the provider version is stored\nfunc (x *LocalProvidersManager) buildLocalProviderVersionPath(providerName, providerVersion string) string {\n\treturn filepath.Join(x.downloadWorkspace, LocalProvidersDirectoryName, providerName, providerVersion)\n}\n\n// Location for storing the metadata of the provider version\nfunc (x *LocalProvidersManager) buildLocalProviderVersionMetaFilePath(providerName, providerVersion string) string {\n\treturn filepath.Join(x.downloadWorkspace, LocalProvidersDirectoryName, providerName, providerVersion, LocalProvidersVersionMetaFileName)\n}\n\n// ------------------------------------------------- --------------------------------------------------------------------\n"
  },
  {
    "path": "pkg/providers/local_providers_manager/remove.go",
    "content": "package local_providers_manager\n\nimport (\n\t\"context\"\n\t\"github.com/selefra/selefra-provider-sdk/provider/schema\"\n\t\"github.com/selefra/selefra/pkg/utils\"\n\t\"github.com/selefra/selefra/pkg/version\"\n\t\"os\"\n)\n\n// RemoveProviders Delete the locally installed provider by name and version. If no version is specified, all versions of the provider are deleted by default\nfunc (x *LocalProvidersManager) RemoveProviders(ctx context.Context, providerNameVersionSlice ...string) *schema.Diagnostics {\n\n\tdiagnostics := schema.NewDiagnostics()\n\n\tif len(providerNameVersionSlice) == 0 {\n\t\treturn diagnostics.AddErrorMsg(\"Must specify at least one provider version for remove, for example: aws@v0.0.1\")\n\t}\n\n\t// Analyze whether the providers to be deleted are valid and exist\n\tdeleteActionSlice := make([]func() *schema.Diagnostics, 0)\n\tfor _, providerNameVersion := range providerNameVersionSlice {\n\t\tnameVersion := version.ParseNameAndVersion(providerNameVersion)\n\n\t\tif nameVersion.Version == \"\" {\n\t\t\tdiagnostics.AddErrorMsg(\"The version number cannot be empty. Specify the version number in the format providerName@version, for example: aws@v0.0.1\")\n\t\t\tcontinue\n\t\t} else if nameVersion.IsLatestVersion() {\n\t\t\tdiagnostics.AddErrorMsg(\"The version number cannot be latest. 
Specify a version number, for example: aws@v0.0.1\")\n\t\t\tcontinue\n\t\t}\n\n\t\tpath := x.buildLocalProviderVersionPath(nameVersion.Name, nameVersion.Version)\n\t\tif !utils.Exists(path) {\n\t\t\tdiagnostics.AddErrorMsg(\"Provider version %s not found in %s\", providerNameVersion, x.buildLocalProvidersPath())\n\t\t\tcontinue\n\t\t}\n\n\t\tdeleteActionSlice = append(deleteActionSlice, func() *schema.Diagnostics {\n\t\t\terr := os.RemoveAll(path)\n\t\t\tif err != nil {\n\t\t\t\treturn schema.NewDiagnostics().AddErrorMsg(\"Remove provider %s at local directory %s failed: %s\", nameVersion.String(), path, err.Error())\n\t\t\t} else {\n\t\t\t\treturn schema.NewDiagnostics().AddInfo(\"Remove provider %s at local directory %s success\", nameVersion.String(), path)\n\t\t\t}\n\t\t})\n\n\t}\n\tif diagnostics.HasError() {\n\t\treturn diagnostics\n\t}\n\n\t// Perform a delete operation\n\tfor _, action := range deleteActionSlice {\n\t\tdiagnostics.AddDiagnostics(action())\n\t}\n\n\treturn diagnostics\n}\n\n//func RemoveProviders(names []string) error {\n//\targsMap := make(map[string]bool)\n//\tfor i := range names {\n//\t\targsMap[names[i]] = true\n//\t}\n//\tdeletedMap := make(map[string]bool)\n//\terr := config.IsSelefraWorkspace()\n//\tif err != nil {\n//\t\tui.PrintErrorLn(err.Error())\n//\t\treturn err\n//\t}\n//\tvar cof = &config.SelefraBlock{}\n//\n//\tnamespace, _, err := utils.Home()\n//\tif err != nil {\n//\t\treturn err\n//\t}\n//\tprovider := registry.NewProviderRegistry(namespace)\n//\terr = cof.UnmarshalConfig()\n//\tif err != nil {\n//\t\treturn err\n//\t}\n//\tfor _, p := range cof.Selefra.Providers {\n//\t\tname := *p.Source\n//\t\tpath := utils.GetPathBySource(*p.Source, p.Version)\n//\t\tprov := registry.ProviderBinary{\n//\t\t\tProvider: registry.Provider{\n//\t\t\t\tName:    name,\n//\t\t\t\tVersion: p.Version,\n//\t\t\t\tSource:  \"\",\n//\t\t\t},\n//\t\t\tFilePath: path,\n//\t\t}\n//\t\tif !argsMap[p.Name] || deletedMap[p.Path] 
{\n//\t\t\tbreak\n//\t\t}\n//\n//\t\terr := provider.DeleteProvider(prov)\n//\t\tif err != nil {\n//\t\t\tif !errors.Is(err, os.ErrNotExist) {\n//\t\t\t\tui.PrintWarningF(\"Failed to remove  %s: %s\", p.Name, err.Error())\n//\t\t\t}\n//\t\t}\n//\t\t_, jsonPath, err := utils.Home()\n//\t\tif err != nil {\n//\t\t\treturn err\n//\t\t}\n//\t\tc, err := os.ReadFile(jsonPath)\n//\t\tif err == nil {\n//\t\t\tvar configMap = make(map[string]string)\n//\t\t\terr = json.Unmarshal(c, &configMap)\n//\t\t\tif err != nil {\n//\t\t\t\treturn err\n//\t\t\t}\n//\t\t\tdelete(configMap, *p.Source+\"@\"+p.Version)\n//\t\t\tc, err = json.Marshal(configMap)\n//\t\t\tif err != nil {\n//\t\t\t\treturn err\n//\t\t\t}\n//\t\t\terr = os.RemoveProviders(jsonPath)\n//\t\t\tif err != nil {\n//\t\t\t\treturn err\n//\t\t\t}\n//\t\t\terr = os.WriteFile(jsonPath, c, 0644)\n//\t\t\tif err != nil {\n//\t\t\t\treturn err\n//\t\t\t}\n//\t\t\tdeletedMap[path] = true\n//\t\t}\n//\t\tui.PrintSuccessF(\"Removed %s success\", *p.Source)\n//\t}\n//\treturn nil\n//}\n\n//// DeleteProvider Delete the provider of a given version\n//DeleteProvider(binary *ProviderBinary) error\n\n//func (x *ProviderGithubRegistry) DeleteProvider(binary *ProviderBinary) error {\n//\treturn x.deleteProviderBinary(binary)\n//}\n//\n//func (x *ProviderGithubRegistry) deleteProviderBinary(binary *ProviderBinary) error {\n//\tif _, err := os.Stat(binary.FilePath); err != nil {\n//\t\treturn err\n//\t}\n//\treturn os.RemoveAll(filepath.Dir(binary.FilePath))\n//}\n"
  },
  {
    "path": "pkg/providers/local_providers_manager/remove_online_test.go",
    "content": "package local_providers_manager\n//\n//import (\n//\t\"context\"\n//\t\"github.com/selefra/selefra/global\"\n//\t\"testing\"\n//)\n//\n//func TestRemoveOnline(t *testing.T) {\n//\tif testing.Short() {\n//\t\tt.Skip(\"skipping test in short mode.\")\n//\t\treturn\n//\t}\n//\tglobal.SERVER = \"dev-api.selefra.io\"\n//\tglobal.LOGINTOKEN = \"xxxxxxxxxxxxxxxxxxxxxx\"\n//\t*global.WORKSPACE = \"../../tests/workspace/online\"\n//\terr := RemoveProviders([]string{\"aws\"})\n//\tif err != nil {\n//\t\tt.Error(err)\n//\t}\n//\terr = install(context.Background(), []string{\"aws@latest\"})\n//\tif err != nil {\n//\t\tt.Error(err)\n//\t}\n//}\n"
  },
  {
    "path": "pkg/providers/local_providers_manager/remove_test.go",
    "content": "package local_providers_manager\n\nimport (\n\t\"context\"\n\t\"github.com/selefra/selefra/pkg/utils\"\n\t\"github.com/stretchr/testify/assert\"\n\t\"testing\"\n)\n\n//import (\n//\t\"context\"\n//\t\"github.com/selefra/selefra/global\"\n//\t\"testing\"\n//)\n//\n//func TestRemove(t *testing.T) {\n//\t*global.WORKSPACE = \"../../tests/workspace/offline\"\n//\terr := RemoveProviders([]string{\"aws\"})\n//\tif err != nil {\n//\t\tt.Error(err)\n//\t}\n//\terr = install(context.Background(), []string{\"aws@latest\"})\n//\tif err != nil {\n//\t\tt.Error(err)\n//\t}\n//}\n\nfunc TestLocalProvidersManager_RemoveProviders(t *testing.T) {\n\td := getTestLocalProviderManager().RemoveProviders(context.Background(), \"aws@v0.0.5\")\n\tif utils.HasError(d) {\n\t\tt.Log(d.ToString())\n\t}\n\tassert.False(t, utils.HasError(d))\n}\n"
  },
  {
    "path": "pkg/providers/local_providers_manager/search.go",
    "content": "package local_providers_manager\n\nimport (\n\t\"context\"\n\t\"github.com/selefra/selefra-provider-sdk/provider/schema\"\n\t\"github.com/selefra/selefra/pkg/registry\"\n\t\"strings\"\n)\n\n// SearchLocal Search for the provider installed on the local device\nfunc (x *LocalProvidersManager) SearchLocal(ctx context.Context, keyword string) ([]*LocalProviderVersions, *schema.Diagnostics) {\n\n\tdiagnostics := schema.NewDiagnostics()\n\n\tproviders, d := x.ListProviders()\n\tif diagnostics.AddDiagnostics(d).HasError() {\n\t\treturn nil, diagnostics\n\t}\n\n\tkeyword = strings.ToLower(keyword)\n\thitProviderSlice := make([]*LocalProviderVersions, 0)\n\tfor _, provider := range providers {\n\t\tif strings.Contains(strings.ToLower(provider.ProviderName), keyword) {\n\t\t\thitProviderSlice = append(hitProviderSlice, provider)\n\t\t}\n\t}\n\n\treturn hitProviderSlice, diagnostics\n}\n\n// SearchRegistry Search the provider by keyword on the configured registry\nfunc (x *LocalProvidersManager) SearchRegistry(ctx context.Context, keyword string) ([]*registry.Provider, *schema.Diagnostics) {\n\tproviderSlice, err := x.providerRegistry.Search(ctx, keyword)\n\tif err != nil {\n\t\treturn nil, schema.NewDiagnostics().AddError(err)\n\t}\n\treturn providerSlice, nil\n}\n"
  },
  {
    "path": "pkg/providers/local_providers_manager/search_test.go",
    "content": "package local_providers_manager\n\nimport (\n\t\"context\"\n\t\"github.com/selefra/selefra-provider-sdk/provider/schema\"\n\t\"github.com/selefra/selefra/pkg/message\"\n\t\"github.com/selefra/selefra/pkg/utils\"\n\t\"github.com/stretchr/testify/assert\"\n\t\"testing\"\n)\n\nfunc TestLocalProvidersManager_SearchLocal(t *testing.T) {\n\n\ttestProviderName := \"aws\"\n\ttestProviderVersion := \"v0.0.5\"\n\n\tmanager := getTestLocalProviderManager()\n\n\tisInstalled, d := manager.IsProviderInstalled(context.Background(), NewLocalProvider(testProviderName, testProviderVersion))\n\tif utils.IsNotEmpty(d) {\n\t\tt.Log(d.ToString())\n\t}\n\tassert.False(t, utils.HasError(d))\n\tif !isInstalled {\n\t\tmessageChannel := message.NewChannel[*schema.Diagnostics](func(index int, message *schema.Diagnostics) {\n\t\t\tt.Log(message.ToString())\n\t\t})\n\t\tmanager.InstallProvider(context.Background(), &InstallProvidersOptions{\n\t\t\tRequiredProvider: NewLocalProvider(testProviderName, testProviderVersion),\n\t\t\tMessageChannel:   messageChannel,\n\t\t})\n\t\tmessageChannel.ReceiverWait()\n\t}\n\n\thitProviders, diagnostics := manager.SearchLocal(context.Background(), testProviderName)\n\tif utils.IsNotEmpty(diagnostics) {\n\t\tt.Log(diagnostics.ToString())\n\t}\n\tassert.False(t, utils.HasError(diagnostics))\n\tassert.Len(t, hitProviders, 1)\n\tisContains := false\n\tfor _, provider := range hitProviders {\n\t\tif provider.ProviderName == testProviderName {\n\t\t\tisContains = true\n\t\t}\n\t}\n\tassert.True(t, isContains)\n}\n\nfunc TestLocalProvidersManager_SearchRegistry(t *testing.T) {\n\n\ttestProviderName := \"aws\"\n\n\thitProviders, diagnostics := getTestLocalProviderManager().SearchRegistry(context.Background(), testProviderName)\n\tif utils.IsNotEmpty(diagnostics) {\n\t\tt.Log(diagnostics.ToString())\n\t}\n\tassert.False(t, utils.HasError(diagnostics))\n\tassert.Len(t, hitProviders, 1)\n\tisContains := false\n\tfor _, provider := range hitProviders 
{\n\t\tif provider.Name == testProviderName {\n\t\t\tisContains = true\n\t\t}\n\t}\n\tassert.True(t, isContains)\n}\n"
  },
  {
    "path": "pkg/providers/local_providers_manager/sync.go",
    "content": "package local_providers_manager\n\n//import (\n//\t\"context\"\n//\t\"fmt\"\n//\t\"github.com/selefra/selefra-utils/pkg/id_util\"\n//\t\"github.com/selefra/selefra/cmd/fetch\"\n//\t\"github.com/selefra/selefra/cmd/test\"\n//\t\"github.com/selefra/selefra/cmd/tools\"\n//\t\"github.com/selefra/selefra/config\"\n//\t\"github.com/selefra/selefra/global\"\n//\t\"github.com/selefra/selefra/pkg/http_client\"\n//\t\"github.com/selefra/selefra/pkg/registry\"\n//\t\"github.com/selefra/selefra/pkg/utils\"\n//\t\"github.com/selefra/selefra/ui\"\n//\t\"path/filepath\"\n//\t\"time\"\n//)\n//\n//import (\n//\t\"github.com/selefra/selefra-provider-sdk/storage/database_storage/postgresql_storage\"\n//)\n//\n//type lockStruct struct {\n//\tSchemaKey string\n//\tUuid      string\n//\tStorage   *postgresql_storage.PostgresqlStorage\n//}\n//\n//// TODO Required dependency Module\n//\n//func (x *LocalProvidersManager) Sync() (errLogs []string, lockSlice []lockStruct, err error) {\n//\tui.PrintSuccessLn(\"Initializing provider plugins...\\n\")\n//\tctx := context.Background()\n//\tvar cof = &config.SelefraBlock{}\n//\terr = cof.UnmarshalConfig()\n//\tif err != nil {\n//\t\treturn nil, nil, err\n//\t}\n//\tnamespace, _, err := utils.Home()\n//\tif err != nil {\n//\t\treturn nil, nil, err\n//\t}\n//\tprovider := registry.NewProviderGithubRegistry(namespace)\n//\tui.PrintSuccessF(\"Selefra has been successfully installed providers!\\n\")\n//\tui.PrintSuccessF(\"Checking Selefra provider updates......\\n\")\n//\n//\tvar hasError bool\n//\tvar ProviderRequires []*config.ProviderRequired\n//\tfor _, p := range cof.Selefra.Providers {\n//\t\tconfigVersion := p.Version\n//\t\tprov := registry.Provider{\n//\t\t\tName:    p.Name,\n//\t\t\tVersion: p.Version,\n//\t\t\tSource:  \"\",\n//\t\t\tPath:    p.Path,\n//\t\t}\n//\t\tpp, err := provider.Download(ctx, prov, true)\n//\t\tif err != nil {\n//\t\t\thasError = true\n//\t\t\tui.PrintErrorF(\"%s@%s failed updated：%s\", p.Name, 
p.Version, err.Error())\n//\t\t\terrLogs = append(errLogs, fmt.Sprintf(\"%s@%s failed updated：%s\", p.Name, p.Version, err.Error()))\n//\t\t\tcontinue\n//\t\t} else {\n//\t\t\tp.Path = pp.Filepath\n//\t\t\tp.Version = pp.Version\n//\t\t\terr = tools.SetSelefraProvider(pp, nil, configVersion)\n//\t\t\tif err != nil {\n//\t\t\t\thasError = true\n//\t\t\t\tui.PrintErrorF(\"%s@%s failed updated：%s\", p.Name, p.Version, err.Error())\n//\t\t\t\terrLogs = append(errLogs, fmt.Sprintf(\"%s@%s failed updated：%s\", p.Name, p.Version, err.Error()))\n//\t\t\t\tcontinue\n//\t\t\t}\n//\t\t\tProviderRequires = append(ProviderRequires, p)\n//\t\t\tui.PrintSuccessF(\"\t%s@%s all ready updated!\\n\", p.Name, p.Version)\n//\t\t}\n//\t}\n//\n//\tui.PrintSuccessF(\"Selefra has been finished Upgrade providers!\\n\")\n//\n//\terr = test.CheckSelefraConfig(ctx, *cof)\n//\tif err != nil {\n//\t\tif global.LOGINTOKEN != \"\" && cof.Selefra.CloudBlock != nil && err == nil {\n//\t\t\t_ = http_client.SetupStag(global.LOGINTOKEN, cof.Selefra.CloudBlock.Project, http_client.Failed)\n//\t\t}\n//\t\treturn nil, nil, err\n//\t}\n//\n//\t_, err = grpc_client.Cli.UploadLogStatus()\n//\tif err != nil {\n//\t\tui.PrintErrorLn(err.Error())\n//\t}\n//\tglobal.STAG = \"pull\"\n//\tfor _, p := range ProviderRequires {\n//\t\tconfs, err := tools.GetProviders(cof, p.Name)\n//\t\tif err != nil {\n//\t\t\tui.PrintErrorLn(err.Error())\n//\t\t\tcontinue\n//\t\t}\n//\t\tfor _, conf := range confs {\n//\t\t\tstore, err := tools.GetStore(*cof, p, conf)\n//\t\t\tif err != nil {\n//\t\t\t\thasError = true\n//\t\t\t\tui.PrintErrorF(\"%s@%s failed updated：%s\", p.Name, p.Version, err.Error())\n//\t\t\t\terrLogs = append(errLogs, fmt.Sprintf(\"%s@%s failed updated：%s\", p.Name, p.Version, err.Error()))\n//\t\t\t\tcontinue\n//\t\t\t}\n//\t\t\tctx := context.Background()\n//\t\t\tuuid := id_util.RandomId()\n//\t\t\tvar cp config.CliProviders\n//\t\t\terr = yaml.Unmarshal([]byte(conf), &cp)\n//\t\t\tif err != nil 
{\n//\t\t\t\tui.PrintErrorLn(err.Error())\n//\t\t\t\tcontinue\n//\t\t\t}\n//\t\t\tschemaKey := config.GetSchemaKey(p, cp)\n//\t\t\tfor {\n//\t\t\t\terr = store.Lock(ctx, schemaKey, uuid)\n//\t\t\t\tif err == nil {\n//\t\t\t\t\tbreak\n//\t\t\t\t}\n//\t\t\t\ttime.Sleep(5 * time.Second)\n//\t\t\t}\n//\t\t\tlockSlice = append(lockSlice, lockStruct{\n//\t\t\t\tSchemaKey: schemaKey,\n//\t\t\t\tUuid:      uuid,\n//\t\t\t\tStorage:   store,\n//\t\t\t})\n//\t\t\tneed, _ := tools.NeedFetch(*p, *cof, conf)\n//\t\t\tif !need {\n//\t\t\t\tui.PrintSuccessF(\"%s %s@%s pull infrastructure data:\\n\", cp.Name, p.Name, p.Version)\n//\t\t\t\tui.PrintCustomizeLnNotShow(fmt.Sprintf(\"Pulling %s@%s Please wait for resource information ...\", p.Name, p.Version))\n//\t\t\t\tui.PrintSuccessF(\"\t%s@%s all ready use cache!\\n\", p.Name, p.Version)\n//\t\t\t\tcontinue\n//\t\t\t}\n//\t\t\terr = fetch.Fetch(ctx, cof, p, conf)\n//\t\t\tif err != nil {\n//\t\t\t\tui.PrintErrorF(\"%s %s Synchronization failed：%s\", p.Name, p.Version, err.Error())\n//\t\t\t\thasError = true\n//\t\t\t\tcontinue\n//\t\t\t}\n//\t\t\trequireKey := config.GetCacheKey()\n//\t\t\terr = tools.SetStoreValue(*cof, p, conf, requireKey, time.Now().Format(time.RFC3339))\n//\t\t\tif err != nil {\n//\t\t\t\tui.PrintWarningF(\"%s %s set cache time failed：%s\", p.Name, p.Version, err.Error())\n//\t\t\t\thasError = true\n//\t\t\t\tcontinue\n//\t\t\t}\n//\t\t}\n//\t}\n//\tif hasError {\n//\t\tui.PrintErrorF(`\n//This may be exception, view detailed exception in %s .\n//`, filepath.Join(*global.WORKSPACE, \"logs\"))\n//\t}\n//\n//\treturn errLogs, lockSlice, nil\n//}\n"
  },
  {
    "path": "pkg/providers/local_providers_manager/sync_online_test.go",
    "content": "package local_providers_manager\n\n//import (\n//\t\"github.com/selefra/selefra/global\"\n//\t\"testing\"\n//)\n//\n//func TestSyncOnline(t *testing.T) {\n//\tif testing.Short() {\n//\t\tt.Skip(\"skipping test in short mode.\")\n//\t\treturn\n//\t}\n//\tglobal.SERVER = \"dev-api.selefra.io\"\n//\tglobal.LOGINTOKEN = \"xxxxxxxxxxxxxxxxxxxxxx\"\n//\t*global.WORKSPACE = \"../../tests/workspace/online\"\n//\terrLogs, _, err := Sync()\n//\tif err != nil {\n//\t\tt.Error(err)\n//\t}\n//\tif len(errLogs) != 0 {\n//\t\tt.Error(errLogs)\n//\t}\n//}\n"
  },
  {
    "path": "pkg/providers/local_providers_manager/sync_test.go",
    "content": "package local_providers_manager\n\n//import (\n//\t\"github.com/selefra/selefra/global\"\n//\t\"testing\"\n//)\n//\n//func TestSync(t *testing.T) {\n//\t*global.WORKSPACE = \"../../tests/workspace/offline\"\n//\terrLogs, _, err := Sync()\n//\tif err != nil {\n//\t\tt.Error(err)\n//\t}\n//\tif len(errLogs) != 0 {\n//\t\tt.Error(errLogs)\n//\t}\n//}\n"
  },
  {
    "path": "pkg/providers/local_providers_manager/update.go",
    "content": "package local_providers_manager\n\n//import (\n//\t\"context\"\n//\t\"github.com/selefra/selefra-provider-sdk/provider/schema\"\n//)\n//\n//type UpgradeOptions struct {\n//}\n//\n//func (x *LocalProvidersManager) Upgrade(ctx context.Context, providerNameSlice []string, messageChannel chan *schema.Diagnostics) {\n//\n//\tdefer func() {\n//\t\tclose(messageChannel)\n//\t}()\n//\n//}\n\n//func Upgrade(ctx context.Context, providerNameSlice []string, messageChannel chan *schema.Diagnostics) {\n//\n//\tdefer func() {\n//\t\tclose(messageChannel)\n//\t}()\n//\n//\tdiagnostics := schema.NewDiagnostics()\n//\n//\terr := config.IsSelefraWorkspace()\n//\tif err != nil {\n//\t\tmessageChannel <- diagnostics.AddErrorMsg(err.Error())\n//\t\treturn\n//\t}\n//\n//\tvar cof = &config.SelefraBlock{}\n//\terr = cof.Get()\n//\tif err != nil {\n//\t\tmessageChannel <- diagnostics.AddErrorMsg(err.Error())\n//\t\treturn\n//\t}\n//\n//\tproviderNameMap := make(map[string]struct{})\n//\tfor _, providerName := range providerNameSlice {\n//\t\tproviderNameMap[providerName] = struct{}{}\n//\t}\n//\n//\tprovider := registry.NewProviderGithubRegistry(x.downloadWorkspace)\n//\tfor _, p := range cof.Selefra.Providers {\n//\t\tprov := registry.ProviderBinary{\n//\t\t\tProvider: &registry.Provider{\n//\t\t\t\tName:    p.Name,\n//\t\t\t\tVersion: p.Version,\n//\t\t\t\tSource:  \"\",\n//\t\t\t},\n//\t\t\tFilePath: p.Path,\n//\t\t}\n//\t\tif len(providerNameSlice) != 0 && !providerNameMap[p.Name] {\n//\t\t\tbreak\n//\t\t}\n//\n//\t\tpp, err := provider.CheckUpdate(ctx, prov)\n//\t\tif err != nil {\n//\t\t\treturn err\n//\t\t}\n//\t\tp.Path = pp.Filepath\n//\t\tp.Version = pp.Version\n//\t\tconfs, err := tools.GetProviders(cof, p.Name)\n//\t\tif err != nil {\n//\t\t\treturn err\n//\t\t}\n//\t\tfor _, c := range confs {\n//\t\t\terr = fetch.Fetch(ctx, cof, p, c)\n//\t\t\tif err != nil {\n//\t\t\t\treturn err\n//\t\t\t}\n//\t\t}\n//\t}\n//\treturn nil\n//}\n"
  },
  {
    "path": "pkg/providers/local_providers_manager/update_online_test.go",
    "content": "package local_providers_manager\n\n//import (\n//\t\"context\"\n//\t\"github.com/selefra/selefra/cmd/provider\"\n//\t\"github.com/selefra/selefra/global\"\n//\t\"testing\"\n//)\n//\n//func TestUpdateOnline(t *testing.T) {\n//\tif testing.Short() {\n//\t\tt.Skip(\"skipping test in short mode.\")\n//\t\treturn\n//\t}\n//\tglobal.SERVER = \"dev-api.selefra.io\"\n//\tglobal.LOGINTOKEN = \"xxxxxxxxxxxxxxxxxxxxxx\"\n//\t*global.WORKSPACE = \"../../tests/workspace/online\"\n//\tctx := context.Background()\n//\targ := []string{\"aws\"}\n//\terr := provider.Upgrade(ctx, arg)\n//\tif err != nil {\n//\t\tt.Error(err)\n//\t}\n//}\n"
  },
  {
    "path": "pkg/providers/local_providers_manager/update_test.go",
    "content": "package local_providers_manager\n\n//import (\n//\t\"context\"\n//\t\"github.com/selefra/selefra/cmd/provider\"\n//\t\"github.com/selefra/selefra/global\"\n//\t\"testing\"\n//)\n//\n//func TestUpdate(t *testing.T) {\n//\t*global.WORKSPACE = \"../../tests/workspace/offline\"\n//\tctx := context.Background()\n//\targ := []string{\"aws\"}\n//\terr := provider.Upgrade(ctx, arg)\n//\tif err != nil {\n//\t\tt.Error(err)\n//\t}\n//}\n"
  },
  {
    "path": "pkg/registry/module_github_registry.go",
    "content": "package registry\n\nimport (\n\t\"context\"\n\t\"fmt\"\n\t\"github.com/selefra/selefra-provider-sdk/provider/schema\"\n\t\"github.com/selefra/selefra-utils/pkg/pointer\"\n\t\"github.com/selefra/selefra/pkg/http_client\"\n\t\"github.com/selefra/selefra/pkg/utils\"\n\t\"path/filepath\"\n\t\"strings\"\n\t\"time\"\n)\n\n// TODO Consider a clone-based way to execute private positions support\n\n// ------------------------------------------------- --------------------------------------------------------------------\n\nconst (\n\tModulesListDirectoryName = \"module\"\n)\n\n// ModuleGithubRegistryDefaultRepoFullName The default official module registry\nconst ModuleGithubRegistryDefaultRepoFullName = \"selefra/registry\"\n\n// ------------------------------------------------- --------------------------------------------------------------------\n\ntype ModuleGithubRegistryOptions struct {\n\tDownloadWorkspace    string\n\tRegistryRepoFullName *string\n}\n\nfunc NewModuleGithubRegistryOptions(downloadWorkspace string, registryRepoFullName ...string) *ModuleGithubRegistryOptions {\n\n\tif len(registryRepoFullName) == 0 {\n\t\tregistryRepoFullName = append(registryRepoFullName, ModuleGithubRegistryDefaultRepoFullName)\n\t}\n\n\treturn &ModuleGithubRegistryOptions{\n\t\tDownloadWorkspace:    downloadWorkspace,\n\t\tRegistryRepoFullName: pointer.ToStringPointer(registryRepoFullName[0]),\n\t}\n}\n\nfunc (x *ModuleGithubRegistryOptions) Check() *schema.Diagnostics {\n\t// TODO check params\n\treturn nil\n}\n\n// ------------------------------------------------- --------------------------------------------------------------------\n\ntype ModuleGitHubRegistry struct {\n\t// The owner of registry's repo\n\towner string\n\t// The name of registry's repo\n\trepoName string\n\n\toptions *ModuleGithubRegistryOptions\n}\n\nvar _ ModuleRegistry = &ModuleGitHubRegistry{}\n\nfunc NewModuleGitHubRegistry(options *ModuleGithubRegistryOptions) (*ModuleGitHubRegistry, error) 
{\n\n\t// set default registry url\n\tif options.RegistryRepoFullName == nil {\n\t\toptions.RegistryRepoFullName = pointer.ToStringPointer(ModuleGithubRegistryDefaultRepoFullName)\n\t}\n\n\t// Parse the full name of the github repository\n\towner, repo, err := utils.ParseGitHubRepoFullName(pointer.FromStringPointer(options.RegistryRepoFullName))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn &ModuleGitHubRegistry{\n\t\towner:    owner,\n\t\trepoName: repo,\n\t\toptions:  options,\n\t}, nil\n}\n\nfunc (x *ModuleGitHubRegistry) Download(ctx context.Context, module *Module, options *ModuleRegistryDownloadOptions) (string, error) {\n\treturn downloadModule(ctx, x.buildRegistryUrl(), module, options)\n}\n\nfunc (x *ModuleGitHubRegistry) GetLatestVersion(ctx context.Context, module *Module) (*Module, error) {\n\tmetadata, err := x.GetMetadata(ctx, module)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn NewModule(metadata.Name, metadata.LatestVersion), nil\n}\n\nfunc (x *ModuleGitHubRegistry) GetAllVersion(ctx context.Context, module *Module) ([]*Module, error) {\n\tmetadata, err := x.GetMetadata(ctx, module)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tmoduleSlice := make([]*Module, 0)\n\tfor _, version := range metadata.Versions {\n\t\tmoduleSlice = append(moduleSlice, NewModule(module.Name, version))\n\t}\n\treturn moduleSlice, nil\n}\n\nfunc (x *ModuleGitHubRegistry) GetMetadata(ctx context.Context, module *Module) (*ModuleMetadata, error) {\n\treturn getModuleMeta(ctx, x.buildRegistryUrl(), module)\n}\n\nfunc (x *ModuleGitHubRegistry) GetSupplement(ctx context.Context, module *Module) (*ModuleSupplement, error) {\n\treturn getModuleSupplement(ctx, x.buildRegistryUrl(), module)\n}\n\nfunc (x *ModuleGitHubRegistry) buildModuleRegistryDownloadDirectory() string {\n\treturn filepath.Join(x.options.DownloadWorkspace, \"registry/github\", x.owner, x.repoName)\n}\n\nfunc (x *ModuleGitHubRegistry) List(ctx context.Context) ([]*Module, error) 
{\n\tlocalRegistryDirectoryPath := x.buildModuleRegistryDownloadDirectory()\n\terr := http_client.NewGitHubRepoDownloader().Download(ctx, &http_client.GitHubRepoDownloaderOptions{\n\t\tOwner:             x.owner,\n\t\tRepo:              x.repoName,\n\t\tDownloadDirectory: localRegistryDirectoryPath,\n\t\tCacheTime:         pointer.ToDurationPointer(time.Hour),\n\t\t// TODO no ProgressListener, is ok?\n\t})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\t// TODO create local module registry\n\tregistry, err := NewModuleLocalRegistry(localRegistryDirectoryPath, x.buildRegistryRepoFullName())\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn registry.List(ctx)\n}\n\nfunc (x *ModuleGitHubRegistry) Search(ctx context.Context, keyword string) ([]*Module, error) {\n\tallModuleSlice, err := x.List(ctx)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tkeyword = strings.ToLower(keyword)\n\tsearchResultSlice := make([]*Module, 0)\n\tfor _, module := range allModuleSlice {\n\t\tif strings.Contains(strings.ToLower(module.Name), keyword) {\n\t\t\tsearchResultSlice = append(searchResultSlice, module)\n\t\t}\n\t}\n\treturn searchResultSlice, nil\n}\n\nfunc (x *ModuleGitHubRegistry) buildRegistryRepoFullName() string {\n\treturn fmt.Sprintf(\"%s/%s\", x.owner, x.repoName)\n}\n\nfunc (x *ModuleGitHubRegistry) buildRegistryUrl() string {\n\treturn fmt.Sprintf(\"https://raw.githubusercontent.com/%s/%s/\", x.owner, x.repoName)\n}\n\nfunc (x *ModuleGitHubRegistry) CheckUpdate(ctx context.Context, module *Module) (*Module, error) {\n\tif module.IsLatestVersion() {\n\t\treturn nil, nil\n\t}\n\n\tmetadata, err := x.GetMetadata(ctx, module)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\t// already is latest version\n\tif module.Version == metadata.LatestVersion {\n\t\treturn nil, nil\n\t}\n\n\treturn NewModule(module.Name, metadata.LatestVersion), nil\n}\n\n// ------------------------------------------------- --------------------------------------------------------------------\n\nfunc 
downloadModule(ctx context.Context, registryUrl string, module *Module, options *ModuleRegistryDownloadOptions) (string, error) {\n\n\tif err := formatModuleVersion(ctx, registryUrl, module); err != nil {\n\t\treturn \"\", err\n\t}\n\n\tsupplement, err := getModuleSupplement(ctx, registryUrl, module)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\tif err := utils.EnsureDirectoryNotExists(options.ModuleDownloadDirectoryPath); err != nil {\n\t\treturn \"\", err\n\t}\n\n\tgithubReleaseAssertName := supplement.PackageName\n\n\t// TODO optimization, Improve compatibility\n\t// example: https://github.com/selefra/rules-aws-misconfiguration-s3/releases/download/v0.0.1/rules-aws-misconfigure-s3.zip\n\t// example: https://github.com/selefra/rules-aws-misconfiguration-s3/archive/refs/tags/v0.0.2.zip\n\tgithubReleaseAssertURL := supplement.Source + \"/releases/download/\" + module.Version + \"/\" + githubReleaseAssertName + \".zip\"\n\n\t// TODO checksum\n\t//if !pointer.FromBoolPointerOrDefault(options.SkipVerify, true) {\n\t//\tchecksum, err := supplement.Checksums.selectChecksums()\n\t//\tif err != nil {\n\t//\t\treturn \"\", err\n\t//\t}\n\t//\tgithubReleaseAssertURL += \"?checksum=sha256:\" + checksum\n\t//}\n\n\t// Example URL:\n\t// https://github.com/selefra/rules-aws-misconfiguration-s3/archive/refs/tags/v0.0.2.zip\n\t// https://github.com/selefra/rules-aws-misconfigure-s3/releases/download/v0.0.4/rules-aws-misconfigure-s3.zip\n\terr = http_client.DownloadToDirectory(ctx, options.ModuleDownloadDirectoryPath, githubReleaseAssertURL, options.ProgressTracker)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\t// search download file\n\tif utils.Exists(options.ModuleDownloadDirectoryPath) {\n\t\treturn filepath.Join(options.ModuleDownloadDirectoryPath), nil\n\t}\n\n\treturn \"\", fmt.Errorf(\"module %s download failed\", supplement.PackageName)\n}\n\nfunc formatModuleVersion(ctx context.Context, registryUrl string, module *Module) error {\n\tif 
!module.IsLatestVersion() {\n\t\treturn nil\n\t}\n\tmeta, err := getModuleMeta(ctx, registryUrl, module)\n\tif err != nil {\n\t\treturn err\n\t}\n\tmodule.Version = meta.LatestVersion\n\treturn nil\n}\n\nfunc buildModuleDownloadPath(downloadWorkspace string, module *Module) string {\n\treturn fmt.Sprintf(\"%s/%s/%s/%s\", downloadWorkspace, ModulesListDirectoryName, module.Name, module.Version)\n}\n\nfunc getModuleMeta(ctx context.Context, registryUrl string, module *Module) (*ModuleMetadata, error) {\n\tmetadataUrl := fmt.Sprintf(\"%s/main/%s/%s/%s\", registryUrl, ModulesListDirectoryName, module.Name, MetaDataFileName)\n\tgetYaml, err := http_client.GetYaml[*ModuleMetadata](ctx, metadataUrl)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn getYaml, nil\n}\n\nfunc getModuleSupplement(ctx context.Context, registryUrl string, module *Module) (*ModuleSupplement, error) {\n\tsupplementUrl := fmt.Sprintf(\"%s/main/%s/%s/%s/%s\", registryUrl, ModulesListDirectoryName, module.Name, module.Version, SupplementFileName)\n\tsupplement, err := http_client.GetYaml[*ModuleSupplement](ctx, supplementUrl)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn supplement, err\n}\n\n// ------------------------------------------------- --------------------------------------------------------------------\n\n//func GetHomeModulesPath(modules string, org string) (string, error) {\n//\tpath, _, err := Home()\n//\tif err != nil {\n//\t\treturn \"\", err\n//\t}\n//\tmodulesPath := filepath.Join(path, \"download/modules\")\n//\terr = ModulesUpdate(modules, modulesPath, org)\n//\tif err != nil {\n//\t\treturn \"\", err\n//\t}\n//\t_, err = os.Stat(modulesPath)\n//\tif err != nil {\n//\t\treturn \"\", err\n//\t}\n//\tif errors.Is(err, os.ErrNotExist) {\n//\t\terr = os.MkdirAll(modulesPath, 0755)\n//\t\tif err != nil {\n//\t\t\treturn \"\", err\n//\t\t}\n//\t}\n//\treturn modulesPath, nil\n//}\n\n//func ModulesUpdate(modulesName string, modulesPath string, org string) error {\n//\t_, 
config, err := Home()\n//\tif err != nil {\n//\t\treturn err\n//\t}\n//\tc, err := os.ReadFile(config)\n//\tif err != nil {\n//\t\treturn err\n//\t}\n//\tvar configMap = make(map[string]string)\n//\terr = json.Unmarshal(c, &configMap)\n//\tif err != nil {\n//\t\treturn err\n//\t}\n//\n//\tif org != \"\" {\n//\t\turl := \"https://\" + global.SERVER + \"/cli/download/\" + org + \"/\" + global.Token() + \"/\" + modulesName + \".zip\"\n//\t\t_, err := os.Stat(filepath.Join(modulesPath, modulesName))\n//\t\tif err == nil {\n//\t\t\terr = os.RemoveAll(filepath.Join(modulesPath, modulesName))\n//\t\t\tif err != nil {\n//\t\t\t\treturn err\n//\t\t\t}\n//\t\t}\n//\t\terr = modules.DownloadModule(url, filepath.Join(modulesPath, modulesName))\n//\t\tif err != nil {\n//\t\t\treturn err\n//\t\t}\n//\t\treturn nil\n//\t} else {\n//\t\tif LatestVersion == \"\" {\n//\t\t\tmetadata, err := getModulesMetadata(context.Background(), modulesName)\n//\t\t\tif err != nil {\n//\t\t\t\treturn err\n//\t\t\t}\n//\t\t\tLatestVersion = metadata.LatestVersion\n//\t\t}\n//\t\tif err != nil {\n//\t\t\treturn err\n//\t\t}\n//\t\t_, e := os.Stat(filepath.Join(modulesPath, modulesName))\n//\t\tif configMap[\"modules\"+\"/\"+modulesName] == LatestVersion && e == nil {\n//\t\t\treturn nil\n//\t\t} else {\n//\t\t\tsupplement, err := getModulesModulesSupplement(context.Background(), modulesName, LatestVersion)\n//\t\t\tif err != nil {\n//\t\t\t\treturn err\n//\t\t\t}\n//\t\t\turl := supplement.Source + \"/releases/download/\" + LatestVersion + \"/\" + modulesName + \".zip\"\n//\t\t\terr = os.RemoveAll(filepath.Join(modulesPath, modulesName))\n//\t\t\tif err != nil {\n//\t\t\t\treturn err\n//\t\t\t}\n//\t\t\terr = modules.DownloadModule(url, modulesPath)\n//\t\t\tif err != nil {\n//\t\t\t\treturn err\n//\t\t\t}\n//\t\t\tconfigMap[\"modules\"+\"/\"+modulesName] = LatestVersion\n//\t\t\tc, err := json.Marshal(configMap)\n//\t\t\tif err != nil {\n//\t\t\t\treturn err\n//\t\t\t}\n//\t\t\terr = 
os.Remove(config)\n//\t\t\tif err != nil {\n//\t\t\t\treturn err\n//\t\t\t}\n//\t\t\terr = os.WriteFile(config, c, 0644)\n//\t\t}\n//\t}\n//\treturn nil\n//}\n\n//func GetPathBySource(source, version string) string {\n//\t_, config, err := Home()\n//\tif err != nil {\n//\t\treturn \"\"\n//\t}\n//\tc, err := os.ReadFile(config)\n//\tif err != nil {\n//\t\treturn \"\"\n//\t}\n//\tvar configMap = make(map[string]string)\n//\terr = json.Unmarshal(c, &configMap)\n//\tif err != nil {\n//\t\treturn \"\"\n//\t}\n//\n//\tss := strings.SplitN(source, \"@\", 2)\n//\n//\treturn configMap[ss[0]+\"@\"+version]\n//}\n\n//type ModuleMetadata struct {\n//\tName          string   `json:\"name\" yaml:\"name\"`\n//\tLatestVersion string   `json:\"latest-version\" yaml:\"latest-version\"`\n//\tLatestUpdate  string   `json:\"latest-updated\" yaml:\"latest-updated\"`\n//\tIntroduction  string   `json:\"introduction\" yaml:\"introduction\"`\n//\tVersions      []string `json:\"versions\" yaml:\"versions\"`\n//}\n//\n//type ModulesSupplement struct {\n//\tPackageName string `json:\"package-name\" yaml:\"package-name\"`\n//\tSource      string `json:\"source\" yaml:\"source\"`\n//\tChecksums   string `json:\"checksums\" yaml:\"checksums\"`\n//}\n\n// ------------------------------------------------- --------------------------------------------------------------------\n"
  },
  {
    "path": "pkg/registry/module_local_registry.go",
    "content": "package registry\n\nimport (\n\t\"context\"\n\t\"fmt\"\n\t\"github.com/selefra/selefra/pkg/utils\"\n\t\"os\"\n\t\"path/filepath\"\n\t\"strings\"\n)\n\ntype ModuleLocalRegistry struct {\n\tregistryDirectory          string\n\tregistryGitHubRepoFullName string\n}\n\nvar _ ModuleRegistry = &ModuleLocalRegistry{}\n\nfunc NewModuleLocalRegistry(registryDirectory string, registryGitHubRepoFullName ...string) (*ModuleLocalRegistry, error) {\n\tstat, err := os.Stat(registryDirectory)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"visit registryDirectory %s error: %s\", registryDirectory, err.Error())\n\t}\n\tif !stat.IsDir() {\n\t\treturn nil, fmt.Errorf(\"%s is not registryDirectory\", registryDirectory)\n\t}\n\n\tif len(registryGitHubRepoFullName) == 0 {\n\t\tregistryGitHubRepoFullName = append(registryGitHubRepoFullName, ModuleGithubRegistryDefaultRepoFullName)\n\t}\n\n\treturn &ModuleLocalRegistry{\n\t\tregistryDirectory:          registryDirectory,\n\t\tregistryGitHubRepoFullName: registryGitHubRepoFullName[0],\n\t}, nil\n}\n\nfunc (x *ModuleLocalRegistry) CheckUpdate(ctx context.Context, module *Module) (*Module, error) {\n\tif module.IsLatestVersion() {\n\t\treturn nil, nil\n\t}\n\n\tmetaPath := filepath.Join(x.registryDirectory, ModulesListDirectoryName, module.Name, MetaDataFileName)\n\tmeta, err := utils.ReadYamlFile[*ModuleMetadata](metaPath)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tif meta.LatestVersion == module.Version {\n\t\treturn nil, nil\n\t}\n\n\treturn NewModule(module.Name, meta.LatestUpdate), nil\n}\n\nfunc (x *ModuleLocalRegistry) Download(ctx context.Context, module *Module, options *ModuleRegistryDownloadOptions) (string, error) {\n\tregistry, err := NewModuleGitHubRegistry(NewModuleGithubRegistryOptions(x.registryDirectory, x.registryGitHubRepoFullName))\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\treturn downloadModule(ctx, registry.buildRegistryUrl(), module, options)\n}\n\nfunc (x *ModuleLocalRegistry) 
GetLatestVersion(ctx context.Context, module *Module) (*Module, error) {\n\tmetaPath := filepath.Join(x.registryDirectory, ModulesListDirectoryName, module.Name, MetaDataFileName)\n\tmeta, err := utils.ReadYamlFile[*ProviderMetadata](metaPath)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn NewModule(module.Name, meta.LatestVersion), nil\n}\n\nfunc (x *ModuleLocalRegistry) GetAllVersion(ctx context.Context, module *Module) ([]*Module, error) {\n\tmeta, err := x.GetMetadata(ctx, module)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tproviderSlice := make([]*Module, 0, len(meta.Versions))\n\tfor _, v := range meta.Versions {\n\t\tproviderSlice = append(providerSlice, NewModule(module.Name, v))\n\t}\n\treturn providerSlice, nil\n}\n\nfunc (x *ModuleLocalRegistry) GetMetadata(ctx context.Context, module *Module) (*ModuleMetadata, error) {\n\tmetaPath := filepath.Join(x.registryDirectory, ModulesListDirectoryName, module.Name, MetaDataFileName)\n\tmeta, err := utils.ReadYamlFile[*ModuleMetadata](metaPath)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn meta, nil\n}\n\nfunc (x *ModuleLocalRegistry) GetSupplement(ctx context.Context, module *Module) (*ModuleSupplement, error) {\n\tsupplementPath := filepath.Join(x.registryDirectory, ModulesListDirectoryName, module.Name, module.Version, SupplementFileName)\n\tsupplement, err := utils.ReadYamlFile[*ModuleSupplement](supplementPath)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn supplement, nil\n}\n\nfunc (x *ModuleLocalRegistry) List(ctx context.Context) ([]*Module, error) {\n\tmodulesListDirectoryPath := filepath.Join(x.registryDirectory, ModulesListDirectoryName)\n\tentrySlice, err := os.ReadDir(modulesListDirectoryPath)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tproviderSlice := make([]*Module, 0)\n\tfor _, entry := range entrySlice {\n\t\tif !entry.IsDir() {\n\t\t\tcontinue\n\t\t}\n\t\tmetaFilePath := filepath.Join(modulesListDirectoryPath, entry.Name(), MetaDataFileName)\n\t\tmeta, err := 
utils.ReadYamlFile[*ModuleMetadata](metaFilePath)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tproviderSlice = append(providerSlice, NewModule(meta.Name, meta.LatestVersion))\n\t}\n\treturn providerSlice, nil\n}\n\nfunc (x *ModuleLocalRegistry) Search(ctx context.Context, keyword string) ([]*Module, error) {\n\tallModuleSlice, err := x.List(ctx)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tkeyword = strings.ToLower(keyword)\n\thitModuleSlice := make([]*Module, 0)\n\tfor _, module := range allModuleSlice {\n\t\tif strings.Contains(strings.ToLower(module.Name), keyword) {\n\t\t\thitModuleSlice = append(hitModuleSlice, module)\n\t\t}\n\t}\n\treturn hitModuleSlice, nil\n}\n"
  },
  {
    "path": "pkg/registry/module_registry.go",
    "content": "package registry\n\nimport (\n\t\"context\"\n\t\"github.com/hashicorp/go-getter\"\n\t\"github.com/selefra/selefra/pkg/version\"\n)\n\n// ------------------------------------------------- --------------------------------------------------------------------\n\ntype Module struct {\n\t*version.NameAndVersion\n}\n\nfunc ParseModule(moduleNameAndVersion string) *Module {\n\treturn &Module{\n\t\tNameAndVersion: version.ParseNameAndVersion(moduleNameAndVersion),\n\t}\n}\n\nfunc NewModule(moduleName, moduleVersion string) *Module {\n\treturn &Module{\n\t\tNameAndVersion: version.NewNameAndVersion(moduleName, moduleVersion),\n\t}\n}\n\n// ------------------------------------------------- --------------------------------------------------------------------\n\ntype ModuleMetadata struct {\n\tName          string   `json:\"name\" yaml:\"name\"`\n\tLatestVersion string   `json:\"latest-version\" yaml:\"latest-version\"`\n\tLatestUpdate  string   `json:\"latest-updated\" yaml:\"latest-updated\"`\n\tIntroduction  string   `json:\"introduction\" yaml:\"introduction\"`\n\tVersions      []string `json:\"versions\" yaml:\"versions\"`\n}\n\nfunc (x *ModuleMetadata) HasVersion(version string) bool {\n\tfor _, v := range x.Versions {\n\t\tif v == version {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}\n\ntype ModuleSupplement struct {\n\tPackageName string `json:\"package-name\" yaml:\"package-name\"`\n\tSource      string `json:\"source\" yaml:\"source\"`\n\tChecksums   string `json:\"checksum\" yaml:\"checksum\"`\n}\n\n// ------------------------------------------------- --------------------------------------------------------------------\n\ntype ModuleRegistryDownloadOptions struct {\n\n\t// Which directory to save the downloaded module to\n\tModuleDownloadDirectoryPath string\n\n\t// Whether to skip authentication. 
If the authentication is skipped, the checksum of the downloaded file is not verified\n\tSkipVerify *bool\n\n\t// Downloading can be time-consuming, so you can set up a monitor to track progress\n\tProgressTracker getter.ProgressTracker\n}\n\n// ModuleRegistry Used to represent a registry for a Module repository\ntype ModuleRegistry interface {\n\n\t// CheckUpdate Used to check whether a module has a newer version than the current one\n\tCheckUpdate(ctx context.Context, module *Module) (*Module, error)\n\n\t// Download the module to a local registryDirectory\n\tDownload(ctx context.Context, module *Module, options *ModuleRegistryDownloadOptions) (string, error)\n\n\t//// DeleteModule Delete the module downloaded locally\n\t//DeleteModule(localModuleInfo *LocalModule) error\n\n\t// GetLatestVersion Gets the latest version of a given module\n\tGetLatestVersion(ctx context.Context, module *Module) (*Module, error)\n\n\t// GetAllVersion Gets all versions of a given module\n\tGetAllVersion(ctx context.Context, module *Module) ([]*Module, error)\n\n\tGetMetadata(ctx context.Context, module *Module) (*ModuleMetadata, error)\n\n\tGetSupplement(ctx context.Context, module *Module) (*ModuleSupplement, error)\n\n\t// List Lists all modules installed locally\n\tList(ctx context.Context) ([]*Module, error)\n\n\t// Search Searches the remote registry for modules containing the given keyword\n\tSearch(ctx context.Context, keyword string) ([]*Module, error)\n}\n\n// ------------------------------------------------- --------------------------------------------------------------------\n"
  },
  {
    "path": "pkg/registry/provider_github_registry.go",
    "content": "package registry\n\nimport (\n\t\"context\"\n\t\"fmt\"\n\t\"github.com/selefra/selefra-provider-sdk/provider/schema\"\n\t\"github.com/selefra/selefra-utils/pkg/pointer\"\n\t\"github.com/selefra/selefra/pkg/http_client\"\n\t\"github.com/selefra/selefra/pkg/logger\"\n\t\"github.com/selefra/selefra/pkg/telemetry\"\n\t\"github.com/selefra/selefra/pkg/utils\"\n\t\"github.com/songzhibin97/gkit/ternary\"\n\t\"path/filepath\"\n\t\"runtime\"\n\t\"strings\"\n\t\"time\"\n)\n\n// ProviderGithubRegistryDefaultRepoFullName The official registry repository\nconst ProviderGithubRegistryDefaultRepoFullName = \"selefra/registry\"\n\nvar (\n\tproviderBinarySuffix = ternary.ReturnString(runtime.GOOS == \"windows\", \".exe\", \"\")\n)\n\n//func request(ctx context.Context, method string, _url string, body []byte, headers ...Header) ([]byte, error) {\n//\tvar lastErr error\n//\tfor tryTimes := 0; tryTimes < 5; tryTimes++ {\n//\t\tclient := &http.Client{}\n//\t\tsBody := strings.NewReader(string(body))\n//\t\trequest, err := http.NewRequestWithContext(ctx, method, _url, sBody)\n//\t\tif err != nil {\n//\t\t\treturn nil, err\n//\t\t}\n//\t\trequest.Header.Add(\"Content-Type\", \"application/json\")\n//\t\tfor _, header := range headers {\n//\t\t\trequest.Header.Add(header.Key, header.Value)\n//\t\t}\n//\n//\t\tresp, err := client.Do(request)\n//\t\tif err != nil {\n//\t\t\t//return nil, err\n//\t\t\tlastErr = err\n//\t\t\tcontinue\n//\t\t}\n//\t\t// just ok\n//\t\tdefer resp.Body.Close()\n//\t\tif resp.StatusCode != http.StatusOK {\n//\t\t\tlastErr = fmt.Errorf(\"request url %s response code %d not equal 200\", _url, resp.StatusCode)\n//\t\t\tcontinue\n//\t\t}\n//\t\trByte, err := ioutil.ReadAll(resp.Body)\n//\t\tif err != nil {\n//\t\t\tlastErr = fmt.Errorf(\"request url %s, read body err : %s\", _url, err.Error())\n//\t\t\tcontinue\n//\t\t}\n//\t\treturn rByte, err\n//\t}\n//\treturn nil, lastErr\n//}\n\n// ------------------------------------------------- 
--------------------------------------------------------------------\n\ntype ProviderGithubRegistryOptions struct {\n\tDownloadWorkspace    string\n\tRegistryRepoFullName *string\n}\n\nfunc NewProviderGithubRegistryOptions(downloadWorkspace string, registryRepoFullName ...string) *ProviderGithubRegistryOptions {\n\n\tif len(registryRepoFullName) == 0 {\n\t\tregistryRepoFullName = append(registryRepoFullName, ProviderGithubRegistryDefaultRepoFullName)\n\t}\n\n\treturn &ProviderGithubRegistryOptions{\n\t\tDownloadWorkspace:    downloadWorkspace,\n\t\tRegistryRepoFullName: pointer.ToStringPointer(registryRepoFullName[0]),\n\t}\n}\n\nfunc (x *ProviderGithubRegistryOptions) Check() *schema.Diagnostics {\n\t// TODO check params\n\treturn nil\n}\n\n// ProviderGithubRegistry provider registry github implementation\ntype ProviderGithubRegistry struct {\n\t// The owner of registry's repo\n\towner string\n\t// The name of registry's repo\n\trepoName string\n\n\toptions *ProviderGithubRegistryOptions\n}\n\nvar _ ProviderRegistry = &ProviderGithubRegistry{}\n\nfunc NewProviderGithubRegistry(options *ProviderGithubRegistryOptions) (*ProviderGithubRegistry, error) {\n\n\t// set default registry url\n\tif options.RegistryRepoFullName == nil {\n\t\toptions.RegistryRepoFullName = pointer.ToStringPointer(ProviderGithubRegistryDefaultRepoFullName)\n\t}\n\n\t// Parse the full name of the github repository\n\towner, repo, err := utils.ParseGitHubRepoFullName(pointer.FromStringPointer(options.RegistryRepoFullName))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn &ProviderGithubRegistry{\n\t\towner:    owner,\n\t\trepoName: repo,\n\t\toptions:  options,\n\t}, nil\n}\n\nfunc (x *ProviderGithubRegistry) buildRegistryRepoFullName() string {\n\treturn fmt.Sprintf(\"%s/%s\", x.owner, x.repoName)\n}\n\nfunc (x *ProviderGithubRegistry) buildRegistryUrl() string {\n\treturn fmt.Sprintf(\"https://raw.githubusercontent.com/%s/%s/\", x.owner, x.repoName)\n}\n\n//func (x 
*ProviderGithubRegistry) getProviderDownloadDirectory(providerName, providerVersion string) string {\n//\treturn filepath.Join(x.downloadWorkspace, ProvidersListDirectoryName, providerName, providerVersion)\n//}\n\nfunc (x *ProviderGithubRegistry) buildProviderRegistryDownloadDirectory() string {\n\treturn filepath.Join(x.options.DownloadWorkspace, \"registry/github\", x.owner, x.repoName)\n}\n\nfunc (x *ProviderGithubRegistry) List(ctx context.Context) ([]*Provider, error) {\n\tlocalRegistryDirectoryPath := x.buildProviderRegistryDownloadDirectory()\n\terr := http_client.NewGitHubRepoDownloader().Download(ctx, &http_client.GitHubRepoDownloaderOptions{\n\t\tOwner:             x.owner,\n\t\tRepo:              x.repoName,\n\t\tDownloadDirectory: localRegistryDirectoryPath,\n\t\tCacheTime:         pointer.ToDurationPointer(time.Hour),\n\t\t// TODO no ProgressListener, is ok?\n\t})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tregistryDirectory := filepath.Join(localRegistryDirectoryPath, x.repoName+\"-main\")\n\tregistry, err := NewProviderLocalRegistry(registryDirectory, x.buildRegistryRepoFullName())\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn registry.List(ctx)\n}\n\nfunc (x *ProviderGithubRegistry) Search(ctx context.Context, keyword string) ([]*Provider, error) {\n\tallProviderSlice, err := x.List(ctx)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tkeyword = strings.ToLower(keyword)\n\tsearchResultSlice := make([]*Provider, 0)\n\tfor _, provider := range allProviderSlice {\n\t\tif strings.Contains(strings.ToLower(provider.Name), keyword) {\n\t\t\tsearchResultSlice = append(searchResultSlice, provider)\n\t\t}\n\t}\n\treturn searchResultSlice, nil\n}\n\nfunc (x *ProviderGithubRegistry) GetLatestVersion(ctx context.Context, provider *Provider) (*Provider, error) {\n\tmetadata, err := x.getProviderMetadata(ctx, provider)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn NewProvider(metadata.Name, metadata.LatestVersion), nil\n}\n\nfunc (x 
*ProviderGithubRegistry) GetAllVersion(ctx context.Context, provider *Provider) ([]*Provider, error) {\n\tmetadata, err := x.getProviderMetadata(ctx, provider)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tproviderSlice := make([]*Provider, 0)\n\tfor _, version := range metadata.Versions {\n\t\tproviderSlice = append(providerSlice, NewProvider(provider.Name, version))\n\t}\n\treturn providerSlice, nil\n}\n\nfunc (x *ProviderGithubRegistry) CheckUpdate(ctx context.Context, provider *Provider) (*Provider, error) {\n\n\tif provider.IsLatestVersion() {\n\t\treturn nil, nil\n\t}\n\n\tmetadata, err := x.getProviderMetadata(ctx, provider)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\t// already is latest version\n\tif provider.Version == metadata.LatestVersion {\n\t\treturn nil, nil\n\t}\n\n\treturn NewProvider(provider.Name, metadata.LatestVersion), nil\n}\n\nfunc (x *ProviderGithubRegistry) getSupplement(ctx context.Context, provider *Provider) (*ProviderSupplement, error) {\n\tsupplementUrl := fmt.Sprintf(\"https://raw.githubusercontent.com/%s/%s/main/%s/%s/%s/%s\", x.owner, x.repoName, ProvidersListDirectoryName, provider.Name, provider.Version, SupplementFileName)\n\tsupplement, err := http_client.GetYaml[*ProviderSupplement](ctx, supplementUrl)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn supplement, err\n\t//downloadUrl := supplement.Supplement.Source + \"/releases/download/\" + ProviderGithubRegistry.Version + \"/\" + ProviderGithubRegistry.Name + \"_\" + runtime.GOOS + \"_\" + runtime.GOARCH + \".tar.gz\"\n}\n\n//func (x *ProviderGithubRegistry) fillVersion(ctx context.Context, provider *Provider, skipVerify bool) error {\n//\tif provider.Version != \"\" && provider.Version != \"latest\" && skipVerify {\n//\t\treturn nil\n//\t}\n//\n//\tmetadata, err := x.getProviderMetadata(ctx, provider)\n//\tif err != nil {\n//\t\treturn err\n//\t}\n//\tif provider.Version != \"\" && provider.Version != \"latest\" {\n//\t\t// check version number 
exists\n//\t\tfor _, version := range metadata.Versions {\n//\t\t\tif provider.Version != version {\n//\t\t\t\tcontinue\n//\t\t\t}\n//\t\t\treturn nil\n//\t\t}\n//\t\treturn errors.New(\"version not found\")\n//\t}\n//\tprovider.Version = metadata.LatestVersion\n//\treturn nil\n//}\n\nfunc (x *ProviderGithubRegistry) getProviderMetadata(ctx context.Context, provider *Provider) (*ProviderMetadata, error) {\n\treturn getProviderMeta(ctx, x.buildRegistryUrl(), provider)\n}\n\nfunc (x *ProviderGithubRegistry) Download(ctx context.Context, provider *Provider, options *ProviderRegistryDownloadOptions) (string, error) {\n\treturn downloadProvider(ctx, x.buildRegistryUrl(), provider, options)\n}\n\nfunc (x *ProviderGithubRegistry) GetMetadata(ctx context.Context, provider *Provider) (*ProviderMetadata, error) {\n\treturn getProviderMeta(ctx, x.buildRegistryUrl(), provider)\n}\n\nfunc (x *ProviderGithubRegistry) GetSupplement(ctx context.Context, provider *Provider) (*ProviderSupplement, error) {\n\treturn getProviderSupplement(ctx, x.buildRegistryUrl(), provider)\n}\n\n// ------------------------------------------------- --------------------------------------------------------------------\n\nfunc downloadProvider(ctx context.Context, registryUrl string, provider *Provider, options *ProviderRegistryDownloadOptions) (string, error) {\n\n\tif err := formatProviderVersion(ctx, registryUrl, provider); err != nil {\n\t\treturn \"\", err\n\t}\n\n\tsupplement, err := getProviderSupplement(ctx, registryUrl, provider)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\tif err := utils.EnsureDirectoryNotExists(options.ProviderDownloadDirectoryPath); err != nil {\n\t\treturn \"\", err\n\t}\n\n\tgithubReleaseAssertName := supplement.PackageName + \"_\" + strings.Replace(provider.Version, \"v\", \"\", 1) + \"_\" + runtime.GOOS + \"_\" + runtime.GOARCH\n\n\t// TODO optimization, Improve compatibility\n\t// The providerBinarySuffix depends on the provider repository's CI. 
If that CI changes the providerBinarySuffix, it must be changed accordingly\n\tgithubReleaseAssertURL := supplement.Source + \"/releases/download/\" + provider.Version + \"/\" + githubReleaseAssertName + \".tar.gz\"\n\n\tif !pointer.FromBoolPointerOrDefault(options.SkipVerify, true) {\n\t\tchecksum, err := supplement.Checksums.selectChecksums()\n\t\tif err != nil {\n\t\t\treturn \"\", err\n\t\t}\n\t\tgithubReleaseAssertURL += \"?checksum=sha256:\" + checksum\n\t}\n\n\tevent := telemetry.NewEvent(\"provider-install\").\n\t\tAdd(\"url\", githubReleaseAssertURL).\n\t\tAdd(\"provider_name\", provider.Name).\n\t\tAdd(\"provider_version\", provider.Version)\n\td := telemetry.Submit(ctx, event)\n\tif utils.IsNotEmpty(d) {\n\t\tlogger.ErrorF(\"telemetry provider install, msg = %s\", d.String())\n\t}\n\n\t//targetUrl := cli_env.GetSelefraCloudHttpHost() + \"/diagnosis.tar.gz?url=\" + base64.StdEncoding.EncodeToString([]byte(githubReleaseAssertURL))\n\terr = http_client.DownloadToDirectory(ctx, options.ProviderDownloadDirectoryPath, githubReleaseAssertURL, options.ProgressTracker)\n\t//err = http_client.DownloadToDirectory(ctx, options.ProviderDownloadDirectoryPath, targetUrl, options.ProgressTracker)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\t// search download file\n\tproviderExecuteFilePath := filepath.Join(options.ProviderDownloadDirectoryPath, supplement.PackageName+providerBinarySuffix)\n\tif utils.Exists(providerExecuteFilePath) {\n\t\treturn providerExecuteFilePath, nil\n\t}\n\n\treturn \"\", fmt.Errorf(\"provider %s download failed\", supplement.PackageName)\n}\n\nfunc formatProviderVersion(ctx context.Context, registryUrl string, provider *Provider) error {\n\tif !provider.IsLatestVersion() {\n\t\treturn nil\n\t}\n\tmeta, err := getProviderMeta(ctx, registryUrl, provider)\n\tif err != nil {\n\t\treturn err\n\t}\n\tprovider.Version = meta.LatestVersion\n\treturn nil\n}\n\nfunc buildProviderDownloadPath(downloadWorkspace string, provider *Provider) string 
{\n\treturn fmt.Sprintf(\"%s/%s/%s/%s\", downloadWorkspace, ProvidersListDirectoryName, provider.Name, provider.Version)\n}\n\nfunc getProviderMeta(ctx context.Context, registryUrl string, provider *Provider) (*ProviderMetadata, error) {\n\tmetadataUrl := fmt.Sprintf(\"%smain/%s/%s/%s\", registryUrl, ProvidersListDirectoryName, provider.Name, MetaDataFileName)\n\tgetYaml, err := http_client.GetYaml[*ProviderMetadata](ctx, metadataUrl)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn getYaml, nil\n}\n\nfunc getProviderSupplement(ctx context.Context, registryUrl string, provider *Provider) (*ProviderSupplement, error) {\n\tsupplementUrl := fmt.Sprintf(\"%smain/%s/%s/%s/%s\", registryUrl, ProvidersListDirectoryName, provider.Name, provider.Version, SupplementFileName)\n\tsupplement, err := http_client.GetYaml[*ProviderSupplement](ctx, supplementUrl)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn supplement, nil\n}\n\n// ------------------------------------------------- --------------------------------------------------------------------\n"
  },
  {
    "path": "pkg/registry/provider_github_registry_test.go",
    "content": "package registry\n\nimport (\n\t\"context\"\n\t\"github.com/stretchr/testify/assert\"\n\t\"testing\"\n)\n\nfunc newTestProviderGithubRegistry() *ProviderGithubRegistry {\n\tregistry, err := NewProviderGithubRegistry(&ProviderGithubRegistryOptions{\n\t\tDownloadWorkspace: \"./test_download\",\n\t})\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\treturn registry\n}\n\nfunc TestProviderGithubRegistry_Download(t *testing.T) {\n\t// TODO\n\tdownload, err := newTestProviderGithubRegistry().Download(context.Background(), NewProvider(\"aws\", \"v0.0.1\"), &ProviderRegistryDownloadOptions{\n\t\tProviderDownloadDirectoryPath: \"./test_download/providers/aws/v0.0.1\",\n\t})\n\tassert.Nil(t, err)\n\tassert.NotEmpty(t, download)\n}\n\nfunc TestProviderGithubRegistry_List(t *testing.T) {\n\tproviderSlice, err := newTestProviderGithubRegistry().List(context.Background())\n\tassert.Nil(t, err)\n\tassert.NotNil(t, providerSlice)\n}\n\nfunc TestProviderGithubRegistry_Search(t *testing.T) {\n\tproviders, err := newTestProviderGithubRegistry().Search(context.Background(), \"a\")\n\tassert.Nil(t, err)\n\tassert.NotNil(t, providers)\n}\n\nfunc TestProviderGithubRegistry_GetAllVersion(t *testing.T) {\n\tproviders, err := newTestProviderGithubRegistry().GetAllVersion(context.Background(), NewProvider(\"aws\", \"\"))\n\tassert.Nil(t, err)\n\tassert.NotNil(t, providers)\n}\n\nfunc TestProviderGithubRegistry_GetMetadata(t *testing.T) {\n\tproviderMetadata, err := newTestProviderGithubRegistry().GetMetadata(context.Background(), NewProvider(\"aws\", \"\"))\n\tassert.Nil(t, err)\n\tassert.NotNil(t, providerMetadata)\n}\n\nfunc TestProviderGithubRegistry_GetSupplement(t *testing.T) {\n\tproviderSupplement, err := newTestProviderGithubRegistry().GetSupplement(context.Background(), NewProvider(\"aws\", \"v0.0.10\"))\n\tassert.Nil(t, err)\n\tassert.NotNil(t, providerSupplement)\n}\n\nfunc TestProviderGithubRegistry_GetLatestVersion(t *testing.T) {\n\tproviderSupplement, err := 
newTestProviderGithubRegistry().GetLatestVersion(context.Background(), NewProvider(\"aws\", \"\"))\n\tassert.Nil(t, err)\n\tassert.NotNil(t, providerSupplement)\n}\n\nfunc TestProviderGithubRegistry_CheckUpdate(t *testing.T) {\n\tproviderSupplement, err := newTestProviderGithubRegistry().CheckUpdate(context.Background(), NewProvider(\"aws\", \"v0.0.9\"))\n\tassert.Nil(t, err)\n\tassert.NotNil(t, providerSupplement)\n}\n"
  },
  {
    "path": "pkg/registry/provider_local_registry.go",
    "content": "package registry\n\nimport (\n\t\"context\"\n\t\"fmt\"\n\t\"github.com/selefra/selefra/pkg/utils\"\n\t\"os\"\n\t\"path/filepath\"\n\t\"strings\"\n)\n\n// ProviderLocalRegistry The local path implementation of the provider repository\ntype ProviderLocalRegistry struct {\n\tregistryDirectory          string\n\tregistryGitHubRepoFullName string\n}\n\nvar _ ProviderRegistry = (*ProviderLocalRegistry)(nil)\n\nfunc NewProviderLocalRegistry(registryDirectory string, registryGitHubRepoFullName ...string) (*ProviderLocalRegistry, error) {\n\tstat, err := os.Stat(registryDirectory)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"visit registryDirectory %s error: %s\", registryDirectory, err.Error())\n\t}\n\tif !stat.IsDir() {\n\t\treturn nil, fmt.Errorf(\"%s is not registryDirectory\", registryDirectory)\n\t}\n\n\tif len(registryGitHubRepoFullName) == 0 {\n\t\tregistryGitHubRepoFullName = append(registryGitHubRepoFullName, ProviderGithubRegistryDefaultRepoFullName)\n\t}\n\n\treturn &ProviderLocalRegistry{\n\t\tregistryDirectory:          registryDirectory,\n\t\tregistryGitHubRepoFullName: registryGitHubRepoFullName[0],\n\t}, nil\n}\n\nfunc (x *ProviderLocalRegistry) CheckUpdate(ctx context.Context, provider *Provider) (*Provider, error) {\n\n\tif provider.IsLatestVersion() {\n\t\treturn nil, nil\n\t}\n\n\tmetaPath := filepath.Join(x.registryDirectory, ProvidersListDirectoryName, provider.Name, MetaDataFileName)\n\tmeta, err := utils.ReadYamlFile[*ProviderMetadata](metaPath)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tif meta.LatestVersion == provider.Version {\n\t\treturn nil, nil\n\t}\n\n\treturn NewProvider(provider.Name, meta.LatestVersion), nil\n}\n\nfunc (x *ProviderLocalRegistry) GetMetadata(ctx context.Context, provider *Provider) (*ProviderMetadata, error) {\n\tmetaPath := filepath.Join(x.registryDirectory, ProvidersListDirectoryName, provider.Name, MetaDataFileName)\n\tmeta, err := utils.ReadYamlFile[*ProviderMetadata](metaPath)\n\tif err != 
nil {\n\t\treturn nil, err\n\t}\n\treturn meta, nil\n}\n\nfunc (x *ProviderLocalRegistry) GetSupplement(ctx context.Context, provider *Provider) (*ProviderSupplement, error) {\n\tsupplementPath := filepath.Join(x.registryDirectory, ProvidersListDirectoryName, provider.Name, provider.Version, SupplementFileName)\n\tsupplement, err := utils.ReadYamlFile[*ProviderSupplement](supplementPath)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn supplement, nil\n}\n\nfunc (x *ProviderLocalRegistry) GetLatestVersion(ctx context.Context, provider *Provider) (*Provider, error) {\n\tmetaPath := filepath.Join(x.registryDirectory, ProvidersListDirectoryName, provider.Name, MetaDataFileName)\n\tmeta, err := utils.ReadYamlFile[*ProviderMetadata](metaPath)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn NewProvider(provider.Name, meta.LatestVersion), nil\n}\n\nfunc (x *ProviderLocalRegistry) GetAllVersion(ctx context.Context, provider *Provider) ([]*Provider, error) {\n\tmeta, err := x.GetMetadata(ctx, provider)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tproviderSlice := make([]*Provider, 0, len(meta.Versions))\n\tfor _, v := range meta.Versions {\n\t\tproviderSlice = append(providerSlice, NewProvider(provider.Name, v))\n\t}\n\treturn providerSlice, nil\n}\n\nfunc (x *ProviderLocalRegistry) Download(ctx context.Context, provider *Provider, options *ProviderRegistryDownloadOptions) (string, error) {\n\tregistry, err := NewProviderGithubRegistry(NewProviderGithubRegistryOptions(x.registryDirectory, x.registryGitHubRepoFullName))\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\treturn downloadProvider(ctx, registry.buildRegistryUrl(), provider, options)\n}\n\nfunc (x *ProviderLocalRegistry) Search(ctx context.Context, keyword string) ([]*Provider, error) {\n\tallProviderSlice, err := x.List(ctx)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tkeyword = strings.ToLower(keyword)\n\thitProviderSlice := make([]*Provider, 0)\n\tfor _, provider := range allProviderSlice 
{\n\t\tif strings.Contains(strings.ToLower(provider.Name), keyword) {\n\t\t\thitProviderSlice = append(hitProviderSlice, provider)\n\t\t}\n\t}\n\treturn hitProviderSlice, nil\n}\n\nfunc (x *ProviderLocalRegistry) List(ctx context.Context) ([]*Provider, error) {\n\tprovidersListDirectoryPath := filepath.Join(x.registryDirectory, ProvidersListDirectoryName)\n\tentrySlice, err := os.ReadDir(providersListDirectoryPath)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tproviderSlice := make([]*Provider, 0)\n\tfor _, entry := range entrySlice {\n\t\tif !entry.IsDir() {\n\t\t\tcontinue\n\t\t}\n\t\tif entry.Name() == \"template\" {\n\t\t\tcontinue\n\t\t}\n\t\tmetaFilePath := filepath.Join(providersListDirectoryPath, entry.Name(), MetaDataFileName)\n\t\tmeta, err := utils.ReadYamlFile[*ProviderMetadata](metaFilePath)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tproviderSlice = append(providerSlice, NewProvider(meta.Name, meta.LatestVersion))\n\t}\n\treturn providerSlice, nil\n}\n"
  },
  {
    "path": "pkg/registry/provider_local_registry_test.go",
    "content": "package registry\n\nimport (\n\t\"context\"\n\t\"github.com/selefra/selefra-utils/pkg/pointer\"\n\t\"github.com/selefra/selefra/pkg/http_client\"\n\t\"github.com/stretchr/testify/assert\"\n\t\"path/filepath\"\n\t\"testing\"\n\t\"time\"\n)\n\nfunc newTestProviderLocalRegistry() *ProviderLocalRegistry {\n\tdownloadDirectory := \"./test_download/registry/github/selefra/registry/\"\n\terr := http_client.NewGitHubRepoDownloader().Download(context.Background(), &http_client.GitHubRepoDownloaderOptions{\n\t\tOwner:             \"selefra\",\n\t\tRepo:              \"registry\",\n\t\tDownloadDirectory: downloadDirectory,\n\t\tCacheTime:         pointer.ToDurationPointer(time.Hour),\n\t\t// TODO no ProgressListener, is ok?\n\t})\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\tregistryDirectory := filepath.Join(downloadDirectory, \"registry-main\")\n\tregistry, err := NewProviderLocalRegistry(registryDirectory)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\treturn registry\n}\n\nfunc TestProviderLocalRegistry_Download(t *testing.T) {\n\tdownload, err := newTestProviderLocalRegistry().Download(context.Background(), NewProvider(\"aws\", \"v0.0.1\"), &ProviderRegistryDownloadOptions{\n\t\tProviderDownloadDirectoryPath: \"./test_download/providers/aws/v0.0.1\",\n\t})\n\tassert.Nil(t, err)\n\tassert.NotEmpty(t, download)\n}\n\nfunc TestProviderLocalRegistry_List(t *testing.T) {\n\tproviderSlice, err := newTestProviderLocalRegistry().List(context.Background())\n\tassert.Nil(t, err)\n\tassert.NotNil(t, providerSlice)\n}\n\nfunc TestProviderLocalRegistry_Search(t *testing.T) {\n\tproviders, err := newTestProviderLocalRegistry().Search(context.Background(), \"a\")\n\tassert.Nil(t, err)\n\tassert.NotNil(t, providers)\n}\n\nfunc TestProviderLocalRegistry_GetAllVersion(t *testing.T) {\n\tproviders, err := newTestProviderLocalRegistry().GetAllVersion(context.Background(), 
NewProvider(\"aws\", \"\"))\n\tassert.Nil(t, err)\n\tassert.NotNil(t, providers)\n}\n\nfunc TestProviderLocalRegistry_GetMetadata(t *testing.T) {\n\tproviderMetadata, err := newTestProviderLocalRegistry().GetMetadata(context.Background(), NewProvider(\"aws\", \"\"))\n\tassert.Nil(t, err)\n\tassert.NotNil(t, providerMetadata)\n}\n\nfunc TestProviderLocalRegistry_GetSupplement(t *testing.T) {\n\tproviderSupplement, err := newTestProviderLocalRegistry().GetSupplement(context.Background(), NewProvider(\"aws\", \"v0.0.10\"))\n\tassert.Nil(t, err)\n\tassert.NotNil(t, providerSupplement)\n}\n\nfunc TestProviderLocalRegistry_GetLatestVersion(t *testing.T) {\n\tproviderSupplement, err := newTestProviderLocalRegistry().GetLatestVersion(context.Background(), NewProvider(\"aws\", \"\"))\n\tassert.Nil(t, err)\n\tassert.NotNil(t, providerSupplement)\n}\n\nfunc TestProviderLocalRegistry_CheckUpdate(t *testing.T) {\n\tproviderSupplement, err := newTestProviderLocalRegistry().CheckUpdate(context.Background(), NewProvider(\"aws\", \"v0.0.9\"))\n\tassert.Nil(t, err)\n\tassert.NotNil(t, providerSupplement)\n}\n"
  },
  {
    "path": "pkg/registry/provider_registry.go",
    "content": "package registry\n\nimport (\n\t\"context\"\n\t\"errors\"\n\t\"github.com/hashicorp/go-getter\"\n\t\"github.com/selefra/selefra/pkg/version\"\n\t\"runtime\"\n)\n\n// ------------------------------------------------- --------------------------------------------------------------------\n\nconst (\n\tProvidersListDirectoryName = \"provider\"\n\tMetaDataFileName           = \"metadata.yaml\"\n\tSupplementFileName         = \"supplement.yaml\"\n)\n\n// ------------------------------------------------- --------------------------------------------------------------------\n\ntype Provider struct {\n\t*version.NameAndVersion\n}\n\nfunc NewProvider(providerName, providerVersion string) *Provider {\n\treturn &Provider{\n\t\tNameAndVersion: version.NewNameAndVersion(providerName, providerVersion),\n\t}\n}\n\n// ParseProvider example: aws@v0.0.1\nfunc ParseProvider(providerNameAndVersion string) *Provider {\n\tnameAndVersion := version.ParseNameAndVersion(providerNameAndVersion)\n\treturn &Provider{\n\t\tNameAndVersion: nameAndVersion,\n\t}\n}\n\n// ------------------------------------------------- --------------------------------------------------------------------\n\ntype ProviderMetadata struct {\n\tName          string   `json:\"name\" yaml:\"name\"`\n\tLatestVersion string   `json:\"latest-version\" yaml:\"latest-version\"`\n\tLatestUpdate  string   `json:\"latest-updated\" yaml:\"latest-updated\"`\n\tIntroduction  string   `json:\"introduction\" yaml:\"introduction\"`\n\tVersions      []string `json:\"versions\" yaml:\"versions\"`\n}\n\nfunc (x *ProviderMetadata) HasVersion(version string) bool {\n\tfor _, v := range x.Versions {\n\t\tif v == version {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}\n\ntype ProviderSupplement struct {\n\tPackageName string    `json:\"package-name\" yaml:\"package-name\"`\n\tSource      string    `json:\"source\" yaml:\"source\"`\n\tChecksums   Checksums `json:\"checksums\" yaml:\"checksums\"`\n}\n\ntype Checksums struct 
{\n\tLinuxArm64   string `json:\"linux_arm64\" yaml:\"linux_arm64\"`\n\tLinuxAmd64   string `json:\"linux_amd64\" yaml:\"linux_amd64\"`\n\tWindowsArm64 string `json:\"windows_arm64\" yaml:\"windows_arm64\"`\n\tWindowsAmd64 string `json:\"windows_amd64\" yaml:\"windows_amd64\"`\n\tDarwinArm64  string `json:\"darwin_arm64\" yaml:\"darwin_arm64\"`\n\tDarwinAmd64  string `json:\"darwin_amd64\" yaml:\"darwin_amd64\"`\n}\n\nfunc (x *Checksums) selectChecksums() (string, error) {\n\tswitch runtime.GOOS {\n\tcase \"darwin\":\n\t\tswitch runtime.GOARCH {\n\t\tcase \"amd64\":\n\t\t\treturn x.DarwinAmd64, nil\n\t\tcase \"arm64\":\n\t\t\treturn x.DarwinArm64, nil\n\t\tdefault:\n\t\t\treturn \"\", errors.New(\"unsupported arch\")\n\t\t}\n\tcase \"windows\":\n\t\tswitch runtime.GOARCH {\n\t\tcase \"amd64\":\n\t\t\treturn x.WindowsAmd64, nil\n\t\tcase \"arm64\":\n\t\t\treturn x.WindowsArm64, nil\n\t\tdefault:\n\t\t\treturn \"\", errors.New(\"unsupported arch\")\n\t\t}\n\tcase \"linux\":\n\t\tswitch runtime.GOARCH {\n\t\tcase \"amd64\":\n\t\t\treturn x.LinuxAmd64, nil\n\t\tcase \"arm64\":\n\t\t\treturn x.LinuxArm64, nil\n\t\tdefault:\n\t\t\treturn \"\", errors.New(\"unsupported arch\")\n\t\t}\n\tdefault:\n\t\treturn \"\", errors.New(\"unsupported os\")\n\t}\n}\n\n// ------------------------------------------------- --------------------------------------------------------------------\n\n// ProviderRegistryDownloadOptions Some options when downloading the provider\ntype ProviderRegistryDownloadOptions struct {\n\n\t// Which directory to save the downloaded provider to\n\tProviderDownloadDirectoryPath string\n\n\t// Whether to skip authentication. 
If the authentication is skipped, the checksum of the downloaded file is not verified\n\tSkipVerify *bool\n\n\t// Downloading can be time-consuming, so you can set up a monitor to track progress\n\tProgressTracker getter.ProgressTracker\n}\n\n// ProviderRegistry Used to represent the registry of a provider\ntype ProviderRegistry interface {\n\n\t// CheckUpdate Check whether the given provider has a newer version\n\tCheckUpdate(ctx context.Context, provider *Provider) (*Provider, error)\n\n\t// GetLatestVersion Gets the latest version of the specified provider\n\tGetLatestVersion(ctx context.Context, provider *Provider) (*Provider, error)\n\n\t// GetAllVersion Gets all versions of the given provider\n\tGetAllVersion(ctx context.Context, provider *Provider) ([]*Provider, error)\n\n\tGetMetadata(ctx context.Context, provider *Provider) (*ProviderMetadata, error)\n\n\tGetSupplement(ctx context.Context, provider *Provider) (*ProviderSupplement, error)\n\n\t// Download the provider of the given version\n\tDownload(ctx context.Context, provider *Provider, options *ProviderRegistryDownloadOptions) (string, error)\n\n\t// Search for providers in registry by keyword\n\tSearch(ctx context.Context, keyword string) ([]*Provider, error)\n\n\t// List Lists all providers on registry\n\tList(ctx context.Context) ([]*Provider, error)\n}\n\n// ------------------------------------------------- --------------------------------------------------------------------\n"
  },
  {
    "path": "pkg/selefra_workspace/device_id.go",
    "content": "package selefra_workspace\n\nimport (\n\t\"github.com/selefra/selefra-provider-sdk/provider/schema\"\n\t\"github.com/selefra/selefra-utils/pkg/id_util\"\n\t\"github.com/selefra/selefra/pkg/logger\"\n\t\"github.com/selefra/selefra/pkg/utils\"\n\t\"path/filepath\"\n\t\"sync\"\n)\n\n// DeviceInformation Stores information about the current device\n// We will save a device file in the working directory of selefra.\n// This file is used for coordination in case of distributed conflicts.\n// Please do not manually edit or delete this file, otherwise it may cause program errors\ntype DeviceInformation struct {\n\n\t// This is the only ID available, This is for distributed collaboration with other nodes\n\t// There is no device information collection at present, and there will not be any in the future.\n\t// I hope to make a real open source and mutual assistance software, and my boss also thinks so\n\tID string `json:\"id\"`\n}\n\nvar deviceInformationOnce sync.Once\n\n// GetDeviceID Gets the ID of this device\nfunc GetDeviceID() (string, *schema.Diagnostics) {\n\n\tdiagnostics := schema.NewDiagnostics()\n\n\t// Ensure that the device file exists\n\tdeviceInformationOnce.Do(func() {\n\t\td := EnsureDeviceIDExists()\n\t\tif utils.HasError(d) {\n\t\t\tlogger.ErrorF(\"EnsureDeviceIDExists error: %s\", d.String())\n\t\t}\n\t})\n\n\tpath, err := GetDeviceInformationFilePath()\n\tif err != nil {\n\t\treturn \"\", diagnostics.AddErrorMsg(\"get device file path error: %s\", err.Error())\n\t}\n\n\tinformation, err := utils.ReadJsonFile[*DeviceInformation](path)\n\tif err != nil {\n\t\treturn \"\", diagnostics.AddErrorMsg(\"read device file error: %s\", err.Error())\n\t}\n\tif information == nil || information.ID == \"\" {\n\t\treturn \"\", diagnostics.AddErrorMsg(\"device id not found\")\n\t}\n\treturn information.ID, nil\n}\n\n// EnsureDeviceIDExists Ensure that the device file exists\nfunc EnsureDeviceIDExists() *schema.Diagnostics {\n\n\tdiagnostics := 
schema.NewDiagnostics()\n\n\tpath, err := GetDeviceInformationFilePath()\n\tif err != nil {\n\t\treturn diagnostics.AddErrorMsg(\"get device file path error: %s\", err.Error())\n\t}\n\n\t// If the device file already exists, it is not generated again\n\tinformation, err := utils.ReadJsonFile[*DeviceInformation](path)\n\tif err == nil || (information != nil && information.ID != \"\") {\n\t\treturn nil\n\t}\n\n\tinformation = &DeviceInformation{\n\t\tID: id_util.RandomId(),\n\t}\n\terr = utils.WriteJsonFile(path, information)\n\tif err != nil {\n\t\treturn diagnostics.AddErrorMsg(\"write device file error: %s\", err.Error())\n\t}\n\treturn diagnostics\n}\n\n// GetDeviceInformationFilePath Obtain the directory for storing device files\nfunc GetDeviceInformationFilePath() (string, error) {\n\tselefraHomeWorkspace, err := GetSelefraWorkspaceDirectory()\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\treturn filepath.Join(selefraHomeWorkspace, \"device.json\"), nil\n}\n"
  },
  {
    "path": "pkg/selefra_workspace/device_id_test.go",
    "content": "package selefra_workspace\n\nimport (\n\t\"github.com/selefra/selefra/pkg/utils\"\n\t\"testing\"\n)\n\nfunc TestGetDeviceID(t *testing.T) {\n\tid, diagnostics := GetDeviceID()\n\tif utils.HasError(diagnostics) {\n\t\tt.Log(diagnostics.String())\n\t}\n\tt.Log(id)\n}\n"
  },
  {
    "path": "pkg/selefra_workspace/workspace.go",
    "content": "package selefra_workspace\n\nimport \"github.com/selefra/selefra/config\"\n\n// GetSelefraWorkspaceDirectory Gets the path of the workspace for selefra\nfunc GetSelefraWorkspaceDirectory() (string, error) {\n\t// TODO Migrate the concrete implementation here\n\treturn config.GetSelefraHomeWorkspacePath()\n}\n"
  },
  {
    "path": "pkg/storage/pgstorage/constants.go",
    "content": "package pgstorage\n\nconst LockId = \"selefra-fetch-lock\"\n"
  },
  {
    "path": "pkg/storage/pgstorage/pgstorage-bak.go",
    "content": "package pgstorage\n\n//import (\n//\t\"context\"\n//\t\"errors\"\n//\t\"github.com/selefra/selefra-provider-sdk/provider/schema\"\n//\t\"github.com/selefra/selefra-provider-sdk/storage\"\n//\t\"github.com/selefra/selefra-provider-sdk/storage/database_storage/postgresql_storage\"\n//\t\"github.com/selefra/selefra-provider-sdk/storage_factory\"\n//\t\"github.com/selefra/selefra/config\"\n//\t\"github.com/selefra/selefra/global\"\n//\n//\t\"github.com/selefra/selefra/pkg/oci\"\n//\t\"github.com/selefra/selefra/ui\"\n//)\n//\n//type Option func(pgopts *postgresql_storage.PostgresqlStorageOptions)\n//\n//func DefaultPgStorageOpts() *postgresql_storage.PostgresqlStorageOptions {\n//\tdsn := getDsn()\n//\n//\tpgopts := postgresql_storage.NewPostgresqlStorageOptions(dsn)\n//\n//\treturn pgopts\n//}\n//\n//func WithSearchPath(searchPath string) Option {\n//\treturn func(pgopts *postgresql_storage.PostgresqlStorageOptions) {\n//\t\tpgopts.SearchPath = searchPath\n//\t}\n//}\n//\n//func PgStorageWithMeta(ctx context.Context, meta *schema.ClientMeta, opts ...Option) (*postgresql_storage.PostgresqlStorage, error) {\n//\tpgopts := DefaultPgStorageOpts()\n//\n//\tfor _, opt := range opts {\n//\t\topt(pgopts)\n//\t}\n//\n//\tstorage, diag := postgresql_storage.NewPostgresqlStorage(ctx, pgopts)\n//\tif diag != nil {\n//\t\tif diag != nil {\n//\t\t\terr := ui.PrintDiagnostic(diag.GetDiagnosticSlice())\n//\t\t\tif err != nil {\n//\t\t\t\treturn nil, errors.New(`The database maybe not ready.\n//\t\tYou can execute the following command to install the official database image.\n//\t\tdocker run --name selefra_postgres -p 5432:5432 -e POSTGRES_PASSWORD=pass -d postgres\\n`)\n//\t\t\t}\n//\t\t}\n//\t}\n//\n//\tstorage.SetClientMeta(meta)\n//\n//\treturn storage, nil\n//}\n//\n//func PgStorage(ctx context.Context, opts ...Option) (*postgresql_storage.PostgresqlStorage, *schema.Diagnostics) {\n//\tpgopts := DefaultPgStorageOpts()\n//\n//\tfor _, opt := range opts 
{\n//\t\topt(pgopts)\n//\t}\n//\n//\treturn postgresql_storage.NewPostgresqlStorage(ctx, pgopts)\n//}\n//\n//func Storage(ctx context.Context, opts ...Option) (storage.Storage, *schema.Diagnostics) {\n//\tpgopts := DefaultPgStorageOpts()\n//\n//\tfor _, opt := range opts {\n//\t\topt(pgopts)\n//\t}\n//\n//\treturn storage_factory.NewStorage(ctx, storage_factory.StorageTypePostgresql, pgopts)\n//}\n//\n//func GetStorageValue(ctx context.Context, storage *postgresql_storage.PostgresqlStorage, key string) (string, error) {\n//\tv, diag := storage.GetValue(ctx, key)\n//\tif diag != nil {\n//\t\terr := ui.PrintDiagnostic(diag.GetDiagnosticSlice())\n//\t\tif err != nil {\n//\t\t\treturn \"\", err\n//\t\t}\n//\t}\n//\treturn v, nil\n//}\n//\n//func SetStorageValue(ctx context.Context, storage *postgresql_storage.PostgresqlStorage, key, value string) error {\n//\tif diag := storage.SetKey(ctx, key, value); diag != nil {\n//\t\terr := ui.PrintDiagnostic(diag.GetDiagnosticSlice())\n//\t\tif err != nil {\n//\t\t\treturn err\n//\t\t}\n//\t}\n//\n//\treturn nil\n//}\n//\n////func getDsn() (dsn string) {\n////\tvar err error\n////\tif global.Token() != \"\" && global.RelvPrjName() != \"\" {\n////\t\tdsn, err = http_client.GetDsn(global.Token())\n////\t\tif err != nil {\n////\t\t\tui.Errorln(err.Error())\n////\t\t\treturn \"\"\n////\t\t}\n////\t}\n////\n////\terr = oci.RunDB()\n////\tif err != nil {\n////\t\tui.Errorln(err.Error())\n////\t\treturn \"\"\n////\t}\n////\tdb := &config.DB{\n////\t\tDriver:   \"\",\n////\t\tType:     \"postgres\",\n////\t\tUsername: \"postgres\",\n////\t\tPassword: \"pass\",\n////\t\tHost:     \"localhost\",\n////\t\tPort:     \"15432\",\n////\t\tDatabase: \"postgres\",\n////\t\tSSLMode:  \"disable\",\n////\t\tExtras:   nil,\n////\t}\n////\tdsn = \"host=\" + db.Host + \" user=\" + db.Username + \" password=\" + db.Password + \" port=\" + db.Port + \" dbname=\" + db.Database + \" \" + \"sslmode=disable\"\n////\treturn\n////}\n"
  },
  {
    "path": "pkg/storage/pgstorage/pgstorage-bak_test.go",
    "content": "package pgstorage\n\n//import (\n//\t\"context\"\n//\t\"github.com/stretchr/testify/require\"\n//\t\"testing\"\n//)\n//\n//func Test_Storage(t *testing.T) {\n//\tctx := context.Background()\n//\tsto, diag := Storage(ctx)\n//\n//\trequire.NotNil(t, sto, \"build storage failed\")\n//\n//\trequire.Nil(t, diag)\n//}\n"
  },
  {
    "path": "pkg/storage/pgstorage/pgstorage.go",
    "content": "package pgstorage\n\nimport (\n\t\"context\"\n\t\"github.com/selefra/selefra-provider-sdk/provider/schema\"\n\t\"github.com/selefra/selefra-provider-sdk/storage/database_storage/postgresql_storage\"\n\t\"github.com/selefra/selefra/cli_ui\"\n\t\"github.com/selefra/selefra/pkg/message\"\n\t\"github.com/selefra/selefra/pkg/modules/module\"\n\t\"github.com/selefra/selefra/pkg/oci\"\n\t\"github.com/songzhibin97/gkit/tools/pointer\"\n\t\"strings\"\n\t\"sync\"\n)\n\ntype Option func(pgopts *postgresql_storage.PostgresqlStorageOptions)\n\n//func DefaultPgStorageOpts() *postgresql_storage.PostgresqlStorageOptions {\n//\tdsn := getDsn()\n//\n//\tpgopts := postgresql_storage.NewPostgresqlStorageOptions(dsn)\n//\n//\treturn pgopts\n//}\n\nfunc WithSearchPath(searchPath string) Option {\n\treturn func(pgopts *postgresql_storage.PostgresqlStorageOptions) {\n\t\tpgopts.SearchPath = searchPath\n\t}\n}\n\n//func PgStorageWithMeta(ctx context.Context, meta *schema.ClientMeta, opts ...Option) (*postgresql_storage.PostgresqlStorage, error) {\n//\tpgopts := DefaultPgStorageOpts()\n//\n//\tfor _, opt := range opts {\n//\t\topt(pgopts)\n//\t}\n//\n//\tstorage, diag := postgresql_storage.NewPostgresqlStorage(ctx, pgopts)\n//\tif diag != nil {\n//\t\tif diag != nil {\n//\t\t\terr := cli_ui.PrintDiagnostic(diag.GetDiagnosticSlice())\n//\t\t\tif err != nil {\n//\t\t\t\treturn nil, errors.New(`The database maybe not ready.\n//\t\tYou can execute the following command to install the official database image.\n//\t\tdocker run --name selefra_postgres -p 5432:5432 -e POSTGRES_PASSWORD=pass -d postgres\\n`)\n//\t\t\t}\n//\t\t}\n//\t}\n//\n//\tstorage.SetClientMeta(meta)\n//\n//\treturn storage, nil\n//}\n//\n//func PgStorage(ctx context.Context, opts ...Option) (*postgresql_storage.PostgresqlStorage, *schema.Diagnostics) {\n//\tpgopts := DefaultPgStorageOpts()\n//\n//\tfor _, opt := range opts {\n//\t\topt(pgopts)\n//\t}\n//\n//\treturn postgresql_storage.NewPostgresqlStorage(ctx, 
pgopts)\n//}\n//\n//func Storage(ctx context.Context, opts ...Option) (storage.Storage, *schema.Diagnostics) {\n//\tpgopts := DefaultPgStorageOpts()\n//\n//\tfor _, opt := range opts {\n//\t\topt(pgopts)\n//\t}\n//\n//\treturn storage_factory.NewStorage(ctx, storage_factory.StorageTypePostgresql, pgopts)\n//}\n\nfunc GetStorageValue(ctx context.Context, storage *postgresql_storage.PostgresqlStorage, key string) (string, error) {\n\tv, diag := storage.GetValue(ctx, key)\n\tif diag != nil {\n\t\terr := cli_ui.PrintDiagnostic(diag.GetDiagnosticSlice())\n\t\tif err != nil {\n\t\t\treturn \"\", err\n\t\t}\n\t}\n\treturn v, nil\n}\n\nfunc SetStorageValue(ctx context.Context, storage *postgresql_storage.PostgresqlStorage, key, value string) error {\n\tif diag := storage.SetKey(ctx, key, value); diag != nil {\n\t\terr := cli_ui.PrintDiagnostic(diag.GetDiagnosticSlice())\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\treturn nil\n}\n\n// ------------------------------------------------- --------------------------------------------------------------------\n\nvar runOCIPostgreSQLOnce sync.Once\n\n// DefaultPostgreSQL\n// 1. If the default Postgresql is not installed, install it\n// 2. If the default Postgresql is not started, start it\n// 3. 
Return the default Postgresql DSN connection\nfunc DefaultPostgreSQL(downloadWorkspace string, messageChannel *message.Channel[*schema.Diagnostics]) string {\n\n\tdefer func() {\n\t\tmessageChannel.SenderWaitAndClose()\n\t}()\n\n\tisRunSuccess := true\n\n\t//runOCIPostgreSQLOnce.Do(func() {\n\t//\tmessageChannel.Send(schema.NewDiagnostics().AddInfo(\"Use built-in PostgreSQL database...\"))\n\t//\tdownloader := oci.NewPostgreSQLDownloader(&oci.PostgreSQLDownloaderOptions{\n\t//\t\tMessageChannel:    messageChannel.MakeChildChannel(),\n\t//\t\tDownloadDirectory: downloadWorkspace,\n\t//\t})\n\t//\tisRunSuccess = downloader.Run(context.Background())\n\t//})\n\t//messageChannel.Send(schema.NewDiagnostics().AddInfo(\"Use built-in PostgreSQL database...\"))\n\tdownloader := oci.NewPostgreSQLDownloader(&oci.PostgreSQLDownloaderOptions{\n\t\tMessageChannel:    messageChannel.MakeChildChannel(),\n\t\tDownloadDirectory: downloadWorkspace,\n\t})\n\tisRunSuccess = downloader.Run(context.Background())\n\n\t// If the built-in Postgresql does not start successfully, a prompt is returned asking what to do next\n\tif !isRunSuccess {\n\t\terrorMsg := `\n\nSorry, the built-in Postgresql fails to start, please configure your own Postgresql connection\nexport SELEFRA_DATABASE_DSN='host=127.0.0.1 user=postgres password=pass port=15432 dbname=postgres sslmode=disable'\n\nIf you do not already have Postgresql installed, You can start an instance of Postgresql using Docker:\nsudo docker run -d --name selefra-postgres -p 15432:5432 -e POSTGRES_PASSWORD=pass postgres:14\n\nOr you can download and install Postgresql from its official website: \nhttps://www.postgresql.org/download/\n\nYou can check out our documentation: https://www.selefra.io/docs/faq#how-to-use-postgresql\n\n`\n\t\tmessageChannel.Send(schema.NewDiagnostics().AddErrorMsg(errorMsg))\n\t\treturn \"\"\n\t}\n\n\tdb := &module.ConnectionBlock{\n\t\tType:     \"postgres\",\n\t\tUsername: \"postgres\",\n\t\tPassword: 
oci.DefaultPostgreSQLPasswd,\n\t\tHost:     \"localhost\",\n\t\tPort:     pointer.ToUint64Pointer(oci.DefaultPostgreSQLPort),\n\t\tDatabase: \"postgres\",\n\t\tSSLMode:  \"disable\",\n\t\tExtras:   nil,\n\t}\n\treturn db.BuildDSN()\n}\n\n// ------------------------------------------------- --------------------------------------------------------------------\n\n// GetSchemaKey return provider schema named <required.name>_<required_version>_<provider_name>\nfunc GetSchemaKey(providerName, providerVersion string, providerConfigurationBlock *module.ProviderBlock) string {\n\tsourceArr := strings.Split(providerName, \"/\")\n\tvar source string\n\tif len(sourceArr) > 1 {\n\t\tsource = strings.Replace(sourceArr[1]+\"@\"+providerVersion, \"/\", \"_\", -1)\n\t} else {\n\t\tsource = strings.Replace(sourceArr[0]+\"@\"+providerVersion, \"/\", \"_\", -1)\n\t}\n\tsource = strings.Replace(source, \"@\", \"_\", -1)\n\tsource = strings.Replace(source, \".\", \"\", -1)\n\tif providerConfigurationBlock != nil {\n\t\tsource = source + \"_\" + providerConfigurationBlock.Name\n\t}\n\treturn source\n}\n\n// ------------------------------------------------- --------------------------------------------------------------------\n"
  },
  {
    "path": "pkg/storage/pgstorage/schema_owner_information.go",
    "content": "package pgstorage\n\nimport (\n\t\"context\"\n\t\"encoding/json\"\n\t\"github.com/selefra/selefra-provider-sdk/provider/schema\"\n\t\"github.com/selefra/selefra-provider-sdk/storage\"\n\t\"github.com/selefra/selefra/pkg/utils\"\n)\n\n// ------------------------------------------------- --------------------------------------------------------------------\n\nconst SchemaOwnerKey = \"schema-owner-key\"\n\n// SchemaOwnerInformation schema is held by whom, information about the holder\ntype SchemaOwnerInformation struct {\n\n\t// The host name of the holder\n\tHostname string `json:\"hostname\"`\n\n\t// Holder's ID\n\tHolderID string `json:\"holder_id\"`\n\n\t// The name of the holder's configuration file\n\tConfigurationName string `json:\"configuration_name\"`\n\n\t// MD5 configured when pulling data from this schema\n\tConfigurationMD5 string `json:\"configuration_md5\"`\n}\n\nfunc GetSchemaOwner(ctx context.Context, storage storage.Storage) (*SchemaOwnerInformation, *schema.Diagnostics) {\n\tvalue, diagnostics := storage.GetValue(ctx, SchemaOwnerKey)\n\tif utils.HasError(diagnostics) {\n\t\treturn nil, diagnostics\n\t}\n\tif value == \"\" {\n\t\treturn nil, nil\n\t}\n\townerInformation := &SchemaOwnerInformation{}\n\terr := json.Unmarshal([]byte(value), &ownerInformation)\n\tif err != nil {\n\t\treturn nil, schema.NewDiagnostics().AddErrorMsg(\"failed to unmarshal schema owner: %s, s = %s\", err.Error(), value)\n\t}\n\treturn ownerInformation, nil\n}\n\nfunc SaveSchemaOwner(ctx context.Context, storage storage.Storage, owner *SchemaOwnerInformation) *schema.Diagnostics {\n\tmarshal, err := json.Marshal(owner)\n\tif err != nil {\n\t\treturn schema.NewDiagnostics().AddErrorMsg(\"failed to marshal schema owner: %s\", err.Error())\n\t}\n\treturn storage.SetKey(ctx, SchemaOwnerKey, string(marshal))\n}\n\n// ------------------------------------------------- --------------------------------------------------------------------\n"
  },
  {
    "path": "pkg/storage/pgstorage/schema_owner_information_test.go",
    "content": "package pgstorage\n\nimport (\n\t\"context\"\n\t\"github.com/selefra/selefra-provider-sdk/env\"\n\t\"github.com/selefra/selefra-provider-sdk/storage\"\n\t\"github.com/selefra/selefra-provider-sdk/storage/database_storage/postgresql_storage\"\n\t\"github.com/selefra/selefra-provider-sdk/storage_factory\"\n\t\"github.com/selefra/selefra-utils/pkg/json_util\"\n\t\"github.com/selefra/selefra/pkg/utils\"\n\t\"github.com/stretchr/testify/assert\"\n\t\"os\"\n\t\"testing\"\n)\n\nfunc getTestStorage(t *testing.T) storage.Storage {\n\toptions := postgresql_storage.NewPostgresqlStorageOptions(env.GetDatabaseDsn())\n\tstorage, diagnostics := storage_factory.NewStorage(context.Background(), storage_factory.StorageTypePostgresql, options)\n\tif utils.IsNotEmpty(diagnostics) {\n\t\tt.Log(diagnostics.String())\n\t}\n\tassert.False(t, utils.HasError(diagnostics))\n\treturn storage\n}\n\nfunc TestSaveSchemaOwner(t *testing.T) {\n\thostname, _ := os.Hostname()\n\tstorage := getTestStorage(t)\n\tinformation := &SchemaOwnerInformation{\n\t\tHostname:          hostname,\n\t\tHolderID:          \"test\",\n\t\tConfigurationName: \"\",\n\t\tConfigurationMD5:  \"\",\n\t}\n\td := SaveSchemaOwner(context.Background(), storage, information)\n\tif utils.IsNotEmpty(d) {\n\t\tt.Log(d.String())\n\t}\n\tassert.False(t, utils.HasError(d))\n\n\towner, diagnostics := GetSchemaOwner(context.Background(), storage)\n\tif utils.IsNotEmpty(diagnostics) {\n\t\tt.Log(diagnostics.String())\n\t}\n\tassert.False(t, utils.HasError(diagnostics))\n\tassert.NotNil(t, owner)\n\tt.Log(json_util.ToJsonString(owner))\n}\n"
  },
  {
    "path": "pkg/storage/pgstorage/table_cache_information.go",
    "content": "package pgstorage\n\nimport (\n\t\"context\"\n\t\"encoding/json\"\n\t\"github.com/selefra/selefra-provider-sdk/provider/schema\"\n\t\"github.com/selefra/selefra-provider-sdk/storage\"\n\t\"github.com/selefra/selefra/pkg/utils\"\n\t\"time\"\n)\n\n// ------------------------------------------------- --------------------------------------------------------------------\n\n// TableCacheInformation Some information about the table level cache\ntype TableCacheInformation struct {\n\n\t// Table name\n\tTableName string `json:\"table_name\"`\n\n\t// The last time this table was pulled\n\tLastPullTime time.Time `json:\"last_pull_time\"`\n\n\t// Which batch was pulled off\n\tLastPullId string `json:\"last_pull_id\"`\n}\n\n// ReadTableCacheInformation Reads the table cache information from the database\nfunc ReadTableCacheInformation(ctx context.Context, storage storage.Storage, tableName string) (*TableCacheInformation, *schema.Diagnostics) {\n\tcacheKey := BuildCacheKey(tableName)\n\tvalue, diagnostics := storage.GetValue(ctx, cacheKey)\n\tif utils.HasError(diagnostics) {\n\t\treturn nil, diagnostics\n\t}\n\tif value == \"\" {\n\t\treturn nil, nil\n\t}\n\tinformation := &TableCacheInformation{}\n\terr := json.Unmarshal([]byte(value), information)\n\tif err != nil {\n\t\treturn nil, schema.NewDiagnostics().AddErrorMsg(\"table name = %s, read table cache information unmarshal failed: %s, s = %s\", tableName, err.Error(), value)\n\t}\n\treturn information, nil\n}\n\n// SaveTableCacheInformation Save the table cache information to the kv database\nfunc SaveTableCacheInformation(ctx context.Context, storage storage.Storage, information *TableCacheInformation) *schema.Diagnostics {\n\tcacheKey := BuildCacheKey(information.TableName)\n\tmarshal, err := json.Marshal(information)\n\tif err != nil {\n\t\treturn schema.NewDiagnostics().AddErrorMsg(\"failed to marshal cache information: %s\", err.Error())\n\t}\n\treturn storage.SetKey(ctx, cacheKey, 
string(marshal))\n}\n\nfunc BuildCacheKey(tableName string) string {\n\treturn \"cache:table:pull:\" + tableName\n}\n\n// ------------------------------------------------- --------------------------------------------------------------------\n"
  },
  {
    "path": "pkg/storage/pgstorage/table_cache_information_test.go",
    "content": "package pgstorage\n\nimport (\n\t\"context\"\n\t\"github.com/selefra/selefra-utils/pkg/id_util\"\n\t\"github.com/selefra/selefra-utils/pkg/json_util\"\n\t\"github.com/selefra/selefra/pkg/utils\"\n\t\"github.com/stretchr/testify/assert\"\n\t\"testing\"\n\t\"time\"\n)\n\nfunc TestReadTableCacheInformation(t *testing.T) {\n\ttestStorage := getTestStorage(t)\n\n\ttableName := \"test_foo_bar\"\n\n\t// save\n\td := SaveTableCacheInformation(context.Background(), testStorage, &TableCacheInformation{\n\t\tTableName:    tableName,\n\t\tLastPullTime: time.Now(),\n\t\tLastPullId:   id_util.RandomId(),\n\t})\n\tif utils.HasError(d) {\n\t\tt.Log(d.String())\n\t}\n\tassert.False(t, utils.HasError(d))\n\n\t// read\n\tinformation, diagnostics := ReadTableCacheInformation(context.Background(), testStorage, tableName)\n\tif utils.HasError(diagnostics) {\n\t\tt.Log(diagnostics.String())\n\t}\n\tassert.False(t, utils.HasError(diagnostics))\n\n\tt.Log(json_util.ToJsonString(information))\n\n}\n"
  },
  {
    "path": "pkg/telemetry/analytics.go",
    "content": "package telemetry\n\nimport (\n\t\"context\"\n\t\"github.com/selefra/selefra-provider-sdk/provider/schema\"\n\t\"github.com/selefra/selefra-utils/pkg/json_util\"\n\t\"github.com/selefra/selefra/pkg/logger\"\n\t\"github.com/selefra/selefra/pkg/utils\"\n\t\"sync\"\n)\n\n// ------------------------------------------------ ---------------------------------------------------------------------\n\n// TelemetryEnable Whether to enable usage data reporting\nvar TelemetryEnable = true\n\n// ------------------------------------------------ ---------------------------------------------------------------------\n\n// Analytics Represents an interface for analysis\ntype Analytics interface {\n\n\t// Init Initialization analyzer\n\tInit(ctx context.Context) *schema.Diagnostics\n\n\t// Submit the information to be collected\n\tSubmit(ctx context.Context, event *Event) *schema.Diagnostics\n\n\t// Close Turn off analyzer\n\tClose(ctx context.Context) *schema.Diagnostics\n}\n\n// ------------------------------------------------ ---------------------------------------------------------------------\n\ntype Event struct {\n\tName       string         `json:\"name\"`\n\tPayloadMap map[string]any `json:\"payload_map\"`\n}\n\nfunc NewEvent(name string) *Event {\n\treturn &Event{\n\t\tName:       name,\n\t\tPayloadMap: make(map[string]any, 0),\n\t}\n}\n\nfunc (x *Event) SetName(name string) *Event {\n\tx.Name = name\n\treturn x\n}\n\nfunc (x *Event) Add(name string, value any) *Event {\n\tx.PayloadMap[name] = value\n\treturn x\n}\n\nfunc (x *Event) ToJsonString() string {\n\treturn json_util.ToJsonString(x)\n}\n\n// ------------------------------------------------ ---------------------------------------------------------------------\n\nvar DefaultAnalytics Analytics\nvar InitOnce sync.Once\n\nfunc Init(ctx context.Context) *schema.Diagnostics {\n\n\tif !TelemetryEnable {\n\t\treturn nil\n\t}\n\n\tDefaultAnalytics = &RudderstackAnalytics{}\n\treturn 
DefaultAnalytics.Init(ctx)\n}\n\nfunc Submit(ctx context.Context, event *Event) *schema.Diagnostics {\n\n\tif !TelemetryEnable {\n\t\treturn nil\n\t}\n\n\tInitOnce.Do(func() {\n\t\td := Init(context.Background())\n\t\tif utils.HasError(d) {\n\t\t\tlogger.ErrorF(\"init telemetry, msg: %s\", d.String())\n\t\t} else {\n\t\t\tlogger.InfoF(\"init telemetry success\")\n\t\t}\n\t})\n\n\treturn DefaultAnalytics.Submit(ctx, event)\n}\n\nfunc Close(ctx context.Context) *schema.Diagnostics {\n\n\tif !TelemetryEnable {\n\t\treturn nil\n\t}\n\n\tif DefaultAnalytics != nil {\n\t\treturn DefaultAnalytics.Close(ctx)\n\t} else {\n\t\treturn nil\n\t}\n}\n"
  },
  {
    "path": "pkg/telemetry/analytics_test.go",
    "content": "package telemetry\n\nimport (\n\t\"context\"\n\t\"github.com/selefra/selefra/pkg/utils\"\n\t\"github.com/stretchr/testify/assert\"\n\t\"testing\"\n)\n\nfunc TestSubmit(t *testing.T) {\n\td := Submit(context.Background(), NewEvent(\"do-something\").Add(\"foo\", \"bar\"))\n\tif utils.IsNotEmpty(d) {\n\t\tt.Log(d.String())\n\t}\n\tassert.False(t, utils.HasError(d))\n\n\tClose(context.Background())\n}\n"
  },
  {
    "path": "pkg/telemetry/rudderstack.go",
    "content": "package telemetry\n\nimport (\n\t\"context\"\n\t\"github.com/rudderlabs/analytics-go/v4\"\n\t\"github.com/selefra/selefra-provider-sdk/provider/schema\"\n\t\"github.com/selefra/selefra-utils/pkg/id_util\"\n\t\"github.com/selefra/selefra/pkg/cli_env\"\n\t\"github.com/selefra/selefra/pkg/logger\"\n\t\"github.com/selefra/selefra/pkg/selefra_workspace\"\n)\n\ntype RudderstackAnalytics struct {\n\tclient   analytics.Client\n\tdeviceId string\n}\n\nvar _ Analytics = &RudderstackAnalytics{}\n\nfunc (x *RudderstackAnalytics) Init(ctx context.Context) *schema.Diagnostics {\n\t// Instantiates a client to use send messages to the Rudder API.\n\ttoken := cli_env.GetSelefraTelemetryToken()\n\tif token == \"\" {\n\t\tlogger.ErrorF(\"can not find SELEFRA_TELEMETRY_TOKEN\")\n\t\treturn schema.NewDiagnostics().AddErrorMsg(\"you must use env SELEFRA_TELEMETRY_TOKEN set you Rudderstack write key\")\n\t}\n\tclient := analytics.New(token, \"https://selefralefsm.dataplane.rudderstack.com\")\n\n\tx.client = client\n\n\tdeviceId, diagnostics := selefra_workspace.GetDeviceID()\n\tx.deviceId = deviceId\n\n\treturn diagnostics\n}\n\nfunc (x *RudderstackAnalytics) Submit(ctx context.Context, event *Event) *schema.Diagnostics {\n\tif x.client == nil {\n\t\treturn nil\n\t}\n\terr := x.client.Enqueue(analytics.Track{\n\t\tAnonymousId: x.deviceId,\n\t\tMessageId:   id_util.RandomId(),\n\t\tEvent:       event.Name,\n\t\tProperties:  event.PayloadMap,\n\t})\n\treturn schema.NewDiagnostics().AddError(err)\n}\n\nfunc (x *RudderstackAnalytics) Close(ctx context.Context) *schema.Diagnostics {\n\tif x.client != nil {\n\t\terr := x.client.Close()\n\t\tif err != nil {\n\t\t\treturn schema.NewDiagnostics().AddErrorMsg(\"close Rudderstack client failed: %s\", err.Error())\n\t\t}\n\t}\n\treturn nil\n}\n"
  },
  {
    "path": "pkg/utils/browser.go",
    "content": "package utils\n\nimport (\n\t\"errors\"\n\t\"runtime\"\n)\n\n// OpenBrowser Open the given URL with your browser\nfunc OpenBrowser(targetUrl string) (stdout string, stderr string, err error) {\n\tswitch runtime.GOOS {\n\tcase \"windows\":\n\t\treturn RunCommand(\"cmd\", \"/c\", \"start\", targetUrl)\n\tcase \"linux\":\n\t\treturn RunCommand(\"xdg-open\", targetUrl)\n\tcase \"darwin\":\n\t\treturn RunCommand(\"open\", targetUrl)\n\tdefault:\n\t\treturn \"\", \"\", errors.New(\"open browser not supported on this platform\")\n\t}\n}\n"
  },
  {
    "path": "pkg/utils/browser_test.go",
    "content": "package utils\n\nimport (\n\t\"fmt\"\n\t\"testing\"\n)\n\nfunc TestOpenBrowser(t *testing.T) {\n\tstdout, stderr, diagnostics := OpenBrowser(\"https://google.com\")\n\tfmt.Println(stderr)\n\tfmt.Println(stdout)\n\tfmt.Println(diagnostics.Error())\n}\n"
  },
  {
    "path": "pkg/utils/channel.go",
    "content": "package utils\n\n//import \"github.com/selefra/selefra-provider-sdk/provider/schema\"\n//\n//func ConnectMessageChannel(destinationMessageChannel, sourceMessageChannelB chan *schema.Diagnostics) {\n//\tfor message := range sourceMessageChannelB {\n//\t\tif destinationMessageChannel != nil {\n//\t\t\tdestinationMessageChannel <- message\n//\t\t}\n//\t}\n//}\n"
  },
  {
    "path": "pkg/utils/close.go",
    "content": "package utils\n\nvar toClose map[string]func() = make(map[string]func())\n\nfunc RegisterClose(name string, close func()) {\n\ttoClose[name] = close\n}\n\nfunc MultiRegisterClose(m map[string]func()) {\n\tfor name, fn := range m {\n\t\ttoClose[name] = fn\n\t}\n}\n\nfunc Close() {\n\tfor _, cleanFn := range toClose {\n\t\tcleanFn()\n\t}\n}\n"
  },
  {
    "path": "pkg/utils/command.go",
    "content": "package utils\n\nimport (\n\t\"bytes\"\n\t\"os/exec\"\n)\n\nfunc RunCommand(command string, args ...string) (stdout string, stderr string, err error) {\n\n\tstdOutBuff := bytes.Buffer{}\n\tstdErrBuff := bytes.Buffer{}\n\n\tcmd := exec.Command(command, args...)\n\tcmd.Stdout = &stdOutBuff\n\tcmd.Stderr = &stdErrBuff\n\n\terr = cmd.Run()\n\n\tstdout = stdOutBuff.String()\n\tstderr = stdErrBuff.String()\n\t//\n\t//if err != nil {\n\t//\tdiagnostics.AddErrorMsg(\"Run command %s error, error msg = %s, stdout = %s, stderr = %s\", command, err.Error(), stdout, stderr)\n\t//}\n\treturn\n}\n"
  },
  {
    "path": "pkg/utils/file.go",
    "content": "package utils\n\nimport (\n\t\"encoding/json\"\n\t\"errors\"\n\t\"fmt\"\n\t\"gopkg.in/yaml.v3\"\n\t\"os\"\n\t\"path/filepath\"\n)\n\nfunc Exists(filepath string) bool {\n\t_, err := os.Stat(filepath)\n\tif err != nil {\n\t\treturn false\n\t}\n\treturn true\n}\n\nfunc ExistsFile(filepath string) bool {\n\tinfo, err := os.Stat(filepath)\n\tif err != nil {\n\t\treturn false\n\t}\n\treturn !info.IsDir()\n}\n\nfunc ExistsDirectory(directoryPath string) bool {\n\tinfo, err := os.Stat(directoryPath)\n\tif err != nil {\n\t\treturn false\n\t}\n\treturn info.IsDir()\n}\n\n// EnsureDirectoryExists Make sure the directory exists, and create it if it does not\nfunc EnsureDirectoryExists(directoryPath string) error {\n\t_, err := os.Stat(directoryPath)\n\tif errors.Is(err, os.ErrNotExist) {\n\t\terr := os.MkdirAll(directoryPath, 0755)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\treturn nil\n}\n\nfunc EnsureDirectoryNotExists(directoryPath string) error {\n\t_, err := os.Stat(directoryPath)\n\tif errors.Is(err, os.ErrNotExist) {\n\t\treturn nil\n\t}\n\treturn os.RemoveAll(directoryPath)\n}\n\n// EnsureFileExists Make sure the file exists, and if it does not, use the given content to create the file\nfunc EnsureFileExists(fileFullPath string, initBytes []byte) error {\n\n\t_, err := os.Stat(fileFullPath)\n\n\tif err == nil {\n\t\treturn nil\n\t}\n\n\tif !errors.Is(err, os.ErrNotExist) {\n\t\treturn err\n\t}\n\n\terr = EnsureDirectoryExists(filepath.Dir(fileFullPath))\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn os.WriteFile(fileFullPath, initBytes, os.ModePerm)\n}\n\nfunc ReadYamlFile[T any](yamlFilePath string) (T, error) {\n\t//var r T\n\t//yamlFileReader, err := os.Open(yamlFilePath)\n\t//if err != nil {\n\t//\treturn r, fmt.Errorf(\"open file %s error: %s\", yamlFilePath, err.Error())\n\t//}\n\t//config := viper.New()\n\t//config.AddConfigPath(yamlFilePath)\n\t//configType := strings.Replace(path.Ext(yamlFilePath), \".\", \"\", 
1)\n\t//config.SetConfigType(configType)\n\t//err = config.ReadConfig(yamlFileReader)\n\t//if err != nil {\n\t//\treturn r, fmt.Errorf(\"read yaml file %s error: %s\", yamlFilePath, err.Error())\n\t//}\n\t//err = config.Unmarshal(&r)\n\t//if err != nil {\n\t//\treturn r, fmt.Errorf(\"unmarshal yaml file %s error: %s\", yamlFilePath, err.Error())\n\t//}\n\t//return r, nil\n\n\tvar r T\n\tyamlFileBytes, err := os.ReadFile(yamlFilePath)\n\tif err != nil {\n\t\treturn r, fmt.Errorf(\"open file %s error: %s\", yamlFilePath, err.Error())\n\t}\n\terr = yaml.Unmarshal(yamlFileBytes, &r)\n\tif err != nil {\n\t\treturn r, fmt.Errorf(\"unmarshal yaml file %s error: %s\", yamlFilePath, err.Error())\n\t}\n\treturn r, nil\n}\n\nfunc ReadJsonFile[T any](jsonFilePath string) (T, error) {\n\tvar r T\n\tjsonBytes, err := os.ReadFile(jsonFilePath)\n\tif err != nil {\n\t\treturn r, fmt.Errorf(\"open file %s error: %s\", jsonFilePath, err.Error())\n\t}\n\n\terr = json.Unmarshal(jsonBytes, &r)\n\tif err != nil {\n\t\treturn r, err\n\t}\n\treturn r, nil\n}\n\nfunc WriteJsonFile[T any](jsonFilePath string, v T) error {\n\tjsonBytes, err := json.Marshal(v)\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn os.WriteFile(jsonFilePath, jsonBytes, os.ModePerm)\n}\n\nfunc AbsPath(path string) string {\n\tabs, _ := filepath.Abs(path)\n\treturn abs\n}\n"
  },
  {
    "path": "pkg/utils/github.go",
    "content": "package utils\n\nimport (\n\t\"fmt\"\n\t\"strings\"\n)\n\n// ParseGitHubRepoFullName Parses the full name of GitHub's warehouse into owner and repoName parts\n// example: selefra/registry\nfunc ParseGitHubRepoFullName(repoFullName string) (owner, repo string, err error) {\n\tsplit := strings.Split(repoFullName, \"/\")\n\tif len(split) != 2 {\n\t\treturn \"\", \"\", fmt.Errorf(\"%s is not a valid GitHub repository full name\", repoFullName)\n\t}\n\treturn split[0], split[1], nil\n}\n"
  },
  {
    "path": "pkg/utils/home.go",
    "content": "package utils\n\nimport (\n\t\"errors\"\n\t\"github.com/mitchellh/go-homedir\"\n\t\"os\"\n\t\"path/filepath\"\n)\n\n// Home return selefra home, config in selefra home, an error\n// selefra is in ~/.selefra, it store tokens, downloaded binary files, database files, and other configuration files, etc.\n// configPath is ~/.selefra/.path/config.json, in config.json, the absolute path of the provider binary is declared\nfunc Home() (homeDir string, configPath string, err error) {\n\thome, err := homedir.Dir()\n\tif err != nil {\n\t\treturn \"\", \"\", err\n\t}\n\tregistryPath := filepath.Join(home, \".selefra\")\n\t_, err = os.Stat(registryPath)\n\tif errors.Is(err, os.ErrNotExist) {\n\t\terr := os.Mkdir(registryPath, 0755)\n\t\tif err != nil {\n\t\t\treturn \"\", \"\", err\n\t\t}\n\t}\n\n\t// provider binary file store in providerPath\n\tproviderPath := filepath.Join(home, \".selefra\", \".path\")\n\n\t_, err = os.Stat(providerPath)\n\tif errors.Is(err, os.ErrNotExist) {\n\t\terr = os.Mkdir(providerPath, 0755)\n\t\tif err != nil {\n\t\t\treturn \"\", \"\", err\n\t\t}\n\t}\n\n\tconfig := filepath.Join(home, \".selefra\", \".path\", \"config.json\")\n\n\t_, err = os.Stat(config)\n\tif errors.Is(err, os.ErrNotExist) {\n\t\terr = os.WriteFile(config, []byte(\"{}\"), 0644)\n\t\tif err != nil {\n\t\t\treturn \"\", \"\", err\n\t\t}\n\t}\n\treturn registryPath, config, nil\n}\n\nfunc GetTempPath() (string, error) {\n\tpath, _, err := Home()\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\tociPath := filepath.Join(path, \"temp\")\n\t_, err = os.Stat(ociPath)\n\tif errors.Is(err, os.ErrNotExist) {\n\t\terr = os.MkdirAll(ociPath, 0755)\n\t\tif err != nil {\n\t\t\treturn \"\", err\n\t\t}\n\t}\n\treturn ociPath, nil\n}\n\nfunc CreateSource(path, version, latest string) (string, string) {\n\tif latest == \"latest\" {\n\t\treturn \"selefra/\" + path + \"@\" + version, \"selefra/\" + path + \"@latest\"\n\t}\n\treturn \"selefra/\" + path + \"@\" + version, \"\"\n}\n"
  },
  {
    "path": "pkg/utils/lock.go",
    "content": "package utils\n\nimport (\n\t\"github.com/selefra/selefra-utils/pkg/id_util\"\n\t\"os\"\n)\n\n// BuildLockOwnerId The current host name is placed in the owner of the lock so that it is easy to identify who is holding the lock\n// This place is mainly used for database locks\nfunc BuildLockOwnerId() string {\n\thostname, err := os.Hostname()\n\tid := id_util.RandomId()\n\tif err != nil {\n\t\treturn \"unknown-hostname-\" + id\n\t} else {\n\t\treturn hostname + \"-\" + id\n\t}\n}\n"
  },
  {
    "path": "pkg/utils/log.go",
    "content": "package utils\n\nimport \"github.com/selefra/selefra-provider-sdk/provider/schema\"\n\nfunc HasError(diagnostics *schema.Diagnostics) bool {\n\treturn diagnostics != nil && diagnostics.HasError()\n}\n\nfunc NotHasError(diagnostics *schema.Diagnostics) bool {\n\treturn !HasError(diagnostics)\n}\n\nfunc IsNotEmpty(diagnostics *schema.Diagnostics) bool {\n\treturn diagnostics != nil && !diagnostics.IsEmpty()\n}\n\nfunc IsEmpty(diagnostics *schema.Diagnostics) bool {\n\treturn diagnostics == nil || diagnostics.IsEmpty()\n}\n"
  },
  {
    "path": "pkg/utils/openai.go",
    "content": "package utils\n\nimport (\n\t\"context\"\n\t\"fmt\"\n\t\"github.com/sashabaranov/go-openai\"\n\t\"strings\"\n)\n\nconst (\n\tGPT3Dot5Turbo = \"gpt-3.5\"\n\tGPT3          = \"gpt-3\"\n\tGPT4          = \"gpt-4\"\n)\n\nvar promptMap = map[string]string{\n\t\"type\": `You are a technical expert in the public cloud and SaaS. You need to help determine which public cloud or SaaS analysis requirement the question belongs to according to the question I provided, and answer it as described below:\n1. For security requirements, please return: security & cloudName\n2. For cost analysis requirements, please return: finops & cloudName\n3. For architecture optimization requirements, please return: architecture & cloudName\n\nDo not output Answer and line breaks\n\nThe question is:\n%s`,\n\t\"securityTable\": `You are a public cloud and SaaS security expert. What you need to do is to determine which database tables and fields will be used in the analysis question based on the tables and security analysis questions I provided, and return them in the following format\n\t\tTable1,Table2,Table3\n\n\t\tDo not output Answer and line breaks\n\t\t\nThe question is:\n%s\n\nTables is:\n%s\n`,\n\t\"securityColumn\": `You are a public cloud and SaaS security expert. What you need to do is to determine which database tables and fields will be used in the analysis question based on the tables and security analysis questions I provided, and return them in the following format\n\nColumn1,Column2,Column3\n\nDo not output Answer and line breaks\n\nDo not return duplicate fields\n\nDo not appear fields that do not exist in the original field\n\nThe question is:\n%s\n\nTable is:\n%s\n\nColumn is:\n%s\n`,\n\t\"security\": `You are a public cloud and SaaS security expert. I will give you a section of %s's %s configuration information data and security analysis question. Please help me detect whether there is a security vulnerability in this configuration. 
If there is a security problem, please return the vulnerability title, vulnerability description, repair recommendation for the complete executable steps, security level, Tags of the security compliance framework, and return it in the following example format:\n[\n\t{\n\t\t\"title\":\"\",\n\t\t\"description\":\"\",\n\t\t\"remediation\": \"\",\n\t\t\"severity\": \"\",\n\t\t\"tags\":[\"\"],\n\t\t\"resource\":\"\",\n\t}\n]\nThe configuration is:\n%s\n\nThe question is:\n%s\n`,\n\t\"finopsTable\": `You are a public cloud and SaaS cost optimization expert. What you need to do is to determine which database tables and fields will be used in the analysis question based on the tables and security analysis questions I provided, and return them in the following format\n\t\tTable1,Table2,Table3\n\n\t\tDo not output Answer and line breaks\n\t\t\nThe question is:\n%s\n\nTables is:\n%s\n`,\n\t\"finopsColumn\": `You are a public cloud and SaaS cost optimization expert. What you need to do is to determine which database tables and fields will be used in the analysis question based on the tables and security analysis questions I provided, and return them in the following format\n\nColumn1,Column2,Column3\n\nDo not output Answer and line breaks\n\nDo not return duplicate fields\n\nDo not appear fields that do not exist in the original field\n\nThe question is:\n%s\n\nTable is:\n%s\n\nColumn is:\n%s\n`,\n\t\"finops\": `You are a public cloud and SaaS cost optimization expert. I will give you a section of %s's %s configuration information data and security analysis question. Please help me detect whether there is a security vulnerability in this configuration. 
If there is a security problem, please return the vulnerability title, vulnerability description, repair recommendation for the complete executable steps, security level, Tags of the security compliance framework, and return it in the following example format:\n[\n\t{\n\t\t\"title\":\"\",\n\t\t\"description\":\"\",\n\t\t\"remediation\": \"\",\n\t\t\"severity\": \"\",\n\t\t\"tags\":[\"\"],\n\t\t\"resource\":\"\",\n\t}\n]\nThe configuration is:\n%s\n\nThe question is:\n%s\n`,\n\t\"architectureTable\": `You are a public cloud and SaaS technology architect. What you need to do is to determine which database tables and fields will be used in the analysis question based on the tables and security analysis questions I provided, and return them in the following format\n\t\tTable1,Table2,Table3\n\n\t\tDo not output Answer and line breaks\n\t\t\nThe question is:\n%s\n\nTables is:\n%s\n`,\n\t\"architectureColumn\": `You are a public cloud and SaaS technology architect. What you need to do is to determine which database tables and fields will be used in the analysis question based on the tables and security analysis questions I provided, and return them in the following format\n\nColumn1,Column2,Column3\n\nDo not output Answer and line breaks\n\nDo not return duplicate fields\n\nDo not appear fields that do not exist in the original field\n\nThe question is:\n%s\n\nTable is:\n%s\n\nColumn is:\n%s\n`,\n\t\"architecture\": `You are a public cloud and SaaS technology architect. I will give you a section of %s's %s configuration information data and security analysis question. Please help me detect whether there is a security vulnerability in this configuration. 
If there is a security problem, please return the vulnerability title, vulnerability description, repair recommendation for the complete executable steps, security level, Tags of the security compliance framework, and return it in the following example format:\n[\n\t{\n\t\t\"title\":\"\",\n\t\t\"description\":\"\",\n\t\t\"remediation\": \"\",\n\t\t\"severity\": \"\",\n\t\t\"tags\":[\"\"],\n\t\t\"resource\":\"\",\n\t}\n]\nThe configuration is:\n%s\n\nThe question is:\n%s\n`,\n}\n\nfunc OpenApiClient(ctx context.Context, sk string, mode string, promptType string, args ...any) (string, error) {\n\tclient := openai.NewClient(sk)\n\tswitch mode {\n\tcase GPT3Dot5Turbo:\n\t\treturn GPT3Dot5TurboFunc(ctx, client, fmt.Sprintf(promptMap[promptType], args...))\n\tcase GPT3:\n\t\tprompt := fmt.Sprintf(promptMap[promptType], args...)\n\t\treturn GPT3Func(ctx, client, prompt)\n\tcase GPT4:\n\t\treturn GPT4TurboFunc(ctx, client, fmt.Sprintf(promptMap[promptType], args...))\n\t}\n\tfmt.Println(\"Failed to find matching GPT version [\\\"gpt-3.5\\\", \\\"gpt-3\\\", \\\"gpt-4\\\"], running with \\\"gpt-3.5\\\" by default.\")\n\treturn GPT3Dot5TurboFunc(ctx, client, fmt.Sprintf(promptMap[promptType], args...))\n}\n\nfunc GPT3Dot5TurboFunc(ctx context.Context, client *openai.Client, prompt string) (string, error) {\n\tresp, err := client.CreateChatCompletion(\n\t\tctx,\n\t\topenai.ChatCompletionRequest{\n\t\t\tModel:     openai.GPT3Dot5Turbo,\n\t\t\tMaxTokens: 512,\n\t\t\tMessages: []openai.ChatCompletionMessage{\n\t\t\t\t{\n\t\t\t\t\tRole:    openai.ChatMessageRoleUser,\n\t\t\t\t\tContent: prompt,\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t)\n\n\tif err != nil {\n\t\tfmt.Printf(\"ChatCompletion error: %v\\n\", err)\n\t\treturn \"\", err\n\t}\n\treturn strings.Trim(resp.Choices[0].Message.Content, \"\\n\"), nil\n}\n\nfunc GPT4TurboFunc(ctx context.Context, client *openai.Client, prompt string) (string, error) {\n\tresp, err := 
client.CreateChatCompletion(\n\t\tctx,\n\t\topenai.ChatCompletionRequest{\n\t\t\tModel:     openai.GPT4,\n\t\t\tMaxTokens: 512,\n\t\t\tMessages: []openai.ChatCompletionMessage{\n\t\t\t\t{\n\t\t\t\t\tRole:    openai.ChatMessageRoleUser,\n\t\t\t\t\tContent: prompt,\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t)\n\n\tif err != nil {\n\t\tfmt.Printf(\"ChatCompletion error: %v\\n\", err)\n\t\treturn \"\", err\n\t}\n\treturn strings.Trim(resp.Choices[0].Message.Content, \"\\n\"), nil\n}\n\nfunc GPT3Func(ctx context.Context, client *openai.Client, Prompt string) (string, error) {\n\treq := openai.CompletionRequest{\n\t\tModel:     openai.GPT3TextDavinci003,\n\t\tMaxTokens: 256,\n\t\tPrompt:    Prompt,\n\t}\n\tresp, err := client.CreateCompletion(ctx, req)\n\tif err != nil {\n\t\tfmt.Printf(\"Completion error: %v\\n\", err)\n\t\treturn \"\", err\n\t}\n\treturn strings.Trim(resp.Choices[0].Text, \"\\n\"), nil\n}\n"
  },
  {
    "path": "pkg/utils/reg.go",
    "content": "package utils\n\nimport \"regexp\"\n\nfunc DeleteExtraSpace(s string) string {\n\tregstr := \"\\\\s{2,}\"\n\treg, _ := regexp.Compile(regstr)\n\ttmpstr := make([]byte, len(s))\n\tcopy(tmpstr, s)\n\tspc_index := reg.FindStringIndex(string(tmpstr))\n\tfor len(spc_index) > 0 {\n\t\ttmpstr = append(tmpstr[:spc_index[0]+1], tmpstr[spc_index[1]:]...)\n\t\tspc_index = reg.FindStringIndex(string(tmpstr))\n\t}\n\treturn string(tmpstr)\n}\n"
  },
  {
    "path": "pkg/utils/request.go",
    "content": "package utils\n\nimport (\n\t\"context\"\n\t\"errors\"\n\t\"io/ioutil\"\n\t\"net/http\"\n\t\"strings\"\n)\n\ntype Header struct {\n\tKey   string\n\tValue string\n}\n\nfunc Request(ctx context.Context, method string, _url string, body []byte, headers ...Header) ([]byte, error) {\n\tclient := &http.Client{}\n\tsBody := strings.NewReader(string(body))\n\trequest, err := http.NewRequestWithContext(ctx, method, _url, sBody)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\trequest.Header.Add(\"Content-Type\", \"application/json\")\n\tfor _, header := range headers {\n\t\trequest.Header.Add(header.Key, header.Value)\n\t}\n\n\tresp, err := client.Do(request)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdefer resp.Body.Close()\n\tif resp.StatusCode != http.StatusOK {\n\t\treturn nil, errors.New(\"code not equal 200\")\n\t}\n\trByte, err := ioutil.ReadAll(resp.Body)\n\tif err != nil {\n\t\treturn nil, errors.New(\"read body err :\" + err.Error())\n\t}\n\treturn rByte, err\n}\n"
  },
  {
    "path": "pkg/utils/slice.go",
    "content": "package utils\n\nfunc FindFirstSameKeyInTwoStringArray(a []string, b []string) string {\n\tfor _, v := range a {\n\t\tfor _, v2 := range b {\n\t\t\tif v2 == v {\n\t\t\t\treturn v\n\t\t\t}\n\t\t}\n\t}\n\treturn \"\"\n}\n"
  },
  {
    "path": "pkg/utils/strava.go",
    "content": "package utils\n\nimport (\n\t\"encoding/json\"\n\t\"fmt\"\n\t\"github.com/google/uuid\"\n\t\"strconv\"\n\t\"strings\"\n)\n\nfunc StringToUint64(string2 string) uint64 {\n\tu, _ := strconv.ParseUint(string2, 10, 64)\n\treturn u\n}\n\nfunc Strava(value interface{}) string {\n\tvar key string\n\tif value == nil {\n\t\treturn key\n\t}\n\tswitch value.(type) {\n\tcase float64:\n\t\tft := value.(float64)\n\t\tkey = strconv.FormatFloat(ft, 'f', -1, 64)\n\tcase float32:\n\t\tft := value.(float32)\n\t\tkey = strconv.FormatFloat(float64(ft), 'f', -1, 64)\n\tcase int:\n\t\tit := value.(int)\n\t\tkey = strconv.Itoa(it)\n\tcase uint:\n\t\tit := value.(uint)\n\t\tkey = strconv.Itoa(int(it))\n\tcase int8:\n\t\tit := value.(int8)\n\t\tkey = strconv.Itoa(int(it))\n\tcase uint8:\n\t\tit := value.(uint8)\n\t\tkey = strconv.Itoa(int(it))\n\tcase int16:\n\t\tit := value.(int16)\n\t\tkey = strconv.Itoa(int(it))\n\tcase uint16:\n\t\tit := value.(uint16)\n\t\tkey = strconv.Itoa(int(it))\n\tcase int32:\n\t\tit := value.(int32)\n\t\tkey = strconv.Itoa(int(it))\n\tcase uint32:\n\t\tit := value.(uint32)\n\t\tkey = strconv.Itoa(int(it))\n\tcase int64:\n\t\tit := value.(int64)\n\t\tkey = strconv.FormatInt(it, 10)\n\tcase uint64:\n\t\tit := value.(uint64)\n\t\tkey = strconv.FormatUint(it, 10)\n\tcase string:\n\t\tkey = value.(string)\n\tcase []byte:\n\t\tkey = string(value.([]byte))\n\tcase [16]uint8:\n\t\t// uuid\n\t\tarr := value.([16]uint8)\n\t\tuuid, _ := uuid.FromBytes(arr[:])\n\t\tkey = uuid.String()\n\tdefault:\n\t\tnewValue, _ := json.Marshal(value)\n\t\tkey = string(newValue)\n\t}\n\n\treturn key\n}\n\nfunc RemoveRepeatedElement(arr []string) (newArr []string) {\n\tnewArr = make([]string, 0)\n\tfor i := 0; i < len(arr); i++ {\n\t\trepeat := false\n\t\tfor j := i + 1; j < len(arr); j++ {\n\t\t\tif arr[i] == arr[j] {\n\t\t\t\trepeat = true\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t\tif !repeat {\n\t\t\tnewArr = append(newArr, arr[i])\n\t\t}\n\t}\n\treturn\n}\n\nfunc 
GenerateString(leftVar, middleVar, rightVar string) string {\n\ttotalLength, err := getTerminalWidth()\n\tif err != nil {\n\t\ttotalLength = 120\n\t}\n\tmiddleLength := totalLength - 10 - len(leftVar) - len(rightVar)\n\tif middleLength <= 0 {\n\t\tmiddleLength = 5\n\t}\n\n\tmiddlePart := strings.Repeat(middleVar, middleLength)\n\n\tresult := fmt.Sprintf(\"%s%s%s\", leftVar, middlePart, rightVar)\n\n\treturn result\n}\n\nfunc HasOne(arr []string, key string) bool {\n\tfor _, v := range arr {\n\t\tif v == key {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}\n"
  },
  {
    "path": "pkg/utils/template.go",
    "content": "package utils\n\nimport (\n\t\"fmt\"\n\t\"strings\"\n\t\"text/template\"\n)\n\nfunc RenderingTemplate[T any](templateName, templateString string, data T) (s string, err error) {\n\n\tdefer func() {\n\t\tif r := recover(); r != nil {\n\t\t\terr = fmt.Errorf(\"rendering template error: %v\", r)\n\t\t}\n\t}()\n\n\t// prevent <no value>\n\t//parse, err := template.New(templateName).Option(\"missingkey=zero\").Parse(templateString)\n\tparse, err := template.New(templateName).Parse(templateString)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\tbuilder := strings.Builder{}\n\terr = parse.Execute(&builder, data)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\t//return strings.ReplaceAll(builder.String(), \"<no value>\", \"\"), nil\n\treturn builder.String(), nil\n}\n"
  },
  {
    "path": "pkg/utils/terminal_other.go",
    "content": "//go:build !windows\n// +build !windows\n\npackage utils\n\nimport (\n\t\"fmt\"\n\t\"os\"\n\t\"syscall\"\n\t\"unsafe\"\n)\n\nfunc getTerminalWidth() (int, error) {\n\tvar size [4]uint16\n\t_, _, err := syscall.Syscall(syscall.SYS_IOCTL, os.Stdout.Fd(), uintptr(syscall.TIOCGWINSZ), uintptr(unsafe.Pointer(&size)))\n\tif err != 0 {\n\t\treturn 0, fmt.Errorf(\"failed to get terminal width: %v\", err)\n\t}\n\treturn int(size[1]), nil\n}\n"
  },
  {
    "path": "pkg/utils/terminal_win.go",
    "content": "//go:build windows\n// +build windows\n\npackage utils\n\nimport (\n\t\"fmt\"\n\t\"golang.org/x/sys/windows\"\n\t\"os\"\n)\n\nfunc getTerminalWidth() (int, error) {\n\tvar info windows.ConsoleScreenBufferInfo\n\terr := windows.GetConsoleScreenBufferInfo(windows.Handle(os.Stdout.Fd()), &info)\n\tif err != nil {\n\t\treturn 0, fmt.Errorf(\"failed to get terminal width: %v\", err)\n\t}\n\treturn int(info.Window.Right - info.Window.Left + 1), nil\n}\n"
  },
  {
    "path": "pkg/version/version.go",
    "content": "package version\n\nimport (\n\t\"fmt\"\n\t\"github.com/hashicorp/go-version\"\n\t\"sort\"\n\t\"strings\"\n)\n\nconst (\n\n\t// VersionLatest Indicates the latest version\n\tVersionLatest = \"latest\"\n\n\t// NameVersionDelimiter The separator character for name and version\n\tNameVersionDelimiter = \"@\"\n)\n\n// ------------------------------------------------- --------------------------------------------------------------------\n\n// NameAndVersion A key-val ue pair representing a name and version\ntype NameAndVersion struct {\n\tName    string `json:\"name\"`\n\tVersion string `json:\"version\"`\n}\n\nfunc NewNameAndVersion(name, version string) *NameAndVersion {\n\treturn &NameAndVersion{\n\t\tName:    name,\n\t\tVersion: version,\n\t}\n}\n\n// ParseNameAndVersion example: aws@v0.0.1\nfunc ParseNameAndVersion(nameAndVersion string) *NameAndVersion {\n\tsplit := strings.Split(nameAndVersion, NameVersionDelimiter)\n\tvar name, version string\n\tif len(split) > 1 {\n\t\tname = split[0]\n\t\tversion = split[1]\n\t} else {\n\t\tname = split[0]\n\t\tversion = VersionLatest\n\t}\n\treturn &NameAndVersion{\n\t\tName:    name,\n\t\tVersion: version,\n\t}\n}\n\n// IsLatestVersion Check whether the version number indicates the latest version\nfunc (x *NameAndVersion) IsLatestVersion() bool {\n\treturn IsLatestVersion(x.Version)\n}\n\nfunc (x *NameAndVersion) String() string {\n\tif x.Version == \"\" {\n\t\treturn x.Name\n\t}\n\treturn fmt.Sprintf(\"%s%s%s\", x.Name, NameVersionDelimiter, x.Version)\n}\n\n// ------------------------------------------------- --------------------------------------------------------------------\n\n// Sort version numbers\nfunc Sort(versionsRaw []string) []string {\n\n\tversions := make([]*version.Version, len(versionsRaw))\n\tfor i, raw := range versionsRaw {\n\t\tv, _ := version.NewVersion(raw)\n\t\tversions[i] = v\n\t}\n\n\t// After this, the versions are properly sorted\n\tcollection := 
version.Collection(versions)\n\tsort.Sort(collection)\n\n\tnewVersions := make([]string, len(collection))\n\tfor index, version := range collection {\n\t\tnewVersions[index] = \"v\" + version.String()\n\t}\n\treturn newVersions\n}\n\n// ------------------------------------------------- --------------------------------------------------------------------\n\n// IsConstraintsAllow Determines whether the given version conforms to the version constraint\nfunc IsConstraintsAllow(constraints version.Constraints, version *version.Version) bool {\n\tfor _, c := range constraints {\n\t\tif c.Check(version) {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}\n\n// ------------------------------------------------- --------------------------------------------------------------------\n\nfunc IsLatestVersion(versionString string) bool {\n\treturn versionString == \"\" || VersionLatest == strings.ToLower(versionString)\n}\n\n// ------------------------------------------------- --------------------------------------------------------------------\n"
  },
  {
    "path": "scripts/kill-postgresql.sh",
    "content": "#!/bin/bash\nps -ef | grep postgre | awk '{print $2}' | xargs -i kill {}"
  },
  {
    "path": "tests/workspace/offline/module.yaml",
    "content": "modules:\n  - name: Misconfigure-S3\n    uses:\n      - rules/iam_mfa.yaml\n#      - rules/s3/bucket_acl_publicly_readable.yaml\n#      - rules/s3/bucket_acl_publicly_writeable.yaml\n#      - rules/s3/bucket_allow_http_access.yaml\n#      - rules/s3/bucket_default_encryption_disable.yaml\n#      - rules/s3/bucket_is_not_configured_with_cors_rules.yaml\n#      - rules/s3/bucket_logging_disable.yaml\n#      - rules/s3/bucket_not_configured_block_public_access.yaml\n#      - rules/s3/bucket_object_traversal_by_acl.yaml\n#      - rules/s3/bucket_object_traversal_by_policy.yaml\n#      - rules/s3/bucket_publicly_readable.yaml\n#      - rules/s3/bucket_publicly_writeable.yaml\n#      - rules/s3/bucket_source_ip_not_set.yaml\n#      - rules/s3/bucket_versioning_is_disabled.yaml\n#      - rules/s3/mfa_delete_is_disable.yaml\n#      - rules/s3/the_target_bucket_for_server_access_logging_is_the_bucket_itself.yaml\n"
  },
  {
    "path": "tests/workspace/offline/rules/iam_mfa.yaml",
    "content": "rules:\n  - name: ebs_volume_are_unencrypted\n    query: |\n      SELECT \n        *\n      FROM \n        aws_ec2_ebs_volumes \n      WHERE \n        encrypted = FALSE\n    labels:\n      tag:\n        - Security\n        - Misconfigure\n      author: Selefra\n      standard: Custom\n    metadata:\n      id: SF010302\n      severity: Low\n      provider: AWS\n      resource_type: EC2\n      resource_account_id : '{{.account_id}}'\n      resource_id: '{{.id}}'\n      resource_region: '{{.availability_zone}}'\n      remediation: remediation/ebs_volume_are_unencrypted.md\n      title: EBS volume are unencrypted\n      description: Ensure that EBS volumes are encrypted.\n    output: 'EBS volume are unencrypted, EBS id: {{.id}}, availability zone: {{.availability_zone}}'"
  },
  {
    "path": "tests/workspace/offline/selefra.yaml",
    "content": "selefra:\n    name: testtt\n    cli_version: '{{version}}'\n    providers:\n        - name: aws\n          source: selefra/aws\n          version: latest\nproviders:\n    - name: aws_01\n      cache: 1d\n      provider: aws\n      resources:\n#        - aws_s3_buckets\n#        - aws_s3_accounts\n      ##  Optional, Repeated. Add an accounts block for every account you want to assume-role into and fetch data from.\n      #accounts:\n      #    #     Optional. User identification\n      #  - account_name: <UNIQUE ACCOUNT IDENTIFIER>\n      #    #    Optional. Named profile in config or credential file from where Selefra should grab credentials\n      #    shared_config_profile: < PROFILE_NAME >\n      #    #    Optional. Location of shared configuration files\n      #    shared_config_files:\n      #      - <FILE_PATH>\n      #    #   Optional. Location of shared credentials files\n      #    shared_credentials_files:\n      #      - <FILE_PATH>\n      #    #    Optional. Role ARN we want to assume when accessing this account\n      #    role_arn: < YOUR_ROLE_ARN >\n      #    #    Optional. Named role session to grab specific operation under the assumed role\n      #    role_session_name: <SESSION_NAME>\n      #    #    Optional. Any outside of the org account id that has additional control\n      #    external_id: <ID>\n      #    #    Optional. Designated region of servers\n      #    default_region: <REGION_CODE>\n      #    #    Optional. by default assumes all regions\n      #    regions:\n      #      - us-east-1\n      #      - us-west-2\n      ##    The maximum number of times that a request will be retried for failures. Defaults to 10 retry attempts.\n      #max_attempts: 10\n      ##    The maximum back off delay between attempts. The backoff delays exponentially with a jitter based on the number of attempts. Defaults to 30 seconds.\n      #max_backoff: 30"
  },
  {
    "path": "tests/workspace/online/module.yaml",
    "content": "modules:\n  - name: Misconfigure-S3\n    uses:\n      - rules/s3/bucket_acl_publicly_readable.yaml\n      - rules/s3/bucket_acl_publicly_writeable.yaml\n      - rules/s3/bucket_allow_http_access.yaml\n      - rules/s3/bucket_default_encryption_disable.yaml\n      - rules/s3/bucket_is_not_configured_with_cors_rules.yaml\n      - rules/s3/bucket_logging_disable.yaml\n      - rules/s3/bucket_not_configured_block_public_access.yaml\n      - rules/s3/bucket_object_traversal_by_acl.yaml\n      - rules/s3/bucket_object_traversal_by_policy.yaml\n      - rules/s3/bucket_publicly_readable.yaml\n      - rules/s3/bucket_publicly_writeable.yaml\n      - rules/s3/bucket_source_ip_not_set.yaml\n      - rules/s3/bucket_versioning_is_disabled.yaml\n      - rules/s3/mfa_delete_is_disable.yaml\n      - rules/s3/the_target_bucket_for_server_access_logging_is_the_bucket_itself.yaml\n"
  },
  {
    "path": "tests/workspace/online/rules/iam_mfa.yaml",
    "content": "rules:\n  - name: ebs_volume_are_unencrypted\n    query: |\n      SELECT \n        *\n      FROM \n        aws_ec2_ebs_volumes \n      WHERE \n        encrypted = FALSE\n    labels:\n      tag:\n        - Security\n        - Misconfigure\n      author: Selefra\n      standard: Custom\n    metadata:\n      id: SF010302\n      severity: Low\n      provider: AWS\n      resource_type: EC2\n      resource_account_id : '{{.account_id}}'\n      resource_id: '{{.id}}'\n      resource_region: '{{.availability_zone}}'\n      remediation: remediation/ebs_volume_are_unencrypted.md\n      title: EBS volume are unencrypted\n      description: Ensure that EBS volumes are encrypted.\n    output: 'EBS volume are unencrypted, EBS id: {{.id}}, availability zone: {{.availability_zone}}'"
  },
  {
    "path": "tests/workspace/online/selefra.yaml",
    "content": "selefra:\n    cloud:\n        project: testProject\n        organization: cliOnlie\n        hostname: 192.168.0.68:58018\n    name: testProject\n    cli_version: '{{version}}'\n    providers:\n        - name: aws\n          source: selefra/aws\n          version: latest\nproviders:\n    - name: aws\n      resources:\n        - aws_s3_buckets\n        - aws_s3_accounts\n      ##  Optional, Repeated. Add an accounts block for every account you want to assume-role into and fetch data from.\n      #accounts:\n      #    #     Optional. User identification\n      #  - account_name: <UNIQUE ACCOUNT IDENTIFIER>\n      #    #    Optional. Named profile in config or credential file from where Selefra should grab credentials\n      #    shared_config_profile: < PROFILE_NAME >\n      #    #    Optional. Location of shared configuration files\n      #    shared_config_files:\n      #      - <FILE_PATH>\n      #    #   Optional. Location of shared credentials files\n      #    shared_credentials_files:\n      #      - <FILE_PATH>\n      #    #    Optional. Role ARN we want to assume when accessing this account\n      #    role_arn: < YOUR_ROLE_ARN >\n      #    #    Optional. Named role session to grab specific operation under the assumed role\n      #    role_session_name: <SESSION_NAME>\n      #    #    Optional. Any outside of the org account id that has additional control\n      #    external_id: <ID>\n      #    #    Optional. Designated region of servers\n      #    default_region: <REGION_CODE>\n      #    #    Optional. by default assumes all regions\n      #    regions:\n      #      - us-east-1\n      #      - us-west-2\n      ##    The maximum number of times that a request will be retried for failures. Defaults to 10 retry attempts.\n      #max_attempts: 10\n      ##    The maximum back off delay between attempts. The backoff delays exponentially with a jitter based on the number of attempts. Defaults to 30 seconds.\n      #max_backoff: 30\n"
  }
]